/*
 * kmp_sched.cpp -- static scheduling -- iteration initialization
 */

//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//

/* Static scheduling initialization.

   NOTE: team->t.t_nproc is a constant inside of any dispatch loop, however
   it may change values between parallel regions. __kmp_max_nth
   is the largest value __kmp_nth may take, 1 is the smallest. */

#include "kmp.h"
#include "kmp_error.h"
#include "kmp_i18n.h"
#include "kmp_itt.h"
#include "kmp_stats.h"
#include "kmp_str.h"

#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

#ifdef KMP_DEBUG
//-------------------------------------------------------------------------
// template for debug prints specification ( d, u, lld, llu )
char const *traits_t<int>::spec = "d";
char const *traits_t<unsigned int>::spec = "u";
char const *traits_t<long long>::spec = "lld";
char const *traits_t<unsigned long long>::spec = "llu";
//-------------------------------------------------------------------------
#endif

template <typename T>
static void __kmp_for_static_init(ident_t *loc, kmp_int32 global_tid,
                                  kmp_int32 schedtype, kmp_int32 *plastiter,
                                  T *plower, T *pupper,
                                  typename traits_t<T>::signed_t *pstride,
                                  typename traits_t<T>::signed_t incr,
                                  typename traits_t<T>::signed_t chunk) {
  KMP_COUNT_BLOCK(OMP_FOR_static);
  KMP_TIME_PARTITIONED_BLOCK(FOR_static_scheduling);

  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  /* this all has to be changed back to TID and such.. */
  kmp_int32 gtid = global_tid;
  kmp_uint32 tid;
  kmp_uint32 nth;
  UT trip_count;
  kmp_team_t *team;
  kmp_info_t *th = __kmp_threads[gtid];

#if OMPT_SUPPORT && OMPT_TRACE
  ompt_team_info_t *team_info = NULL;
  ompt_task_info_t *task_info = NULL;

  if (ompt_enabled) {
    // Only fully initialize variables needed by OMPT if OMPT is enabled.
    team_info = __ompt_get_teaminfo(0, NULL);
    task_info = __ompt_get_taskinfo(0);
  }
#endif

  KMP_DEBUG_ASSERT(plastiter && plower && pupper && pstride);
  KE_TRACE(10, ("__kmpc_for_static_init called (%d)\n", global_tid));
#ifdef KMP_DEBUG
  {
    const char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_for_static_init: T#%%d sched=%%d liter=%%d iter=(%%%s,"
        " %%%s, %%%s) incr=%%%s chunk=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
        traits_t<ST>::spec, traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, global_tid, schedtype, *plastiter, *plower, *pupper,
                   *pstride, incr, chunk));
    __kmp_str_free(&buff);
  }
#endif

  if (__kmp_env_consistency_check) {
    __kmp_push_workshare(global_tid, ct_pdo, loc);
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
  }
  /* special handling for zero-trip loops */
  if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
    if (plastiter != NULL)
      *plastiter = FALSE;
    /* leave pupper and plower set to entire iteration space */
    *pstride = incr; /* value should never be used */
// *plower = *pupper - incr;
// let compiler bypass the illegal loop (like for(i=1;i<10;i--))
// THE LINE COMMENTED ABOVE CAUSED shape2F/h_tests_1.f TO HAVE A FAILURE
// ON A ZERO-TRIP LOOP (lower=1, upper=0,stride=1) - JPH June 23, 2009.
#ifdef KMP_DEBUG
    {
      const char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init:(ZERO TRIP) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s "
                              "signed?<%s>, loc = %%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec, traits_t<T>::spec);
      KD_TRACE(100,
               (buff, *plastiter, *plower, *pupper, *pstride, loc->psource));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
      ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
          team_info->parallel_id, task_info->task_id, team_info->microtask);
    }
#endif
    KMP_COUNT_VALUE(FOR_static_iterations, 0);
    return;
  }

#if OMP_40_ENABLED
  // Although there are schedule enumerations above kmp_ord_upper which are not
  // schedules for "distribute", the only ones which are useful are dynamic, so
  // cannot be seen here, since this codepath is only executed for static
  // schedules.
  if (schedtype > kmp_ord_upper) {
    // we are in DISTRIBUTE construct
    schedtype += kmp_sch_static -
                 kmp_distribute_static; // AC: convert to usual schedule type
    tid = th->th.th_team->t.t_master_tid;
    team = th->th.th_team->t.t_parent;
  } else
#endif
  {
    tid = __kmp_tid_from_gtid(global_tid);
    team = th->th.th_team;
  }

  /* determine if "for" loop is an active worksharing construct */
  if (team->t.t_serialized) {
    /* serialized parallel, each thread executes whole iteration space */
    if (plastiter != NULL)
      *plastiter = TRUE;
    /* leave pupper and plower set to entire iteration space */
    *pstride =
        (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));

#ifdef KMP_DEBUG
    {
      const char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec);
      KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
      ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
          team_info->parallel_id, task_info->task_id, team_info->microtask);
    }
#endif
    return;
  }
  nth = team->t.t_nproc;
  if (nth == 1) {
    if (plastiter != NULL)
      *plastiter = TRUE;
    *pstride =
        (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
#ifdef KMP_DEBUG
    {
      const char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec);
      KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
      ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
          team_info->parallel_id, task_info->task_id, team_info->microtask);
    }
#endif
    return;
  }

  /* compute trip count */
  if (incr == 1) {
    trip_count = *pupper - *plower + 1;
  } else if (incr == -1) {
    trip_count = *plower - *pupper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(*pupper - *plower) / incr + 1;
  } else {
    trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
  }
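  // Worked example (an editorial illustration, not from the original source):
  // with *plower = 0, *pupper = 9 and incr = 2 the loop visits 0,2,4,6,8, so
  // trip_count = (UT)(9 - 0) / 2 + 1 = 5; with incr = 1 over [1,100] it is
  // simply 100. The cast to the unsigned type UT keeps the subtraction from
  // overflowing the signed type when the iteration range is very large.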

  if (__kmp_env_consistency_check) {
    /* tripcount overflow? */
    if (trip_count == 0 && *pupper != *plower) {
      __kmp_error_construct(kmp_i18n_msg_CnsIterationRangeTooLarge, ct_pdo,
                            loc);
    }
  }
  KMP_COUNT_VALUE(FOR_static_iterations, trip_count);

  /* compute remaining parameters */
  switch (schedtype) {
  case kmp_sch_static: {
    if (trip_count < nth) {
      KMP_DEBUG_ASSERT(
          __kmp_static == kmp_sch_static_greedy ||
          __kmp_static ==
              kmp_sch_static_balanced); // Unknown static scheduling type.
      if (tid < trip_count) {
        *pupper = *plower = *plower + tid * incr;
      } else {
        *plower = *pupper + incr;
      }
      if (plastiter != NULL)
        *plastiter = (tid == trip_count - 1);
    } else {
      if (__kmp_static == kmp_sch_static_balanced) {
        UT small_chunk = trip_count / nth;
        UT extras = trip_count % nth;
        *plower += incr * (tid * small_chunk + (tid < extras ? tid : extras));
        *pupper = *plower + small_chunk * incr - (tid < extras ? 0 : incr);
        if (plastiter != NULL)
          *plastiter = (tid == nth - 1);
      } else {
        T big_chunk_inc_count =
            (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
        T old_upper = *pupper;

        KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
        // Unknown static scheduling type.

        *plower += tid * big_chunk_inc_count;
        *pupper = *plower + big_chunk_inc_count - incr;
        if (incr > 0) {
          if (*pupper < *plower)
            *pupper = traits_t<T>::max_value;
          if (plastiter != NULL)
            *plastiter = *plower <= old_upper && *pupper > old_upper - incr;
          if (*pupper > old_upper)
            *pupper = old_upper; // tracker C73258
        } else {
          if (*pupper > *plower)
            *pupper = traits_t<T>::min_value;
          if (plastiter != NULL)
            *plastiter = *plower >= old_upper && *pupper < old_upper - incr;
          if (*pupper < old_upper)
            *pupper = old_upper; // tracker C73258
        }
      }
    }
    *pstride = trip_count;
    break;
  }
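  // Worked example (an editorial illustration, not from the original source):
  // for kmp_sch_static with bounds [0,9], incr = 1, trip_count = 10 and
  // nth = 4, the balanced variant (small_chunk = 2, extras = 2) assigns
  // [0,2], [3,5], [6,7], [8,9] to threads 0..3, while the greedy variant
  // hands out chunks of 3: [0,2], [3,5], [6,8], [9,9] (the last upper bound
  // clamped to the original upper bound). Either way *pstride is the whole
  // trip count, since each thread owns exactly one contiguous chunk.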
  case kmp_sch_static_chunked: {
    ST span;
    if (chunk < 1) {
      chunk = 1;
    }
    span = chunk * incr;
    *pstride = span * nth;
    *plower = *plower + (span * tid);
    *pupper = *plower + span - incr;
    if (plastiter != NULL)
      *plastiter = (tid == ((trip_count - 1) / (UT)chunk) % nth);
    break;
  }
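  // Worked example (an editorial illustration, not from the original source):
  // for kmp_sch_static_chunked with bounds [0,9], incr = 1, chunk = 2 and
  // nth = 4, span = 2 and *pstride = 8, so thread 0 starts with [0,1],
  // thread 1 with [2,3], thread 2 with [4,5], thread 3 with [6,7]; the caller
  // advances its bounds by *pstride to reach later chunks, and thread 0,
  // which owns the final chunk [8,9], receives the last-iteration flag
  // (((10 - 1) / 2) % 4 == 0).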
#if OMP_45_ENABLED
  case kmp_sch_static_balanced_chunked: {
    T old_upper = *pupper;
    // round up to make sure the chunk is enough to cover all iterations
    UT span = (trip_count + nth - 1) / nth;

    // perform chunk adjustment
    chunk = (span + chunk - 1) & ~(chunk - 1);

    span = chunk * incr;
    *plower = *plower + (span * tid);
    *pupper = *plower + span - incr;
    if (incr > 0) {
      if (*pupper > old_upper)
        *pupper = old_upper;
    } else if (*pupper < old_upper)
      *pupper = old_upper;

    if (plastiter != NULL)
      *plastiter = (tid == ((trip_count - 1) / (UT)chunk));
    break;
  }
#endif
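  // Worked example (an editorial illustration, not from the original source):
  // for kmp_sch_static_balanced_chunked with bounds [0,9], incr = 1, nth = 4
  // and chunk = 4 (a power of two, for which the bit-mask round-up above is
  // exact), span = ceil(10 / 4) = 3 is rounded up to chunk = 4, so thread 0
  // gets [0,3], thread 1 gets [4,7], thread 2 gets [8,9] after clamping,
  // thread 3 gets nothing, and thread 2 carries the last-iteration flag
  // ((10 - 1) / 4 == 2).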
  default:
    KMP_ASSERT2(0, "__kmpc_for_static_init: unknown scheduling type");
    break;
  }

#if USE_ITT_BUILD
  // Report loop metadata
  if (KMP_MASTER_TID(tid) && __itt_metadata_add_ptr &&
      __kmp_forkjoin_frames_mode == 3 &&
#if OMP_40_ENABLED
      th->th.th_teams_microtask == NULL &&
#endif
      team->t.t_active_level == 1) {
    kmp_uint64 cur_chunk = chunk;
    // Calculate chunk in case it was not specified; it is specified for
    // kmp_sch_static_chunked
    if (schedtype == kmp_sch_static) {
      cur_chunk = trip_count / nth + ((trip_count % nth) ? 1 : 0);
    }
    // 0 - "static" schedule
    __kmp_itt_metadata_loop(loc, 0, trip_count, cur_chunk);
  }
#endif
#ifdef KMP_DEBUG
  {
    const char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format("__kmpc_for_static_init: liter=%%d lower=%%%s "
                            "upper=%%%s stride = %%%s signed?<%s>\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
    __kmp_str_free(&buff);
  }
#endif
  KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_TRACE
  if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
    ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
        team_info->parallel_id, task_info->task_id, team_info->microtask);
  }
#endif

  return;
}

template <typename T>
static void __kmp_dist_for_static_init(ident_t *loc, kmp_int32 gtid,
                                       kmp_int32 schedule, kmp_int32 *plastiter,
                                       T *plower, T *pupper, T *pupperDist,
                                       typename traits_t<T>::signed_t *pstride,
                                       typename traits_t<T>::signed_t incr,
                                       typename traits_t<T>::signed_t chunk) {
  KMP_COUNT_BLOCK(OMP_DISTRIBUTE);
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_uint32 tid;
  kmp_uint32 nth;
  kmp_uint32 team_id;
  kmp_uint32 nteams;
  UT trip_count;
  kmp_team_t *team;
  kmp_info_t *th;

  KMP_DEBUG_ASSERT(plastiter && plower && pupper && pupperDist && pstride);
  KE_TRACE(10, ("__kmpc_dist_for_static_init called (%d)\n", gtid));
#ifdef KMP_DEBUG
  {
    const char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_dist_for_static_init: T#%%d schedLoop=%%d liter=%%d "
        "iter=(%%%s, %%%s, %%%s) chunk=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
        traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100,
             (buff, gtid, schedule, *plastiter, *plower, *pupper, incr, chunk));
    __kmp_str_free(&buff);
  }
#endif

  if (__kmp_env_consistency_check) {
    __kmp_push_workshare(gtid, ct_pdo, loc);
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
    if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
      // The loop is illegal.
      // Some zero-trip loops maintained by compiler, e.g.:
      //   for(i=10;i<0;++i) // lower >= upper - run-time check
      //   for(i=0;i>10;--i) // lower <= upper - run-time check
      //   for(i=0;i>10;++i) // incr > 0 - compile-time check
      //   for(i=10;i<0;--i) // incr < 0 - compile-time check
      // Compiler does not check the following illegal loops:
      //   for(i=0;i<10;i+=incr) // where incr<0
      //   for(i=10;i>0;i-=incr) // where incr<0
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
    }
  }
  tid = __kmp_tid_from_gtid(gtid);
  th = __kmp_threads[gtid];
  nth = th->th.th_team_nproc;
  team = th->th.th_team;
#if OMP_40_ENABLED
  KMP_DEBUG_ASSERT(th->th.th_teams_microtask); // we are in the teams construct
  nteams = th->th.th_teams_size.nteams;
#endif
  team_id = team->t.t_master_tid;
  KMP_DEBUG_ASSERT(nteams == team->t.t_parent->t.t_nproc);

  // compute global trip count
  if (incr == 1) {
    trip_count = *pupper - *plower + 1;
  } else if (incr == -1) {
    trip_count = *plower - *pupper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(*pupper - *plower) / incr + 1;
  } else {
    trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
  }

  *pstride = *pupper - *plower; // just in case (can be unused)
  if (trip_count <= nteams) {
    KMP_DEBUG_ASSERT(
        __kmp_static == kmp_sch_static_greedy ||
        __kmp_static ==
            kmp_sch_static_balanced); // Unknown static scheduling type.
    // only masters of some teams get single iteration, other threads get
    // nothing
    if (team_id < trip_count && tid == 0) {
      *pupper = *pupperDist = *plower = *plower + team_id * incr;
    } else {
      *pupperDist = *pupper;
      *plower = *pupper + incr; // compiler should skip loop body
    }
    if (plastiter != NULL)
      *plastiter = (tid == 0 && team_id == trip_count - 1);
  } else {
    // Get the team's chunk first (each team gets at most one chunk)
    if (__kmp_static == kmp_sch_static_balanced) {
      UT chunkD = trip_count / nteams;
      UT extras = trip_count % nteams;
      *plower +=
          incr * (team_id * chunkD + (team_id < extras ? team_id : extras));
      *pupperDist = *plower + chunkD * incr - (team_id < extras ? 0 : incr);
      if (plastiter != NULL)
        *plastiter = (team_id == nteams - 1);
    } else {
      T chunk_inc_count =
          (trip_count / nteams + ((trip_count % nteams) ? 1 : 0)) * incr;
      T upper = *pupper;
      KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
      // Unknown static scheduling type.
      *plower += team_id * chunk_inc_count;
      *pupperDist = *plower + chunk_inc_count - incr;
      // Check/correct bounds if needed
      if (incr > 0) {
        if (*pupperDist < *plower)
          *pupperDist = traits_t<T>::max_value;
        if (plastiter != NULL)
          *plastiter = *plower <= upper && *pupperDist > upper - incr;
        if (*pupperDist > upper)
          *pupperDist = upper; // tracker C73258
        if (*plower > *pupperDist) {
          *pupper = *pupperDist; // no iterations available for the team
          goto end;
        }
      } else {
        if (*pupperDist > *plower)
          *pupperDist = traits_t<T>::min_value;
        if (plastiter != NULL)
          *plastiter = *plower >= upper && *pupperDist < upper - incr;
        if (*pupperDist < upper)
          *pupperDist = upper; // tracker C73258
        if (*plower < *pupperDist) {
          *pupper = *pupperDist; // no iterations available for the team
          goto end;
        }
      }
    }
    // Get the parallel loop chunk now (for thread)
    // compute trip count for team's chunk
    if (incr == 1) {
      trip_count = *pupperDist - *plower + 1;
    } else if (incr == -1) {
      trip_count = *plower - *pupperDist + 1;
    } else if (incr > 1) {
      // upper-lower can exceed the limit of signed type
      trip_count = (UT)(*pupperDist - *plower) / incr + 1;
    } else {
      trip_count = (UT)(*plower - *pupperDist) / (-incr) + 1;
    }
    KMP_DEBUG_ASSERT(trip_count);
    switch (schedule) {
    case kmp_sch_static: {
      if (trip_count <= nth) {
        KMP_DEBUG_ASSERT(
            __kmp_static == kmp_sch_static_greedy ||
            __kmp_static ==
                kmp_sch_static_balanced); // Unknown static scheduling type.
        if (tid < trip_count)
          *pupper = *plower = *plower + tid * incr;
        else
          *plower = *pupper + incr; // no iterations available
        if (plastiter != NULL)
          if (*plastiter != 0 && !(tid == trip_count - 1))
            *plastiter = 0;
      } else {
        if (__kmp_static == kmp_sch_static_balanced) {
          UT chunkL = trip_count / nth;
          UT extras = trip_count % nth;
          *plower += incr * (tid * chunkL + (tid < extras ? tid : extras));
          *pupper = *plower + chunkL * incr - (tid < extras ? 0 : incr);
          if (plastiter != NULL)
            if (*plastiter != 0 && !(tid == nth - 1))
              *plastiter = 0;
        } else {
          T chunk_inc_count =
              (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
          T upper = *pupperDist;
          KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
          // Unknown static scheduling type.
          *plower += tid * chunk_inc_count;
          *pupper = *plower + chunk_inc_count - incr;
          if (incr > 0) {
            if (*pupper < *plower)
              *pupper = traits_t<T>::max_value;
            if (plastiter != NULL)
              if (*plastiter != 0 &&
                  !(*plower <= upper && *pupper > upper - incr))
                *plastiter = 0;
            if (*pupper > upper)
              *pupper = upper; // tracker C73258
          } else {
            if (*pupper > *plower)
              *pupper = traits_t<T>::min_value;
            if (plastiter != NULL)
              if (*plastiter != 0 &&
                  !(*plower >= upper && *pupper < upper - incr))
                *plastiter = 0;
            if (*pupper < upper)
              *pupper = upper; // tracker C73258
          }
        }
      }
      break;
    }
    case kmp_sch_static_chunked: {
      ST span;
      if (chunk < 1)
        chunk = 1;
      span = chunk * incr;
      *pstride = span * nth;
      *plower = *plower + (span * tid);
      *pupper = *plower + span - incr;
      if (plastiter != NULL)
        if (*plastiter != 0 && !(tid == ((trip_count - 1) / (UT)chunk) % nth))
          *plastiter = 0;
      break;
    }
    default:
      KMP_ASSERT2(0,
                  "__kmpc_dist_for_static_init: unknown loop scheduling type");
      break;
    }
  }
end:;
#ifdef KMP_DEBUG
  {
    const char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_dist_for_static_init: last=%%d lo=%%%s up=%%%s upDist=%%%s "
        "stride=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<T>::spec,
        traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pupperDist, *pstride));
    __kmp_str_free(&buff);
  }
#endif
  KE_TRACE(10, ("__kmpc_dist_for_static_init: T#%d return\n", gtid));
  return;
}

template <typename T>
static void __kmp_team_static_init(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 *p_last, T *p_lb, T *p_ub,
                                   typename traits_t<T>::signed_t *p_st,
                                   typename traits_t<T>::signed_t incr,
                                   typename traits_t<T>::signed_t chunk) {
  // The routine returns the first chunk distributed to the team and the
  // stride for subsequent chunk calculations.
  // The last-iteration flag is set for the team that will execute
  // the last iteration of the loop.
  // The routine is called for dist_schedule(static, chunk) only.
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_uint32 team_id;
  kmp_uint32 nteams;
  UT trip_count;
  T lower;
  T upper;
  ST span;
  kmp_team_t *team;
  kmp_info_t *th;

  KMP_DEBUG_ASSERT(p_last && p_lb && p_ub && p_st);
  KE_TRACE(10, ("__kmp_team_static_init called (%d)\n", gtid));
#ifdef KMP_DEBUG
  {
    const char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format("__kmp_team_static_init enter: T#%%d liter=%%d "
                            "iter=(%%%s, %%%s, %%%s) chunk %%%s; signed?<%s>\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<ST>::spec,
                            traits_t<T>::spec);
    KD_TRACE(100, (buff, gtid, *p_last, *p_lb, *p_ub, *p_st, chunk));
    __kmp_str_free(&buff);
  }
#endif

  lower = *p_lb;
  upper = *p_ub;
  if (__kmp_env_consistency_check) {
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
    if (incr > 0 ? (upper < lower) : (lower < upper)) {
      // The loop is illegal.
      // Some zero-trip loops maintained by compiler, e.g.:
      //   for(i=10;i<0;++i) // lower >= upper - run-time check
      //   for(i=0;i>10;--i) // lower <= upper - run-time check
      //   for(i=0;i>10;++i) // incr > 0 - compile-time check
      //   for(i=10;i<0;--i) // incr < 0 - compile-time check
      // Compiler does not check the following illegal loops:
      //   for(i=0;i<10;i+=incr) // where incr<0
      //   for(i=10;i>0;i-=incr) // where incr<0
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
    }
  }
  th = __kmp_threads[gtid];
  team = th->th.th_team;
#if OMP_40_ENABLED
  KMP_DEBUG_ASSERT(th->th.th_teams_microtask); // we are in the teams construct
  nteams = th->th.th_teams_size.nteams;
#endif
  team_id = team->t.t_master_tid;
  KMP_DEBUG_ASSERT(nteams == team->t.t_parent->t.t_nproc);

  // compute trip count
  if (incr == 1) {
    trip_count = upper - lower + 1;
  } else if (incr == -1) {
    trip_count = lower - upper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(upper - lower) / incr + 1;
  } else {
    trip_count = (UT)(lower - upper) / (-incr) + 1;
  }
  if (chunk < 1)
    chunk = 1;
  span = chunk * incr;
  *p_st = span * nteams;
  *p_lb = lower + (span * team_id);
  *p_ub = *p_lb + span - incr;
  if (p_last != NULL)
    *p_last = (team_id == ((trip_count - 1) / (UT)chunk) % nteams);
  // Correct upper bound if needed
  if (incr > 0) {
    if (*p_ub < *p_lb) // overflow?
      *p_ub = traits_t<T>::max_value;
    if (*p_ub > upper)
      *p_ub = upper; // tracker C73258
  } else { // incr < 0
    if (*p_ub > *p_lb)
      *p_ub = traits_t<T>::min_value;
    if (*p_ub < upper)
      *p_ub = upper; // tracker C73258
  }
#ifdef KMP_DEBUG
  {
    const char *buff;
    // create format specifiers before the debug output
    buff =
        __kmp_str_format("__kmp_team_static_init exit: T#%%d team%%u liter=%%d "
                         "iter=(%%%s, %%%s, %%%s) chunk %%%s\n",
                         traits_t<T>::spec, traits_t<T>::spec,
                         traits_t<ST>::spec, traits_t<ST>::spec);
    KD_TRACE(100, (buff, gtid, team_id, *p_last, *p_lb, *p_ub, *p_st, chunk));
    __kmp_str_free(&buff);
  }
#endif
}

//------------------------------------------------------------------------------
extern "C" {
/*!
@ingroup WORK_SHARING
@param loc Source code location
@param gtid Global thread id of this thread
@param schedtype Scheduling type
@param plastiter Pointer to the "last iteration" flag
@param plower Pointer to the lower bound
@param pupper Pointer to the upper bound
@param pstride Pointer to the stride
@param incr Loop increment
@param chunk The chunk size

The four functions here are identical apart from the argument types.

The functions compute the upper and lower bounds and stride to be used for the
set of iterations to be executed by the current thread from the statically
scheduled loop that is described by the initial values of the bounds, stride,
increment and chunk size.

@{
*/
void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype,
                              kmp_int32 *plastiter, kmp_int32 *plower,
                              kmp_int32 *pupper, kmp_int32 *pstride,
                              kmp_int32 incr, kmp_int32 chunk) {
  __kmp_for_static_init<kmp_int32>(loc, gtid, schedtype, plastiter, plower,
                                   pupper, pstride, incr, chunk);
}

/*!
 See @ref __kmpc_for_static_init_4
 */
void __kmpc_for_static_init_4u(ident_t *loc, kmp_int32 gtid,
                               kmp_int32 schedtype, kmp_int32 *plastiter,
                               kmp_uint32 *plower, kmp_uint32 *pupper,
                               kmp_int32 *pstride, kmp_int32 incr,
                               kmp_int32 chunk) {
  __kmp_for_static_init<kmp_uint32>(loc, gtid, schedtype, plastiter, plower,
                                    pupper, pstride, incr, chunk);
}

/*!
 See @ref __kmpc_for_static_init_4
 */
void __kmpc_for_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype,
                              kmp_int32 *plastiter, kmp_int64 *plower,
                              kmp_int64 *pupper, kmp_int64 *pstride,
                              kmp_int64 incr, kmp_int64 chunk) {
  __kmp_for_static_init<kmp_int64>(loc, gtid, schedtype, plastiter, plower,
                                   pupper, pstride, incr, chunk);
}

/*!
 See @ref __kmpc_for_static_init_4
 */
void __kmpc_for_static_init_8u(ident_t *loc, kmp_int32 gtid,
                               kmp_int32 schedtype, kmp_int32 *plastiter,
                               kmp_uint64 *plower, kmp_uint64 *pupper,
                               kmp_int64 *pstride, kmp_int64 incr,
                               kmp_int64 chunk) {
  __kmp_for_static_init<kmp_uint64>(loc, gtid, schedtype, plastiter, plower,
                                    pupper, pstride, incr, chunk);
}
/*!
@}
*/
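// Editorial sketch (not part of the original source): roughly how an OpenMP
// compiler might lower
//   #pragma omp for schedule(static)
//   for (int i = 0; i < n; ++i) body(i);
// into a call to __kmpc_for_static_init_4. The names body, n, loc and gtid
// and the exact clamping are hypothetical; real compiler output differs.
//
//   kmp_int32 last = 0, lb = 0, ub = n - 1, st = 1;
//   __kmpc_for_static_init_4(&loc, gtid, kmp_sch_static, &last, &lb, &ub, &st,
//                            /*incr=*/1, /*chunk=*/1);
//   if (ub > n - 1)
//     ub = n - 1; // keep this thread's chunk inside the original bounds
//   for (kmp_int32 i = lb; i <= ub; ++i)
//     body(i);
//   __kmpc_for_static_fini(&loc, gtid);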

/*!
@ingroup WORK_SHARING
@param loc Source code location
@param gtid Global thread id of this thread
@param schedule Scheduling type for the parallel loop
@param plastiter Pointer to the "last iteration" flag
@param plower Pointer to the lower bound
@param pupper Pointer to the upper bound of the loop chunk
@param pupperD Pointer to the upper bound of the dist_chunk
@param pstride Pointer to the stride for the parallel loop
@param incr Loop increment
@param chunk The chunk size for the parallel loop

The four functions here are identical apart from the argument types.

The functions compute the upper and lower bounds and strides to be used for the
set of iterations to be executed by the current thread from the statically
scheduled loop that is described by the initial values of the bounds, strides,
increment and chunks for the parallel loop and distribute constructs.

@{
*/
void __kmpc_dist_for_static_init_4(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 schedule, kmp_int32 *plastiter,
                                   kmp_int32 *plower, kmp_int32 *pupper,
                                   kmp_int32 *pupperD, kmp_int32 *pstride,
                                   kmp_int32 incr, kmp_int32 chunk) {
  __kmp_dist_for_static_init<kmp_int32>(loc, gtid, schedule, plastiter, plower,
                                        pupper, pupperD, pstride, incr, chunk);
}

/*!
 See @ref __kmpc_dist_for_static_init_4
 */
void __kmpc_dist_for_static_init_4u(ident_t *loc, kmp_int32 gtid,
                                    kmp_int32 schedule, kmp_int32 *plastiter,
                                    kmp_uint32 *plower, kmp_uint32 *pupper,
                                    kmp_uint32 *pupperD, kmp_int32 *pstride,
                                    kmp_int32 incr, kmp_int32 chunk) {
  __kmp_dist_for_static_init<kmp_uint32>(loc, gtid, schedule, plastiter, plower,
                                         pupper, pupperD, pstride, incr, chunk);
}

/*!
 See @ref __kmpc_dist_for_static_init_4
 */
void __kmpc_dist_for_static_init_8(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 schedule, kmp_int32 *plastiter,
                                   kmp_int64 *plower, kmp_int64 *pupper,
                                   kmp_int64 *pupperD, kmp_int64 *pstride,
                                   kmp_int64 incr, kmp_int64 chunk) {
  __kmp_dist_for_static_init<kmp_int64>(loc, gtid, schedule, plastiter, plower,
                                        pupper, pupperD, pstride, incr, chunk);
}

/*!
 See @ref __kmpc_dist_for_static_init_4
 */
void __kmpc_dist_for_static_init_8u(ident_t *loc, kmp_int32 gtid,
                                    kmp_int32 schedule, kmp_int32 *plastiter,
                                    kmp_uint64 *plower, kmp_uint64 *pupper,
                                    kmp_uint64 *pupperD, kmp_int64 *pstride,
                                    kmp_int64 incr, kmp_int64 chunk) {
  __kmp_dist_for_static_init<kmp_uint64>(loc, gtid, schedule, plastiter, plower,
                                         pupper, pupperD, pstride, incr, chunk);
}
/*!
@}
*/
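// Worked example (an editorial illustration, not from the original source):
// for a "distribute parallel for schedule(static)" loop over [0,15] with
// incr = 1, 2 teams and 4 threads per team, __kmpc_dist_for_static_init_4
// first splits the 16 iterations across teams (team 0 gets [0,7], team 1 gets
// [8,15], reported through *pupperD) and then splits each team's chunk across
// its threads, so team 0 thread 0 ends with [0,1], thread 1 with [2,3], and so
// on; only the last thread of the last team keeps the last-iteration flag.
// The numbers assume the balanced setting of __kmp_static.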

//------------------------------------------------------------------------------
// Auxiliary routines for Distribute Parallel Loop construct implementation
// Transfer call to template< type T >
// __kmp_team_static_init( ident_t *loc, int gtid,
//     int *p_last, T *lb, T *ub, ST *st, ST incr, ST chunk )

/*!
@ingroup WORK_SHARING
@{
@param loc Source location
@param gtid Global thread id
@param p_last Pointer to the last-iteration flag
@param p_lb Pointer to the lower bound
@param p_ub Pointer to the upper bound
@param p_st Step (or increment if you prefer)
@param incr Loop increment
@param chunk The chunk size to block with

The functions compute the upper and lower bounds and stride to be used for the
set of iterations to be executed by the current team from the statically
scheduled loop that is described by the initial values of the bounds, stride,
increment and chunk for the distribute construct as part of the composite
distribute parallel loop construct. These functions are all identical apart
from the types of the arguments.
*/

void __kmpc_team_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
                               kmp_int32 *p_lb, kmp_int32 *p_ub,
                               kmp_int32 *p_st, kmp_int32 incr,
                               kmp_int32 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_int32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                    chunk);
}

/*!
 See @ref __kmpc_team_static_init_4
 */
void __kmpc_team_static_init_4u(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
                                kmp_uint32 *p_lb, kmp_uint32 *p_ub,
                                kmp_int32 *p_st, kmp_int32 incr,
                                kmp_int32 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_uint32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                     chunk);
}

/*!
 See @ref __kmpc_team_static_init_4
 */
void __kmpc_team_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
                               kmp_int64 *p_lb, kmp_int64 *p_ub,
                               kmp_int64 *p_st, kmp_int64 incr,
                               kmp_int64 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_int64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                    chunk);
}

/*!
 See @ref __kmpc_team_static_init_4
 */
void __kmpc_team_static_init_8u(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
                                kmp_uint64 *p_lb, kmp_uint64 *p_ub,
                                kmp_int64 *p_st, kmp_int64 incr,
                                kmp_int64 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_uint64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                     chunk);
}
/*!
@}
*/
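// Worked example (an editorial illustration, not from the original source):
// for dist_schedule(static, 10) over [0,99] with incr = 1 and 4 teams,
// __kmpc_team_static_init_4 gives team 0 the first chunk [0,9], team 1
// [10,19], team 2 [20,29] and team 3 [30,39], with *p_st = 40 so each team
// can step to its next chunk ([40,49] for team 0, and so on); team 1 owns the
// final chunk [90,99] and therefore gets the last-iteration flag
// (((100 - 1) / 10) % 4 == 1).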

} // extern "C"