/*
 * kmp_atomic.h - ATOMIC header file
 */


//===----------------------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//


#ifndef KMP_ATOMIC_H
#define KMP_ATOMIC_H

#include "kmp_os.h"
#include "kmp_lock.h"

#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

// C++ build port.
// The Intel compiler supports the _Complex datatype on lin and mac but not on win.
// On the other hand, there is a stack alignment problem on lin_32 and mac_32 when
// the rhs is a cmplx80 or cmplx128 typedef'ed datatype.
// The decision: use the compiler-supported _Complex type on lin and mac, and the
// typedef'ed types on win.
// The condition for WIN64 was modified in anticipation of the 10.1 build compiler.

#if defined( __cplusplus ) && ( KMP_OS_WINDOWS )
    // create shortcuts for c99 complex types

    #if (_MSC_VER < 1600) && defined(_DEBUG)
        // Workaround for the problem of the _DebugHeapTag unresolved external.
        // This problem prevented us from using our static debug library for C tests
        // compiled with the /MDd option (the library itself is built with /MTd).
        #undef _DEBUG
        #define _DEBUG_TEMPORARILY_UNSET_
    #endif

    #include <complex>

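    // A note on the helper below: it divides two std::complex values without
    // relying on the compiler runtime. For lhs = a + bi and rhs = c + di it uses
    //     lhs / rhs = ( (a*c + b*d) + (b*c - a*d)i ) / (c*c + d*d),
    // i.e. multiplication by the conjugate of rhs over |rhs|^2.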
    template< typename type_lhs, typename type_rhs >
    std::complex< type_lhs > __kmp_lhs_div_rhs(
        const std::complex< type_lhs >& lhs,
        const std::complex< type_rhs >& rhs ) {
        type_lhs a = lhs.real();
        type_lhs b = lhs.imag();
        type_rhs c = rhs.real();
        type_rhs d = rhs.imag();
        type_rhs den = c*c + d*d;
        type_rhs r = ( a*c + b*d );
        type_rhs i = ( b*c - a*d );
        std::complex< type_lhs > ret( r/den, i/den );
        return ret;
    }

    // complex8
    struct __kmp_cmplx64_t : std::complex< double > {

        __kmp_cmplx64_t() : std::complex< double > () {}

        __kmp_cmplx64_t( const std::complex< double >& cd )
            : std::complex< double > ( cd ) {}

        void operator /= ( const __kmp_cmplx64_t& rhs ) {
            std::complex< double > lhs = *this;
            *this = __kmp_lhs_div_rhs( lhs, rhs );
        }

        __kmp_cmplx64_t operator / ( const __kmp_cmplx64_t& rhs ) {
            std::complex< double > lhs = *this;
            return __kmp_lhs_div_rhs( lhs, rhs );
        }

    };
    typedef struct __kmp_cmplx64_t kmp_cmplx64;

    // complex4
    struct __kmp_cmplx32_t : std::complex< float > {

        __kmp_cmplx32_t() : std::complex< float > () {}

        __kmp_cmplx32_t( const std::complex<float>& cf )
            : std::complex< float > ( cf ) {}

        __kmp_cmplx32_t operator + ( const __kmp_cmplx32_t& b ) {
            std::complex< float > lhs = *this;
            std::complex< float > rhs = b;
            return ( lhs + rhs );
        }
        __kmp_cmplx32_t operator - ( const __kmp_cmplx32_t& b ) {
            std::complex< float > lhs = *this;
            std::complex< float > rhs = b;
            return ( lhs - rhs );
        }
        __kmp_cmplx32_t operator * ( const __kmp_cmplx32_t& b ) {
            std::complex< float > lhs = *this;
            std::complex< float > rhs = b;
            return ( lhs * rhs );
        }

        __kmp_cmplx32_t operator + ( const kmp_cmplx64& b ) {
            kmp_cmplx64 t = kmp_cmplx64( *this ) + b;
            std::complex< double > d( t );
            std::complex< float > f( d );
            __kmp_cmplx32_t r( f );
            return r;
        }
        __kmp_cmplx32_t operator - ( const kmp_cmplx64& b ) {
            kmp_cmplx64 t = kmp_cmplx64( *this ) - b;
            std::complex< double > d( t );
            std::complex< float > f( d );
            __kmp_cmplx32_t r( f );
            return r;
        }
        __kmp_cmplx32_t operator * ( const kmp_cmplx64& b ) {
            kmp_cmplx64 t = kmp_cmplx64( *this ) * b;
            std::complex< double > d( t );
            std::complex< float > f( d );
            __kmp_cmplx32_t r( f );
            return r;
        }

        void operator /= ( const __kmp_cmplx32_t& rhs ) {
            std::complex< float > lhs = *this;
            *this = __kmp_lhs_div_rhs( lhs, rhs );
        }

        __kmp_cmplx32_t operator / ( const __kmp_cmplx32_t& rhs ) {
            std::complex< float > lhs = *this;
            return __kmp_lhs_div_rhs( lhs, rhs );
        }

        void operator /= ( const kmp_cmplx64& rhs ) {
            std::complex< float > lhs = *this;
            *this = __kmp_lhs_div_rhs( lhs, rhs );
        }

        __kmp_cmplx32_t operator / ( const kmp_cmplx64& rhs ) {
            std::complex< float > lhs = *this;
            return __kmp_lhs_div_rhs( lhs, rhs );
        }
    };
    typedef struct __kmp_cmplx32_t kmp_cmplx32;

    // complex10
    struct KMP_DO_ALIGN( 16 ) __kmp_cmplx80_t : std::complex< long double > {

        __kmp_cmplx80_t() : std::complex< long double > () {}

        __kmp_cmplx80_t( const std::complex< long double >& cld )
            : std::complex< long double > ( cld ) {}

        void operator /= ( const __kmp_cmplx80_t& rhs ) {
            std::complex< long double > lhs = *this;
            *this = __kmp_lhs_div_rhs( lhs, rhs );
        }

        __kmp_cmplx80_t operator / ( const __kmp_cmplx80_t& rhs ) {
            std::complex< long double > lhs = *this;
            return __kmp_lhs_div_rhs( lhs, rhs );
        }

    };
    typedef KMP_DO_ALIGN( 16 ) struct __kmp_cmplx80_t kmp_cmplx80;

    // complex16
    #if KMP_HAVE_QUAD
    struct __kmp_cmplx128_t : std::complex< _Quad > {

        __kmp_cmplx128_t() : std::complex< _Quad > () {}

        __kmp_cmplx128_t( const std::complex< _Quad >& cq )
            : std::complex< _Quad > ( cq ) {}

        void operator /= ( const __kmp_cmplx128_t& rhs ) {
            std::complex< _Quad > lhs = *this;
            *this = __kmp_lhs_div_rhs( lhs, rhs );
        }

        __kmp_cmplx128_t operator / ( const __kmp_cmplx128_t& rhs ) {
            std::complex< _Quad > lhs = *this;
            return __kmp_lhs_div_rhs( lhs, rhs );
        }

    };
    typedef struct __kmp_cmplx128_t kmp_cmplx128;
    #endif /* KMP_HAVE_QUAD */

    #ifdef _DEBUG_TEMPORARILY_UNSET_
        #undef _DEBUG_TEMPORARILY_UNSET_
        // Set it back now
        #define _DEBUG 1
    #endif

#else
    // create shortcuts for c99 complex types
    typedef float _Complex kmp_cmplx32;
    typedef double _Complex kmp_cmplx64;
    typedef long double _Complex kmp_cmplx80;
    #if KMP_HAVE_QUAD
    typedef _Quad _Complex kmp_cmplx128;
    #endif
#endif

// The 12.0 compiler changed the alignment of 16- and 32-byte arguments (like _Quad
// and kmp_cmplx128) on the IA-32 architecture. The following aligned structures
// are implemented to support the old alignment in 10.1, 11.0 and 11.1, and to
// introduce the new alignment in 12.0. See CQ88405.
#if KMP_ARCH_X86 && KMP_HAVE_QUAD

    // 4-byte aligned structures for backward compatibility.

    #pragma pack( push, 4 )

    struct KMP_DO_ALIGN( 4 ) Quad_a4_t {
        _Quad q;

        Quad_a4_t( ) : q( ) {}
        Quad_a4_t( const _Quad & cq ) : q ( cq ) {}

        Quad_a4_t operator + ( const Quad_a4_t& b ) {
            _Quad lhs = (*this).q;
            _Quad rhs = b.q;
            return (Quad_a4_t)( lhs + rhs );
        }

        Quad_a4_t operator - ( const Quad_a4_t& b ) {
            _Quad lhs = (*this).q;
            _Quad rhs = b.q;
            return (Quad_a4_t)( lhs - rhs );
        }
        Quad_a4_t operator * ( const Quad_a4_t& b ) {
            _Quad lhs = (*this).q;
            _Quad rhs = b.q;
            return (Quad_a4_t)( lhs * rhs );
        }

        Quad_a4_t operator / ( const Quad_a4_t& b ) {
            _Quad lhs = (*this).q;
            _Quad rhs = b.q;
            return (Quad_a4_t)( lhs / rhs );
        }

    };

    struct KMP_DO_ALIGN( 4 ) kmp_cmplx128_a4_t {
        kmp_cmplx128 q;

        kmp_cmplx128_a4_t() : q () {}

        kmp_cmplx128_a4_t( const kmp_cmplx128 & c128 ) : q ( c128 ) {}

        kmp_cmplx128_a4_t operator + ( const kmp_cmplx128_a4_t& b ) {
            kmp_cmplx128 lhs = (*this).q;
            kmp_cmplx128 rhs = b.q;
            return (kmp_cmplx128_a4_t)( lhs + rhs );
        }
        kmp_cmplx128_a4_t operator - ( const kmp_cmplx128_a4_t& b ) {
            kmp_cmplx128 lhs = (*this).q;
            kmp_cmplx128 rhs = b.q;
            return (kmp_cmplx128_a4_t)( lhs - rhs );
        }
        kmp_cmplx128_a4_t operator * ( const kmp_cmplx128_a4_t& b ) {
            kmp_cmplx128 lhs = (*this).q;
            kmp_cmplx128 rhs = b.q;
            return (kmp_cmplx128_a4_t)( lhs * rhs );
        }

        kmp_cmplx128_a4_t operator / ( const kmp_cmplx128_a4_t& b ) {
            kmp_cmplx128 lhs = (*this).q;
            kmp_cmplx128 rhs = b.q;
            return (kmp_cmplx128_a4_t)( lhs / rhs );
        }

    };

    #pragma pack( pop )

    // New 16-byte aligned structures for 12.0 compiler.
    struct KMP_DO_ALIGN( 16 ) Quad_a16_t {
        _Quad q;

        Quad_a16_t( ) : q( ) {}
        Quad_a16_t( const _Quad & cq ) : q ( cq ) {}

        Quad_a16_t operator + ( const Quad_a16_t& b ) {
            _Quad lhs = (*this).q;
            _Quad rhs = b.q;
            return (Quad_a16_t)( lhs + rhs );
        }

        Quad_a16_t operator - ( const Quad_a16_t& b ) {
            _Quad lhs = (*this).q;
            _Quad rhs = b.q;
            return (Quad_a16_t)( lhs - rhs );
        }
        Quad_a16_t operator * ( const Quad_a16_t& b ) {
            _Quad lhs = (*this).q;
            _Quad rhs = b.q;
            return (Quad_a16_t)( lhs * rhs );
        }

        Quad_a16_t operator / ( const Quad_a16_t& b ) {
            _Quad lhs = (*this).q;
            _Quad rhs = b.q;
            return (Quad_a16_t)( lhs / rhs );
        }
    };

    struct KMP_DO_ALIGN( 16 ) kmp_cmplx128_a16_t {
        kmp_cmplx128 q;

        kmp_cmplx128_a16_t() : q () {}

        kmp_cmplx128_a16_t( const kmp_cmplx128 & c128 ) : q ( c128 ) {}

        kmp_cmplx128_a16_t operator + ( const kmp_cmplx128_a16_t& b ) {
            kmp_cmplx128 lhs = (*this).q;
            kmp_cmplx128 rhs = b.q;
            return (kmp_cmplx128_a16_t)( lhs + rhs );
        }
        kmp_cmplx128_a16_t operator - ( const kmp_cmplx128_a16_t& b ) {
            kmp_cmplx128 lhs = (*this).q;
            kmp_cmplx128 rhs = b.q;
            return (kmp_cmplx128_a16_t)( lhs - rhs );
        }
        kmp_cmplx128_a16_t operator * ( const kmp_cmplx128_a16_t& b ) {
            kmp_cmplx128 lhs = (*this).q;
            kmp_cmplx128 rhs = b.q;
            return (kmp_cmplx128_a16_t)( lhs * rhs );
        }

        kmp_cmplx128_a16_t operator / ( const kmp_cmplx128_a16_t& b ) {
            kmp_cmplx128 lhs = (*this).q;
            kmp_cmplx128 rhs = b.q;
            return (kmp_cmplx128_a16_t)( lhs / rhs );
        }
    };

#endif

#if ( KMP_ARCH_X86 )
    #define QUAD_LEGACY Quad_a4_t
    #define CPLX128_LEG kmp_cmplx128_a4_t
#else
    #define QUAD_LEGACY _Quad
    #define CPLX128_LEG kmp_cmplx128
#endif

#ifdef __cplusplus
    extern "C" {
#endif

extern int __kmp_atomic_mode;

//
// Atomic locks can easily become contended, so we use queuing locks for them.
//

typedef kmp_queuing_lock_t kmp_atomic_lock_t;
static inline void
__kmp_acquire_atomic_lock( kmp_atomic_lock_t *lck, kmp_int32 gtid )
{
#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_wait_atomic)) {
        ompt_callbacks.ompt_callback(ompt_event_wait_atomic)(
            (ompt_wait_id_t) lck);
    }
#endif

    __kmp_acquire_queuing_lock( lck, gtid );

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_acquired_atomic)) {
        ompt_callbacks.ompt_callback(ompt_event_acquired_atomic)(
            (ompt_wait_id_t) lck);
    }
#endif
}

static inline int
__kmp_test_atomic_lock( kmp_atomic_lock_t *lck, kmp_int32 gtid )
{
    return __kmp_test_queuing_lock( lck, gtid );
}

static inline void
__kmp_release_atomic_lock( kmp_atomic_lock_t *lck, kmp_int32 gtid )
{
    __kmp_release_queuing_lock( lck, gtid );
#if OMPT_SUPPORT && OMPT_BLAME
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_release_atomic)) {
        ompt_callbacks.ompt_callback(ompt_event_release_atomic)(
            (ompt_wait_id_t) lck);
    }
#endif
}

static inline void
__kmp_init_atomic_lock( kmp_atomic_lock_t *lck )
{
    __kmp_init_queuing_lock( lck );
}

static inline void
__kmp_destroy_atomic_lock( kmp_atomic_lock_t *lck )
{
    __kmp_destroy_queuing_lock( lck );
}

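// A minimal usage sketch of the wrappers above, guarding an update with one of
// the global locks declared below (illustration only; the real update routines
// live in kmp_atomic.c):
//
//     __kmp_acquire_atomic_lock( &__kmp_atomic_lock_8r, gtid ); // may raise OMPT wait/acquired events
//     *lhs = *lhs + rhs;                                        // the guarded update itself
//     __kmp_release_atomic_lock( &__kmp_atomic_lock_8r, gtid ); // may raise the OMPT release event
//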
// Global Locks

extern kmp_atomic_lock_t __kmp_atomic_lock;     /* Control access to all user coded atomics in Gnu compat mode   */
extern kmp_atomic_lock_t __kmp_atomic_lock_1i;  /* Control access to all user coded atomics for 1-byte fixed data types */
extern kmp_atomic_lock_t __kmp_atomic_lock_2i;  /* Control access to all user coded atomics for 2-byte fixed data types */
extern kmp_atomic_lock_t __kmp_atomic_lock_4i;  /* Control access to all user coded atomics for 4-byte fixed data types */
extern kmp_atomic_lock_t __kmp_atomic_lock_4r;  /* Control access to all user coded atomics for kmp_real32 data type    */
extern kmp_atomic_lock_t __kmp_atomic_lock_8i;  /* Control access to all user coded atomics for 8-byte fixed data types */
extern kmp_atomic_lock_t __kmp_atomic_lock_8r;  /* Control access to all user coded atomics for kmp_real64 data type    */
extern kmp_atomic_lock_t __kmp_atomic_lock_8c;  /* Control access to all user coded atomics for 8-byte complex (float complex) data type */
extern kmp_atomic_lock_t __kmp_atomic_lock_10r; /* Control access to all user coded atomics for long double data type   */
extern kmp_atomic_lock_t __kmp_atomic_lock_16r; /* Control access to all user coded atomics for _Quad data type         */
extern kmp_atomic_lock_t __kmp_atomic_lock_16c; /* Control access to all user coded atomics for double complex data type*/
extern kmp_atomic_lock_t __kmp_atomic_lock_20c; /* Control access to all user coded atomics for long double complex type*/
extern kmp_atomic_lock_t __kmp_atomic_lock_32c; /* Control access to all user coded atomics for _Quad complex data type */

//
// Below are the routines for atomic UPDATE.
//

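// Naming convention: __kmpc_atomic_<type>_<op>, where <type> encodes the LHS
// (fixed1/2/4/8 for 1/2/4/8-byte integers, float4/8/10/16 for reals, cmplx4/8/10/16
// for complex) and <op> is the operation. As an illustrative sketch (not a
// definitive lowering), the compiler turns
//     #pragma omp atomic
//     x += y;                                      // char x; char y;
// into a call such as
//     __kmpc_atomic_fixed1_add( &loc, gtid, &x, y );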
// 1-byte
void __kmpc_atomic_fixed1_add( ident_t *id_ref, int gtid, char * lhs, char rhs );
void __kmpc_atomic_fixed1_andb( ident_t *id_ref, int gtid, char * lhs, char rhs );
void __kmpc_atomic_fixed1_div( ident_t *id_ref, int gtid, char * lhs, char rhs );
void __kmpc_atomic_fixed1u_div( ident_t *id_ref, int gtid, unsigned char * lhs, unsigned char rhs );
void __kmpc_atomic_fixed1_mul( ident_t *id_ref, int gtid, char * lhs, char rhs );
void __kmpc_atomic_fixed1_orb( ident_t *id_ref, int gtid, char * lhs, char rhs );
void __kmpc_atomic_fixed1_shl( ident_t *id_ref, int gtid, char * lhs, char rhs );
void __kmpc_atomic_fixed1_shr( ident_t *id_ref, int gtid, char * lhs, char rhs );
void __kmpc_atomic_fixed1u_shr( ident_t *id_ref, int gtid, unsigned char * lhs, unsigned char rhs );
void __kmpc_atomic_fixed1_sub( ident_t *id_ref, int gtid, char * lhs, char rhs );
void __kmpc_atomic_fixed1_xor( ident_t *id_ref, int gtid, char * lhs, char rhs );
// 2-byte
void __kmpc_atomic_fixed2_add( ident_t *id_ref, int gtid, short * lhs, short rhs );
void __kmpc_atomic_fixed2_andb( ident_t *id_ref, int gtid, short * lhs, short rhs );
void __kmpc_atomic_fixed2_div( ident_t *id_ref, int gtid, short * lhs, short rhs );
void __kmpc_atomic_fixed2u_div( ident_t *id_ref, int gtid, unsigned short * lhs, unsigned short rhs );
void __kmpc_atomic_fixed2_mul( ident_t *id_ref, int gtid, short * lhs, short rhs );
void __kmpc_atomic_fixed2_orb( ident_t *id_ref, int gtid, short * lhs, short rhs );
void __kmpc_atomic_fixed2_shl( ident_t *id_ref, int gtid, short * lhs, short rhs );
void __kmpc_atomic_fixed2_shr( ident_t *id_ref, int gtid, short * lhs, short rhs );
void __kmpc_atomic_fixed2u_shr( ident_t *id_ref, int gtid, unsigned short * lhs, unsigned short rhs );
void __kmpc_atomic_fixed2_sub( ident_t *id_ref, int gtid, short * lhs, short rhs );
void __kmpc_atomic_fixed2_xor( ident_t *id_ref, int gtid, short * lhs, short rhs );
// 4-byte add / sub fixed
void __kmpc_atomic_fixed4_add( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs );
void __kmpc_atomic_fixed4_sub( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs );
// 4-byte add / sub float
void __kmpc_atomic_float4_add( ident_t *id_ref, int gtid, kmp_real32 * lhs, kmp_real32 rhs );
void __kmpc_atomic_float4_sub( ident_t *id_ref, int gtid, kmp_real32 * lhs, kmp_real32 rhs );
// 8-byte add / sub fixed
void __kmpc_atomic_fixed8_add( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs );
void __kmpc_atomic_fixed8_sub( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs );
// 8-byte add / sub float
void __kmpc_atomic_float8_add( ident_t *id_ref, int gtid, kmp_real64 * lhs, kmp_real64 rhs );
void __kmpc_atomic_float8_sub( ident_t *id_ref, int gtid, kmp_real64 * lhs, kmp_real64 rhs );
// 4-byte fixed
void __kmpc_atomic_fixed4_andb( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs );
void __kmpc_atomic_fixed4_div( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs );
void __kmpc_atomic_fixed4u_div( ident_t *id_ref, int gtid, kmp_uint32 * lhs, kmp_uint32 rhs );
void __kmpc_atomic_fixed4_mul( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs );
void __kmpc_atomic_fixed4_orb( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs );
void __kmpc_atomic_fixed4_shl( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs );
void __kmpc_atomic_fixed4_shr( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs );
void __kmpc_atomic_fixed4u_shr( ident_t *id_ref, int gtid, kmp_uint32 * lhs, kmp_uint32 rhs );
void __kmpc_atomic_fixed4_xor( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs );
// 8-byte fixed
void __kmpc_atomic_fixed8_andb( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs );
void __kmpc_atomic_fixed8_div( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs );
void __kmpc_atomic_fixed8u_div( ident_t *id_ref, int gtid, kmp_uint64 * lhs, kmp_uint64 rhs );
void __kmpc_atomic_fixed8_mul( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs );
void __kmpc_atomic_fixed8_orb( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs );
void __kmpc_atomic_fixed8_shl( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs );
void __kmpc_atomic_fixed8_shr( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs );
void __kmpc_atomic_fixed8u_shr( ident_t *id_ref, int gtid, kmp_uint64 * lhs, kmp_uint64 rhs );
void __kmpc_atomic_fixed8_xor( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs );
// 4-byte float
void __kmpc_atomic_float4_div( ident_t *id_ref, int gtid, kmp_real32 * lhs, kmp_real32 rhs );
void __kmpc_atomic_float4_mul( ident_t *id_ref, int gtid, kmp_real32 * lhs, kmp_real32 rhs );
// 8-byte float
void __kmpc_atomic_float8_div( ident_t *id_ref, int gtid, kmp_real64 * lhs, kmp_real64 rhs );
void __kmpc_atomic_float8_mul( ident_t *id_ref, int gtid, kmp_real64 * lhs, kmp_real64 rhs );
// 1-, 2-, 4-, 8-byte logical (&&, ||)
void __kmpc_atomic_fixed1_andl( ident_t *id_ref, int gtid, char * lhs, char rhs );
void __kmpc_atomic_fixed1_orl( ident_t *id_ref, int gtid, char * lhs, char rhs );
void __kmpc_atomic_fixed2_andl( ident_t *id_ref, int gtid, short * lhs, short rhs );
void __kmpc_atomic_fixed2_orl( ident_t *id_ref, int gtid, short * lhs, short rhs );
void __kmpc_atomic_fixed4_andl( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs );
void __kmpc_atomic_fixed4_orl( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs );
void __kmpc_atomic_fixed8_andl( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs );
void __kmpc_atomic_fixed8_orl( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs );
// MIN / MAX
void __kmpc_atomic_fixed1_max( ident_t *id_ref, int gtid, char * lhs, char rhs );
void __kmpc_atomic_fixed1_min( ident_t *id_ref, int gtid, char * lhs, char rhs );
void __kmpc_atomic_fixed2_max( ident_t *id_ref, int gtid, short * lhs, short rhs );
void __kmpc_atomic_fixed2_min( ident_t *id_ref, int gtid, short * lhs, short rhs );
void __kmpc_atomic_fixed4_max( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs );
void __kmpc_atomic_fixed4_min( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs );
void __kmpc_atomic_fixed8_max( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs );
void __kmpc_atomic_fixed8_min( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs );
void __kmpc_atomic_float4_max( ident_t *id_ref, int gtid, kmp_real32 * lhs, kmp_real32 rhs );
void __kmpc_atomic_float4_min( ident_t *id_ref, int gtid, kmp_real32 * lhs, kmp_real32 rhs );
void __kmpc_atomic_float8_max( ident_t *id_ref, int gtid, kmp_real64 * lhs, kmp_real64 rhs );
void __kmpc_atomic_float8_min( ident_t *id_ref, int gtid, kmp_real64 * lhs, kmp_real64 rhs );
#if KMP_HAVE_QUAD
void __kmpc_atomic_float16_max( ident_t *id_ref, int gtid, QUAD_LEGACY * lhs, QUAD_LEGACY rhs );
void __kmpc_atomic_float16_min( ident_t *id_ref, int gtid, QUAD_LEGACY * lhs, QUAD_LEGACY rhs );
#if ( KMP_ARCH_X86 )
    // Routines with 16-byte arguments aligned to 16-byte boundary; IA-32 architecture only
    void __kmpc_atomic_float16_max_a16( ident_t * id_ref, int gtid, Quad_a16_t * lhs, Quad_a16_t rhs );
    void __kmpc_atomic_float16_min_a16( ident_t * id_ref, int gtid, Quad_a16_t * lhs, Quad_a16_t rhs );
#endif
#endif
// .NEQV. (same as xor)
void __kmpc_atomic_fixed1_neqv( ident_t *id_ref, int gtid, char * lhs, char rhs );
void __kmpc_atomic_fixed2_neqv( ident_t *id_ref, int gtid, short * lhs, short rhs );
void __kmpc_atomic_fixed4_neqv( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs );
void __kmpc_atomic_fixed8_neqv( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs );
// .EQV. (same as ~xor)
void __kmpc_atomic_fixed1_eqv( ident_t *id_ref, int gtid, char * lhs, char rhs );
void __kmpc_atomic_fixed2_eqv( ident_t *id_ref, int gtid, short * lhs, short rhs );
void __kmpc_atomic_fixed4_eqv( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs );
void __kmpc_atomic_fixed8_eqv( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs );
// long double type
void __kmpc_atomic_float10_add( ident_t *id_ref, int gtid, long double * lhs, long double rhs );
void __kmpc_atomic_float10_sub( ident_t *id_ref, int gtid, long double * lhs, long double rhs );
void __kmpc_atomic_float10_mul( ident_t *id_ref, int gtid, long double * lhs, long double rhs );
void __kmpc_atomic_float10_div( ident_t *id_ref, int gtid, long double * lhs, long double rhs );
// _Quad type
#if KMP_HAVE_QUAD
void __kmpc_atomic_float16_add( ident_t *id_ref, int gtid, QUAD_LEGACY * lhs, QUAD_LEGACY rhs );
void __kmpc_atomic_float16_sub( ident_t *id_ref, int gtid, QUAD_LEGACY * lhs, QUAD_LEGACY rhs );
void __kmpc_atomic_float16_mul( ident_t *id_ref, int gtid, QUAD_LEGACY * lhs, QUAD_LEGACY rhs );
void __kmpc_atomic_float16_div( ident_t *id_ref, int gtid, QUAD_LEGACY * lhs, QUAD_LEGACY rhs );
#if ( KMP_ARCH_X86 )
    // Routines with 16-byte arguments aligned to 16-byte boundary
    void __kmpc_atomic_float16_add_a16( ident_t * id_ref, int gtid, Quad_a16_t * lhs, Quad_a16_t rhs );
    void __kmpc_atomic_float16_sub_a16( ident_t * id_ref, int gtid, Quad_a16_t * lhs, Quad_a16_t rhs );
    void __kmpc_atomic_float16_mul_a16( ident_t * id_ref, int gtid, Quad_a16_t * lhs, Quad_a16_t rhs );
    void __kmpc_atomic_float16_div_a16( ident_t * id_ref, int gtid, Quad_a16_t * lhs, Quad_a16_t rhs );
#endif
#endif
// routines for complex types
void __kmpc_atomic_cmplx4_add( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs );
void __kmpc_atomic_cmplx4_sub( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs );
void __kmpc_atomic_cmplx4_mul( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs );
void __kmpc_atomic_cmplx4_div( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs );
void __kmpc_atomic_cmplx8_add( ident_t *id_ref, int gtid, kmp_cmplx64 * lhs, kmp_cmplx64 rhs );
void __kmpc_atomic_cmplx8_sub( ident_t *id_ref, int gtid, kmp_cmplx64 * lhs, kmp_cmplx64 rhs );
void __kmpc_atomic_cmplx8_mul( ident_t *id_ref, int gtid, kmp_cmplx64 * lhs, kmp_cmplx64 rhs );
void __kmpc_atomic_cmplx8_div( ident_t *id_ref, int gtid, kmp_cmplx64 * lhs, kmp_cmplx64 rhs );
void __kmpc_atomic_cmplx10_add( ident_t *id_ref, int gtid, kmp_cmplx80 * lhs, kmp_cmplx80 rhs );
void __kmpc_atomic_cmplx10_sub( ident_t *id_ref, int gtid, kmp_cmplx80 * lhs, kmp_cmplx80 rhs );
void __kmpc_atomic_cmplx10_mul( ident_t *id_ref, int gtid, kmp_cmplx80 * lhs, kmp_cmplx80 rhs );
void __kmpc_atomic_cmplx10_div( ident_t *id_ref, int gtid, kmp_cmplx80 * lhs, kmp_cmplx80 rhs );
#if KMP_HAVE_QUAD
void __kmpc_atomic_cmplx16_add( ident_t *id_ref, int gtid, CPLX128_LEG * lhs, CPLX128_LEG rhs );
void __kmpc_atomic_cmplx16_sub( ident_t *id_ref, int gtid, CPLX128_LEG * lhs, CPLX128_LEG rhs );
void __kmpc_atomic_cmplx16_mul( ident_t *id_ref, int gtid, CPLX128_LEG * lhs, CPLX128_LEG rhs );
void __kmpc_atomic_cmplx16_div( ident_t *id_ref, int gtid, CPLX128_LEG * lhs, CPLX128_LEG rhs );
#if ( KMP_ARCH_X86 )
    // Routines with 16-byte arguments aligned to 16-byte boundary
    void __kmpc_atomic_cmplx16_add_a16( ident_t * id_ref, int gtid, kmp_cmplx128_a16_t * lhs, kmp_cmplx128_a16_t rhs );
    void __kmpc_atomic_cmplx16_sub_a16( ident_t * id_ref, int gtid, kmp_cmplx128_a16_t * lhs, kmp_cmplx128_a16_t rhs );
    void __kmpc_atomic_cmplx16_mul_a16( ident_t * id_ref, int gtid, kmp_cmplx128_a16_t * lhs, kmp_cmplx128_a16_t rhs );
    void __kmpc_atomic_cmplx16_div_a16( ident_t * id_ref, int gtid, kmp_cmplx128_a16_t * lhs, kmp_cmplx128_a16_t rhs );
#endif
#endif

#if OMP_40_ENABLED

// OpenMP 4.0: x = expr binop x for non-commutative operations.
// Supported only on IA-32 architecture and Intel(R) 64
#if KMP_ARCH_X86 || KMP_ARCH_X86_64

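// As an illustrative sketch (assuming double x), a reversed update
//     #pragma omp atomic
//     x = expr / x;
// maps to a call such as
//     __kmpc_atomic_float8_div_rev( &loc, gtid, &x, expr );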
void __kmpc_atomic_fixed1_sub_rev( ident_t *id_ref, int gtid, char * lhs, char rhs );
void __kmpc_atomic_fixed1_div_rev( ident_t *id_ref, int gtid, char * lhs, char rhs );
void __kmpc_atomic_fixed1u_div_rev( ident_t *id_ref, int gtid, unsigned char * lhs, unsigned char rhs );
void __kmpc_atomic_fixed1_shl_rev( ident_t *id_ref, int gtid, char * lhs, char rhs );
void __kmpc_atomic_fixed1_shr_rev( ident_t *id_ref, int gtid, char * lhs, char rhs );
void __kmpc_atomic_fixed1u_shr_rev( ident_t *id_ref, int gtid, unsigned char * lhs, unsigned char rhs );
void __kmpc_atomic_fixed2_sub_rev( ident_t *id_ref, int gtid, short * lhs, short rhs );
void __kmpc_atomic_fixed2_div_rev( ident_t *id_ref, int gtid, short * lhs, short rhs );
void __kmpc_atomic_fixed2u_div_rev( ident_t *id_ref, int gtid, unsigned short * lhs, unsigned short rhs );
void __kmpc_atomic_fixed2_shl_rev( ident_t *id_ref, int gtid, short * lhs, short rhs );
void __kmpc_atomic_fixed2_shr_rev( ident_t *id_ref, int gtid, short * lhs, short rhs );
void __kmpc_atomic_fixed2u_shr_rev( ident_t *id_ref, int gtid, unsigned short * lhs, unsigned short rhs );
void __kmpc_atomic_fixed4_sub_rev( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs );
void __kmpc_atomic_fixed4_div_rev( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs );
void __kmpc_atomic_fixed4u_div_rev( ident_t *id_ref, int gtid, kmp_uint32 * lhs, kmp_uint32 rhs );
void __kmpc_atomic_fixed4_shl_rev( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs );
void __kmpc_atomic_fixed4_shr_rev( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs );
void __kmpc_atomic_fixed4u_shr_rev( ident_t *id_ref, int gtid, kmp_uint32 * lhs, kmp_uint32 rhs );
void __kmpc_atomic_fixed8_sub_rev( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs );
void __kmpc_atomic_fixed8_div_rev( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs );
void __kmpc_atomic_fixed8u_div_rev( ident_t *id_ref, int gtid, kmp_uint64 * lhs, kmp_uint64 rhs );
void __kmpc_atomic_fixed8_shl_rev( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs );
void __kmpc_atomic_fixed8_shr_rev( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs );
void __kmpc_atomic_fixed8u_shr_rev( ident_t *id_ref, int gtid, kmp_uint64 * lhs, kmp_uint64 rhs );
void __kmpc_atomic_float4_sub_rev( ident_t *id_ref, int gtid, float * lhs, float rhs );
void __kmpc_atomic_float4_div_rev( ident_t *id_ref, int gtid, float * lhs, float rhs );
void __kmpc_atomic_float8_sub_rev( ident_t *id_ref, int gtid, double * lhs, double rhs );
void __kmpc_atomic_float8_div_rev( ident_t *id_ref, int gtid, double * lhs, double rhs );
void __kmpc_atomic_float10_sub_rev( ident_t *id_ref, int gtid, long double * lhs, long double rhs );
void __kmpc_atomic_float10_div_rev( ident_t *id_ref, int gtid, long double * lhs, long double rhs );
#if KMP_HAVE_QUAD
void __kmpc_atomic_float16_sub_rev( ident_t *id_ref, int gtid, QUAD_LEGACY * lhs, QUAD_LEGACY rhs );
void __kmpc_atomic_float16_div_rev( ident_t *id_ref, int gtid, QUAD_LEGACY * lhs, QUAD_LEGACY rhs );
#endif
void __kmpc_atomic_cmplx4_sub_rev( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs );
void __kmpc_atomic_cmplx4_div_rev( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs );
void __kmpc_atomic_cmplx8_sub_rev( ident_t *id_ref, int gtid, kmp_cmplx64 * lhs, kmp_cmplx64 rhs );
void __kmpc_atomic_cmplx8_div_rev( ident_t *id_ref, int gtid, kmp_cmplx64 * lhs, kmp_cmplx64 rhs );
void __kmpc_atomic_cmplx10_sub_rev( ident_t *id_ref, int gtid, kmp_cmplx80 * lhs, kmp_cmplx80 rhs );
void __kmpc_atomic_cmplx10_div_rev( ident_t *id_ref, int gtid, kmp_cmplx80 * lhs, kmp_cmplx80 rhs );
#if KMP_HAVE_QUAD
void __kmpc_atomic_cmplx16_sub_rev( ident_t *id_ref, int gtid, CPLX128_LEG * lhs, CPLX128_LEG rhs );
void __kmpc_atomic_cmplx16_div_rev( ident_t *id_ref, int gtid, CPLX128_LEG * lhs, CPLX128_LEG rhs );
#if ( KMP_ARCH_X86 )
    // Routines with 16-byte arguments aligned to 16-byte boundary
    void __kmpc_atomic_float16_sub_a16_rev( ident_t * id_ref, int gtid, Quad_a16_t * lhs, Quad_a16_t rhs );
    void __kmpc_atomic_float16_div_a16_rev( ident_t * id_ref, int gtid, Quad_a16_t * lhs, Quad_a16_t rhs );
    void __kmpc_atomic_cmplx16_sub_a16_rev( ident_t * id_ref, int gtid, kmp_cmplx128_a16_t * lhs, kmp_cmplx128_a16_t rhs );
    void __kmpc_atomic_cmplx16_div_a16_rev( ident_t * id_ref, int gtid, kmp_cmplx128_a16_t * lhs, kmp_cmplx128_a16_t rhs );
#endif
#endif // KMP_HAVE_QUAD

#endif // KMP_ARCH_X86 || KMP_ARCH_X86_64

#endif // OMP_40_ENABLED

// routines for mixed types

// RHS=float8
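// These handle mixed-type updates where the RHS is wider than the LHS; as an
// illustrative sketch, for char x and double y
//     #pragma omp atomic
//     x *= y;
// becomes a call such as
//     __kmpc_atomic_fixed1_mul_float8( &loc, gtid, &x, y );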
void __kmpc_atomic_fixed1_mul_float8( ident_t *id_ref, int gtid, char * lhs, kmp_real64 rhs );
void __kmpc_atomic_fixed1_div_float8( ident_t *id_ref, int gtid, char * lhs, kmp_real64 rhs );
void __kmpc_atomic_fixed2_mul_float8( ident_t *id_ref, int gtid, short * lhs, kmp_real64 rhs );
void __kmpc_atomic_fixed2_div_float8( ident_t *id_ref, int gtid, short * lhs, kmp_real64 rhs );
void __kmpc_atomic_fixed4_mul_float8( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_real64 rhs );
void __kmpc_atomic_fixed4_div_float8( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_real64 rhs );
void __kmpc_atomic_fixed8_mul_float8( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_real64 rhs );
void __kmpc_atomic_fixed8_div_float8( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_real64 rhs );
void __kmpc_atomic_float4_add_float8( ident_t *id_ref, int gtid, kmp_real32 * lhs, kmp_real64 rhs );
void __kmpc_atomic_float4_sub_float8( ident_t *id_ref, int gtid, kmp_real32 * lhs, kmp_real64 rhs );
void __kmpc_atomic_float4_mul_float8( ident_t *id_ref, int gtid, kmp_real32 * lhs, kmp_real64 rhs );
void __kmpc_atomic_float4_div_float8( ident_t *id_ref, int gtid, kmp_real32 * lhs, kmp_real64 rhs );

// RHS=float16 (deprecated, to be removed when we are sure the compiler does not use them)
#if KMP_HAVE_QUAD
void __kmpc_atomic_fixed1_add_fp( ident_t *id_ref, int gtid, char * lhs, _Quad rhs );
void __kmpc_atomic_fixed1_sub_fp( ident_t *id_ref, int gtid, char * lhs, _Quad rhs );
void __kmpc_atomic_fixed1_mul_fp( ident_t *id_ref, int gtid, char * lhs, _Quad rhs );
void __kmpc_atomic_fixed1_div_fp( ident_t *id_ref, int gtid, char * lhs, _Quad rhs );
void __kmpc_atomic_fixed1u_div_fp( ident_t *id_ref, int gtid, unsigned char * lhs, _Quad rhs );

void __kmpc_atomic_fixed2_add_fp( ident_t *id_ref, int gtid, short * lhs, _Quad rhs );
void __kmpc_atomic_fixed2_sub_fp( ident_t *id_ref, int gtid, short * lhs, _Quad rhs );
void __kmpc_atomic_fixed2_mul_fp( ident_t *id_ref, int gtid, short * lhs, _Quad rhs );
void __kmpc_atomic_fixed2_div_fp( ident_t *id_ref, int gtid, short * lhs, _Quad rhs );
void __kmpc_atomic_fixed2u_div_fp( ident_t *id_ref, int gtid, unsigned short * lhs, _Quad rhs );

void __kmpc_atomic_fixed4_add_fp( ident_t *id_ref, int gtid, kmp_int32 * lhs, _Quad rhs );
void __kmpc_atomic_fixed4_sub_fp( ident_t *id_ref, int gtid, kmp_int32 * lhs, _Quad rhs );
void __kmpc_atomic_fixed4_mul_fp( ident_t *id_ref, int gtid, kmp_int32 * lhs, _Quad rhs );
void __kmpc_atomic_fixed4_div_fp( ident_t *id_ref, int gtid, kmp_int32 * lhs, _Quad rhs );
void __kmpc_atomic_fixed4u_div_fp( ident_t *id_ref, int gtid, kmp_uint32 * lhs, _Quad rhs );

void __kmpc_atomic_fixed8_add_fp( ident_t *id_ref, int gtid, kmp_int64 * lhs, _Quad rhs );
void __kmpc_atomic_fixed8_sub_fp( ident_t *id_ref, int gtid, kmp_int64 * lhs, _Quad rhs );
void __kmpc_atomic_fixed8_mul_fp( ident_t *id_ref, int gtid, kmp_int64 * lhs, _Quad rhs );
void __kmpc_atomic_fixed8_div_fp( ident_t *id_ref, int gtid, kmp_int64 * lhs, _Quad rhs );
void __kmpc_atomic_fixed8u_div_fp( ident_t *id_ref, int gtid, kmp_uint64 * lhs, _Quad rhs );

void __kmpc_atomic_float4_add_fp( ident_t *id_ref, int gtid, kmp_real32 * lhs, _Quad rhs );
void __kmpc_atomic_float4_sub_fp( ident_t *id_ref, int gtid, kmp_real32 * lhs, _Quad rhs );
void __kmpc_atomic_float4_mul_fp( ident_t *id_ref, int gtid, kmp_real32 * lhs, _Quad rhs );
void __kmpc_atomic_float4_div_fp( ident_t *id_ref, int gtid, kmp_real32 * lhs, _Quad rhs );

void __kmpc_atomic_float8_add_fp( ident_t *id_ref, int gtid, kmp_real64 * lhs, _Quad rhs );
void __kmpc_atomic_float8_sub_fp( ident_t *id_ref, int gtid, kmp_real64 * lhs, _Quad rhs );
void __kmpc_atomic_float8_mul_fp( ident_t *id_ref, int gtid, kmp_real64 * lhs, _Quad rhs );
void __kmpc_atomic_float8_div_fp( ident_t *id_ref, int gtid, kmp_real64 * lhs, _Quad rhs );

void __kmpc_atomic_float10_add_fp( ident_t *id_ref, int gtid, long double * lhs, _Quad rhs );
void __kmpc_atomic_float10_sub_fp( ident_t *id_ref, int gtid, long double * lhs, _Quad rhs );
void __kmpc_atomic_float10_mul_fp( ident_t *id_ref, int gtid, long double * lhs, _Quad rhs );
void __kmpc_atomic_float10_div_fp( ident_t *id_ref, int gtid, long double * lhs, _Quad rhs );
#endif // KMP_HAVE_QUAD

// RHS=cmplx8
void __kmpc_atomic_cmplx4_add_cmplx8( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx64 rhs );
void __kmpc_atomic_cmplx4_sub_cmplx8( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx64 rhs );
void __kmpc_atomic_cmplx4_mul_cmplx8( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx64 rhs );
void __kmpc_atomic_cmplx4_div_cmplx8( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx64 rhs );

// generic atomic routines
void __kmpc_atomic_1( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) );
void __kmpc_atomic_2( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) );
void __kmpc_atomic_4( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) );
void __kmpc_atomic_8( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) );
void __kmpc_atomic_10( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) );
void __kmpc_atomic_16( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) );
void __kmpc_atomic_20( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) );
void __kmpc_atomic_32( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) );
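// The generic routines above cover operands of 1..32 bytes; 'f' is a
// compiler-supplied callback that computes the combined value through its
// void* arguments (see kmp_atomic.c for the exact calling convention).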

// READ, WRITE, CAPTURE are supported only on IA-32 architecture and Intel(R) 64
#if KMP_ARCH_X86 || KMP_ARCH_X86_64

//
// Below are the routines for atomic READ.
//

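// As an illustrative sketch (assuming kmp_int32 x), an atomic read 'v = x;'
// maps to a call such as
//     v = __kmpc_atomic_fixed4_rd( &loc, gtid, &x );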
char __kmpc_atomic_fixed1_rd( ident_t *id_ref, int gtid, char * loc );
short __kmpc_atomic_fixed2_rd( ident_t *id_ref, int gtid, short * loc );
kmp_int32 __kmpc_atomic_fixed4_rd( ident_t *id_ref, int gtid, kmp_int32 * loc );
kmp_int64 __kmpc_atomic_fixed8_rd( ident_t *id_ref, int gtid, kmp_int64 * loc );
kmp_real32 __kmpc_atomic_float4_rd( ident_t *id_ref, int gtid, kmp_real32 * loc );
kmp_real64 __kmpc_atomic_float8_rd( ident_t *id_ref, int gtid, kmp_real64 * loc );
long double __kmpc_atomic_float10_rd( ident_t *id_ref, int gtid, long double * loc );
#if KMP_HAVE_QUAD
QUAD_LEGACY __kmpc_atomic_float16_rd( ident_t *id_ref, int gtid, QUAD_LEGACY * loc );
#endif
// Fix for CQ220361: cmplx4 READ returns void on Windows* OS; the read value is
// returned through an additional parameter.
#if ( KMP_OS_WINDOWS )
    void __kmpc_atomic_cmplx4_rd( kmp_cmplx32 * out, ident_t *id_ref, int gtid, kmp_cmplx32 * loc );
#else
    kmp_cmplx32 __kmpc_atomic_cmplx4_rd( ident_t *id_ref, int gtid, kmp_cmplx32 * loc );
#endif
kmp_cmplx64 __kmpc_atomic_cmplx8_rd( ident_t *id_ref, int gtid, kmp_cmplx64 * loc );
kmp_cmplx80 __kmpc_atomic_cmplx10_rd( ident_t *id_ref, int gtid, kmp_cmplx80 * loc );
#if KMP_HAVE_QUAD
CPLX128_LEG __kmpc_atomic_cmplx16_rd( ident_t *id_ref, int gtid, CPLX128_LEG * loc );
#if ( KMP_ARCH_X86 )
    // Routines with 16-byte arguments aligned to 16-byte boundary
    Quad_a16_t __kmpc_atomic_float16_a16_rd( ident_t * id_ref, int gtid, Quad_a16_t * loc );
    kmp_cmplx128_a16_t __kmpc_atomic_cmplx16_a16_rd( ident_t * id_ref, int gtid, kmp_cmplx128_a16_t * loc );
#endif
#endif

//
// Below are the routines for atomic WRITE.
//

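// As an illustrative sketch (assuming kmp_int32 x), an atomic write 'x = expr;'
// maps to a call such as
//     __kmpc_atomic_fixed4_wr( &loc, gtid, &x, expr );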
void __kmpc_atomic_fixed1_wr( ident_t *id_ref, int gtid, char * lhs, char rhs );
void __kmpc_atomic_fixed2_wr( ident_t *id_ref, int gtid, short * lhs, short rhs );
void __kmpc_atomic_fixed4_wr( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs );
void __kmpc_atomic_fixed8_wr( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs );
void __kmpc_atomic_float4_wr( ident_t *id_ref, int gtid, kmp_real32 * lhs, kmp_real32 rhs );
void __kmpc_atomic_float8_wr( ident_t *id_ref, int gtid, kmp_real64 * lhs, kmp_real64 rhs );
void __kmpc_atomic_float10_wr( ident_t *id_ref, int gtid, long double * lhs, long double rhs );
#if KMP_HAVE_QUAD
void __kmpc_atomic_float16_wr( ident_t *id_ref, int gtid, QUAD_LEGACY * lhs, QUAD_LEGACY rhs );
#endif
void __kmpc_atomic_cmplx4_wr( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs );
void __kmpc_atomic_cmplx8_wr( ident_t *id_ref, int gtid, kmp_cmplx64 * lhs, kmp_cmplx64 rhs );
void __kmpc_atomic_cmplx10_wr( ident_t *id_ref, int gtid, kmp_cmplx80 * lhs, kmp_cmplx80 rhs );
#if KMP_HAVE_QUAD
void __kmpc_atomic_cmplx16_wr( ident_t *id_ref, int gtid, CPLX128_LEG * lhs, CPLX128_LEG rhs );
#if ( KMP_ARCH_X86 )
    // Routines with 16-byte arguments aligned to 16-byte boundary
    void __kmpc_atomic_float16_a16_wr( ident_t * id_ref, int gtid, Quad_a16_t * lhs, Quad_a16_t rhs );
    void __kmpc_atomic_cmplx16_a16_wr( ident_t * id_ref, int gtid, kmp_cmplx128_a16_t * lhs, kmp_cmplx128_a16_t rhs );
#endif
#endif

//
// Below are the routines for atomic CAPTURE.
//

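// Each capture routine takes an extra 'flag' argument selecting which value is
// returned: presumably the post-update value when flag is non-zero
// (v = x = x op expr) and the pre-update value otherwise (v = x; x = x op expr);
// see kmp_atomic.c for the authoritative semantics.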
// 1-byte
char __kmpc_atomic_fixed1_add_cpt( ident_t *id_ref, int gtid, char * lhs, char rhs, int flag);
char __kmpc_atomic_fixed1_andb_cpt( ident_t *id_ref, int gtid, char * lhs, char rhs, int flag);
char __kmpc_atomic_fixed1_div_cpt( ident_t *id_ref, int gtid, char * lhs, char rhs, int flag);
unsigned char __kmpc_atomic_fixed1u_div_cpt( ident_t *id_ref, int gtid, unsigned char * lhs, unsigned char rhs, int flag);
char __kmpc_atomic_fixed1_mul_cpt( ident_t *id_ref, int gtid, char * lhs, char rhs, int flag);
char __kmpc_atomic_fixed1_orb_cpt( ident_t *id_ref, int gtid, char * lhs, char rhs, int flag);
char __kmpc_atomic_fixed1_shl_cpt( ident_t *id_ref, int gtid, char * lhs, char rhs, int flag);
char __kmpc_atomic_fixed1_shr_cpt( ident_t *id_ref, int gtid, char * lhs, char rhs, int flag);
unsigned char __kmpc_atomic_fixed1u_shr_cpt( ident_t *id_ref, int gtid, unsigned char * lhs, unsigned char rhs, int flag);
char __kmpc_atomic_fixed1_sub_cpt( ident_t *id_ref, int gtid, char * lhs, char rhs, int flag);
char __kmpc_atomic_fixed1_xor_cpt( ident_t *id_ref, int gtid, char * lhs, char rhs, int flag);
// 2-byte
short __kmpc_atomic_fixed2_add_cpt( ident_t *id_ref, int gtid, short * lhs, short rhs, int flag);
short __kmpc_atomic_fixed2_andb_cpt( ident_t *id_ref, int gtid, short * lhs, short rhs, int flag);
short __kmpc_atomic_fixed2_div_cpt( ident_t *id_ref, int gtid, short * lhs, short rhs, int flag);
unsigned short __kmpc_atomic_fixed2u_div_cpt( ident_t *id_ref, int gtid, unsigned short * lhs, unsigned short rhs, int flag);
short __kmpc_atomic_fixed2_mul_cpt( ident_t *id_ref, int gtid, short * lhs, short rhs, int flag);
short __kmpc_atomic_fixed2_orb_cpt( ident_t *id_ref, int gtid, short * lhs, short rhs, int flag);
short __kmpc_atomic_fixed2_shl_cpt( ident_t *id_ref, int gtid, short * lhs, short rhs, int flag);
short __kmpc_atomic_fixed2_shr_cpt( ident_t *id_ref, int gtid, short * lhs, short rhs, int flag);
unsigned short __kmpc_atomic_fixed2u_shr_cpt( ident_t *id_ref, int gtid, unsigned short * lhs, unsigned short rhs, int flag);
short __kmpc_atomic_fixed2_sub_cpt( ident_t *id_ref, int gtid, short * lhs, short rhs, int flag);
short __kmpc_atomic_fixed2_xor_cpt( ident_t *id_ref, int gtid, short * lhs, short rhs, int flag);
// 4-byte add / sub fixed
kmp_int32 __kmpc_atomic_fixed4_add_cpt( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_sub_cpt( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs, int flag);
// 4-byte add / sub float
kmp_real32 __kmpc_atomic_float4_add_cpt( ident_t *id_ref, int gtid, kmp_real32 * lhs, kmp_real32 rhs, int flag);
kmp_real32 __kmpc_atomic_float4_sub_cpt( ident_t *id_ref, int gtid, kmp_real32 * lhs, kmp_real32 rhs, int flag);
// 8-byte add / sub fixed
kmp_int64 __kmpc_atomic_fixed8_add_cpt( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_sub_cpt( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs, int flag);
// 8-byte add / sub float
kmp_real64 __kmpc_atomic_float8_add_cpt( ident_t *id_ref, int gtid, kmp_real64 * lhs, kmp_real64 rhs, int flag);
kmp_real64 __kmpc_atomic_float8_sub_cpt( ident_t *id_ref, int gtid, kmp_real64 * lhs, kmp_real64 rhs, int flag);
// 4-byte fixed
kmp_int32 __kmpc_atomic_fixed4_andb_cpt( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_div_cpt( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs, int flag);
kmp_uint32 __kmpc_atomic_fixed4u_div_cpt( ident_t *id_ref, int gtid, kmp_uint32 * lhs, kmp_uint32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_mul_cpt( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_orb_cpt( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_shl_cpt( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_shr_cpt( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs, int flag);
kmp_uint32 __kmpc_atomic_fixed4u_shr_cpt( ident_t *id_ref, int gtid, kmp_uint32 * lhs, kmp_uint32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_xor_cpt( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs, int flag);
// 8-byte fixed
kmp_int64 __kmpc_atomic_fixed8_andb_cpt( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_div_cpt( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs, int flag);
kmp_uint64 __kmpc_atomic_fixed8u_div_cpt( ident_t *id_ref, int gtid, kmp_uint64 * lhs, kmp_uint64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_mul_cpt( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_orb_cpt( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_shl_cpt( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_shr_cpt( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs, int flag);
kmp_uint64 __kmpc_atomic_fixed8u_shr_cpt( ident_t *id_ref, int gtid, kmp_uint64 * lhs, kmp_uint64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_xor_cpt( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs, int flag);
// 4-byte float
kmp_real32 __kmpc_atomic_float4_div_cpt( ident_t *id_ref, int gtid, kmp_real32 * lhs, kmp_real32 rhs, int flag);
kmp_real32 __kmpc_atomic_float4_mul_cpt( ident_t *id_ref, int gtid, kmp_real32 * lhs, kmp_real32 rhs, int flag);
// 8-byte float
kmp_real64 __kmpc_atomic_float8_div_cpt( ident_t *id_ref, int gtid, kmp_real64 * lhs, kmp_real64 rhs, int flag);
kmp_real64 __kmpc_atomic_float8_mul_cpt( ident_t *id_ref, int gtid, kmp_real64 * lhs, kmp_real64 rhs, int flag);
// 1-, 2-, 4-, 8-byte logical (&&, ||)
char __kmpc_atomic_fixed1_andl_cpt( ident_t *id_ref, int gtid, char * lhs, char rhs, int flag);
char __kmpc_atomic_fixed1_orl_cpt( ident_t *id_ref, int gtid, char * lhs, char rhs, int flag);
short __kmpc_atomic_fixed2_andl_cpt( ident_t *id_ref, int gtid, short * lhs, short rhs, int flag);
short __kmpc_atomic_fixed2_orl_cpt( ident_t *id_ref, int gtid, short * lhs, short rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_andl_cpt( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_orl_cpt( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_andl_cpt( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_orl_cpt( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs, int flag);
// MIN / MAX
char __kmpc_atomic_fixed1_max_cpt( ident_t *id_ref, int gtid, char * lhs, char rhs, int flag);
char __kmpc_atomic_fixed1_min_cpt( ident_t *id_ref, int gtid, char * lhs, char rhs, int flag);
short __kmpc_atomic_fixed2_max_cpt( ident_t *id_ref, int gtid, short * lhs, short rhs, int flag);
short __kmpc_atomic_fixed2_min_cpt( ident_t *id_ref, int gtid, short * lhs, short rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_max_cpt( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_min_cpt( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_max_cpt( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_min_cpt( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs, int flag);
kmp_real32 __kmpc_atomic_float4_max_cpt( ident_t *id_ref, int gtid, kmp_real32 * lhs, kmp_real32 rhs, int flag);
kmp_real32 __kmpc_atomic_float4_min_cpt( ident_t *id_ref, int gtid, kmp_real32 * lhs, kmp_real32 rhs, int flag);
kmp_real64 __kmpc_atomic_float8_max_cpt( ident_t *id_ref, int gtid, kmp_real64 * lhs, kmp_real64 rhs, int flag);
kmp_real64 __kmpc_atomic_float8_min_cpt( ident_t *id_ref, int gtid, kmp_real64 * lhs, kmp_real64 rhs, int flag);
#if KMP_HAVE_QUAD
QUAD_LEGACY __kmpc_atomic_float16_max_cpt( ident_t *id_ref, int gtid, QUAD_LEGACY * lhs, QUAD_LEGACY rhs, int flag);
QUAD_LEGACY __kmpc_atomic_float16_min_cpt( ident_t *id_ref, int gtid, QUAD_LEGACY * lhs, QUAD_LEGACY rhs, int flag);
#endif
// .NEQV. (same as xor)
char __kmpc_atomic_fixed1_neqv_cpt( ident_t *id_ref, int gtid, char * lhs, char rhs, int flag);
short __kmpc_atomic_fixed2_neqv_cpt( ident_t *id_ref, int gtid, short * lhs, short rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_neqv_cpt( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_neqv_cpt( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs, int flag);
// .EQV. (same as ~xor)
char __kmpc_atomic_fixed1_eqv_cpt( ident_t *id_ref, int gtid, char * lhs, char rhs, int flag);
short __kmpc_atomic_fixed2_eqv_cpt( ident_t *id_ref, int gtid, short * lhs, short rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_eqv_cpt( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_eqv_cpt( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs, int flag);
// long double type
long double __kmpc_atomic_float10_add_cpt( ident_t *id_ref, int gtid, long double * lhs, long double rhs, int flag);
long double __kmpc_atomic_float10_sub_cpt( ident_t *id_ref, int gtid, long double * lhs, long double rhs, int flag);
long double __kmpc_atomic_float10_mul_cpt( ident_t *id_ref, int gtid, long double * lhs, long double rhs, int flag);
long double __kmpc_atomic_float10_div_cpt( ident_t *id_ref, int gtid, long double * lhs, long double rhs, int flag);
#if KMP_HAVE_QUAD
// _Quad type
QUAD_LEGACY __kmpc_atomic_float16_add_cpt( ident_t *id_ref, int gtid, QUAD_LEGACY * lhs, QUAD_LEGACY rhs, int flag);
QUAD_LEGACY __kmpc_atomic_float16_sub_cpt( ident_t *id_ref, int gtid, QUAD_LEGACY * lhs, QUAD_LEGACY rhs, int flag);
QUAD_LEGACY __kmpc_atomic_float16_mul_cpt( ident_t *id_ref, int gtid, QUAD_LEGACY * lhs, QUAD_LEGACY rhs, int flag);
QUAD_LEGACY __kmpc_atomic_float16_div_cpt( ident_t *id_ref, int gtid, QUAD_LEGACY * lhs, QUAD_LEGACY rhs, int flag);
#endif
// routines for complex types
// Workaround for the cmplx4 routines - they return void; the captured value is
// returned via the additional 'out' argument.
void __kmpc_atomic_cmplx4_add_cpt( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs, kmp_cmplx32 * out, int flag);
void __kmpc_atomic_cmplx4_sub_cpt( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs, kmp_cmplx32 * out, int flag);
void __kmpc_atomic_cmplx4_mul_cpt( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs, kmp_cmplx32 * out, int flag);
void __kmpc_atomic_cmplx4_div_cpt( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs, kmp_cmplx32 * out, int flag);

kmp_cmplx64 __kmpc_atomic_cmplx8_add_cpt( ident_t *id_ref, int gtid, kmp_cmplx64 * lhs, kmp_cmplx64 rhs, int flag);
kmp_cmplx64 __kmpc_atomic_cmplx8_sub_cpt( ident_t *id_ref, int gtid, kmp_cmplx64 * lhs, kmp_cmplx64 rhs, int flag);
kmp_cmplx64 __kmpc_atomic_cmplx8_mul_cpt( ident_t *id_ref, int gtid, kmp_cmplx64 * lhs, kmp_cmplx64 rhs, int flag);
kmp_cmplx64 __kmpc_atomic_cmplx8_div_cpt( ident_t *id_ref, int gtid, kmp_cmplx64 * lhs, kmp_cmplx64 rhs, int flag);
kmp_cmplx80 __kmpc_atomic_cmplx10_add_cpt( ident_t *id_ref, int gtid, kmp_cmplx80 * lhs, kmp_cmplx80 rhs, int flag);
kmp_cmplx80 __kmpc_atomic_cmplx10_sub_cpt( ident_t *id_ref, int gtid, kmp_cmplx80 * lhs, kmp_cmplx80 rhs, int flag);
kmp_cmplx80 __kmpc_atomic_cmplx10_mul_cpt( ident_t *id_ref, int gtid, kmp_cmplx80 * lhs, kmp_cmplx80 rhs, int flag);
kmp_cmplx80 __kmpc_atomic_cmplx10_div_cpt( ident_t *id_ref, int gtid, kmp_cmplx80 * lhs, kmp_cmplx80 rhs, int flag);
#if KMP_HAVE_QUAD
CPLX128_LEG __kmpc_atomic_cmplx16_add_cpt( ident_t *id_ref, int gtid, CPLX128_LEG * lhs, CPLX128_LEG rhs, int flag);
CPLX128_LEG __kmpc_atomic_cmplx16_sub_cpt( ident_t *id_ref, int gtid, CPLX128_LEG * lhs, CPLX128_LEG rhs, int flag);
CPLX128_LEG __kmpc_atomic_cmplx16_mul_cpt( ident_t *id_ref, int gtid, CPLX128_LEG * lhs, CPLX128_LEG rhs, int flag);
CPLX128_LEG __kmpc_atomic_cmplx16_div_cpt( ident_t *id_ref, int gtid, CPLX128_LEG * lhs, CPLX128_LEG rhs, int flag);
#if ( KMP_ARCH_X86 )
    // Routines with 16-byte arguments aligned to 16-byte boundary
    Quad_a16_t __kmpc_atomic_float16_add_a16_cpt( ident_t * id_ref, int gtid, Quad_a16_t * lhs, Quad_a16_t rhs, int flag);
    Quad_a16_t __kmpc_atomic_float16_sub_a16_cpt( ident_t * id_ref, int gtid, Quad_a16_t * lhs, Quad_a16_t rhs, int flag);
    Quad_a16_t __kmpc_atomic_float16_mul_a16_cpt( ident_t * id_ref, int gtid, Quad_a16_t * lhs, Quad_a16_t rhs, int flag);
    Quad_a16_t __kmpc_atomic_float16_div_a16_cpt( ident_t * id_ref, int gtid, Quad_a16_t * lhs, Quad_a16_t rhs, int flag);
    Quad_a16_t __kmpc_atomic_float16_max_a16_cpt( ident_t * id_ref, int gtid, Quad_a16_t * lhs, Quad_a16_t rhs, int flag);
    Quad_a16_t __kmpc_atomic_float16_min_a16_cpt( ident_t * id_ref, int gtid, Quad_a16_t * lhs, Quad_a16_t rhs, int flag);
    kmp_cmplx128_a16_t __kmpc_atomic_cmplx16_add_a16_cpt( ident_t * id_ref, int gtid, kmp_cmplx128_a16_t * lhs, kmp_cmplx128_a16_t rhs, int flag);
    kmp_cmplx128_a16_t __kmpc_atomic_cmplx16_sub_a16_cpt( ident_t * id_ref, int gtid, kmp_cmplx128_a16_t * lhs, kmp_cmplx128_a16_t rhs, int flag);
    kmp_cmplx128_a16_t __kmpc_atomic_cmplx16_mul_a16_cpt( ident_t * id_ref, int gtid, kmp_cmplx128_a16_t * lhs, kmp_cmplx128_a16_t rhs, int flag);
    kmp_cmplx128_a16_t __kmpc_atomic_cmplx16_div_a16_cpt( ident_t * id_ref, int gtid, kmp_cmplx128_a16_t * lhs, kmp_cmplx128_a16_t rhs, int flag);
#endif
#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +0000938
939void __kmpc_atomic_start(void);
940void __kmpc_atomic_end(void);
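
// __kmpc_atomic_start()/__kmpc_atomic_end() serialize an update that has no
// typed entry point above by holding the runtime's global atomic lock. A
// minimal sketch of the expected compiler-generated shape:
//
//   __kmpc_atomic_start();
//   x = x op expr;   // whatever update could not be matched to a routine
//   __kmpc_atomic_end();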

#if OMP_40_ENABLED

// OpenMP 4.0: capture with reversed operands: v = x = expr binop x; { v = x; x = expr binop x; } { x = expr binop x; v = x; } for non-commutative operations.

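// A minimal sketch ('loc' and 'gtid' as above): the *_cpt_rev routines apply
// the operation with the operands reversed, *lhs = rhs binop *lhs, which is
// why only non-commutative operations appear below:
//
//   kmp_int32 x = 2, v;
//   v = __kmpc_atomic_fixed4_sub_cpt_rev( loc, gtid, &x, 100, 1 ); // v = x = 100 - x; -> v == x == 98
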
char __kmpc_atomic_fixed1_sub_cpt_rev( ident_t *id_ref, int gtid, char * lhs, char rhs, int flag );
char __kmpc_atomic_fixed1_div_cpt_rev( ident_t *id_ref, int gtid, char * lhs, char rhs, int flag );
unsigned char __kmpc_atomic_fixed1u_div_cpt_rev( ident_t *id_ref, int gtid, unsigned char * lhs, unsigned char rhs, int flag );
char __kmpc_atomic_fixed1_shl_cpt_rev( ident_t *id_ref, int gtid, char * lhs, char rhs, int flag );
char __kmpc_atomic_fixed1_shr_cpt_rev( ident_t *id_ref, int gtid, char * lhs, char rhs, int flag );
unsigned char __kmpc_atomic_fixed1u_shr_cpt_rev( ident_t *id_ref, int gtid, unsigned char * lhs, unsigned char rhs, int flag );
short __kmpc_atomic_fixed2_sub_cpt_rev( ident_t *id_ref, int gtid, short * lhs, short rhs, int flag );
short __kmpc_atomic_fixed2_div_cpt_rev( ident_t *id_ref, int gtid, short * lhs, short rhs, int flag );
unsigned short __kmpc_atomic_fixed2u_div_cpt_rev( ident_t *id_ref, int gtid, unsigned short * lhs, unsigned short rhs, int flag );
short __kmpc_atomic_fixed2_shl_cpt_rev( ident_t *id_ref, int gtid, short * lhs, short rhs, int flag );
short __kmpc_atomic_fixed2_shr_cpt_rev( ident_t *id_ref, int gtid, short * lhs, short rhs, int flag );
unsigned short __kmpc_atomic_fixed2u_shr_cpt_rev( ident_t *id_ref, int gtid, unsigned short * lhs, unsigned short rhs, int flag );
kmp_int32 __kmpc_atomic_fixed4_sub_cpt_rev( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs, int flag );
kmp_int32 __kmpc_atomic_fixed4_div_cpt_rev( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs, int flag );
kmp_uint32 __kmpc_atomic_fixed4u_div_cpt_rev( ident_t *id_ref, int gtid, kmp_uint32 * lhs, kmp_uint32 rhs, int flag );
kmp_int32 __kmpc_atomic_fixed4_shl_cpt_rev( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs, int flag );
kmp_int32 __kmpc_atomic_fixed4_shr_cpt_rev( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs, int flag );
kmp_uint32 __kmpc_atomic_fixed4u_shr_cpt_rev( ident_t *id_ref, int gtid, kmp_uint32 * lhs, kmp_uint32 rhs, int flag );
kmp_int64 __kmpc_atomic_fixed8_sub_cpt_rev( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs, int flag );
kmp_int64 __kmpc_atomic_fixed8_div_cpt_rev( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs, int flag );
kmp_uint64 __kmpc_atomic_fixed8u_div_cpt_rev( ident_t *id_ref, int gtid, kmp_uint64 * lhs, kmp_uint64 rhs, int flag );
kmp_int64 __kmpc_atomic_fixed8_shl_cpt_rev( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs, int flag );
kmp_int64 __kmpc_atomic_fixed8_shr_cpt_rev( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs, int flag );
kmp_uint64 __kmpc_atomic_fixed8u_shr_cpt_rev( ident_t *id_ref, int gtid, kmp_uint64 * lhs, kmp_uint64 rhs, int flag );
float __kmpc_atomic_float4_sub_cpt_rev( ident_t *id_ref, int gtid, float * lhs, float rhs, int flag );
float __kmpc_atomic_float4_div_cpt_rev( ident_t *id_ref, int gtid, float * lhs, float rhs, int flag );
double __kmpc_atomic_float8_sub_cpt_rev( ident_t *id_ref, int gtid, double * lhs, double rhs, int flag );
double __kmpc_atomic_float8_div_cpt_rev( ident_t *id_ref, int gtid, double * lhs, double rhs, int flag );
long double __kmpc_atomic_float10_sub_cpt_rev( ident_t *id_ref, int gtid, long double * lhs, long double rhs, int flag );
long double __kmpc_atomic_float10_div_cpt_rev( ident_t *id_ref, int gtid, long double * lhs, long double rhs, int flag );
#if KMP_HAVE_QUAD
QUAD_LEGACY __kmpc_atomic_float16_sub_cpt_rev( ident_t *id_ref, int gtid, QUAD_LEGACY * lhs, QUAD_LEGACY rhs, int flag );
QUAD_LEGACY __kmpc_atomic_float16_div_cpt_rev( ident_t *id_ref, int gtid, QUAD_LEGACY * lhs, QUAD_LEGACY rhs, int flag );
#endif
// Workaround for cmplx4 routines: they return void; the captured value is returned via the 'out' argument
void __kmpc_atomic_cmplx4_sub_cpt_rev( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs, kmp_cmplx32 * out, int flag );
void __kmpc_atomic_cmplx4_div_cpt_rev( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs, kmp_cmplx32 * out, int flag );
kmp_cmplx64 __kmpc_atomic_cmplx8_sub_cpt_rev( ident_t *id_ref, int gtid, kmp_cmplx64 * lhs, kmp_cmplx64 rhs, int flag );
kmp_cmplx64 __kmpc_atomic_cmplx8_div_cpt_rev( ident_t *id_ref, int gtid, kmp_cmplx64 * lhs, kmp_cmplx64 rhs, int flag );
kmp_cmplx80 __kmpc_atomic_cmplx10_sub_cpt_rev( ident_t *id_ref, int gtid, kmp_cmplx80 * lhs, kmp_cmplx80 rhs, int flag );
kmp_cmplx80 __kmpc_atomic_cmplx10_div_cpt_rev( ident_t *id_ref, int gtid, kmp_cmplx80 * lhs, kmp_cmplx80 rhs, int flag );
#if KMP_HAVE_QUAD
CPLX128_LEG __kmpc_atomic_cmplx16_sub_cpt_rev( ident_t *id_ref, int gtid, CPLX128_LEG * lhs, CPLX128_LEG rhs, int flag );
CPLX128_LEG __kmpc_atomic_cmplx16_div_cpt_rev( ident_t *id_ref, int gtid, CPLX128_LEG * lhs, CPLX128_LEG rhs, int flag );
#if ( KMP_ARCH_X86 )
    Quad_a16_t __kmpc_atomic_float16_sub_a16_cpt_rev( ident_t * id_ref, int gtid, Quad_a16_t * lhs, Quad_a16_t rhs, int flag );
    Quad_a16_t __kmpc_atomic_float16_div_a16_cpt_rev( ident_t * id_ref, int gtid, Quad_a16_t * lhs, Quad_a16_t rhs, int flag );
    kmp_cmplx128_a16_t __kmpc_atomic_cmplx16_sub_a16_cpt_rev( ident_t * id_ref, int gtid, kmp_cmplx128_a16_t * lhs, kmp_cmplx128_a16_t rhs, int flag );
    kmp_cmplx128_a16_t __kmpc_atomic_cmplx16_div_a16_cpt_rev( ident_t * id_ref, int gtid, kmp_cmplx128_a16_t * lhs, kmp_cmplx128_a16_t rhs, int flag );
#endif
#endif

// OpenMP 4.0 Capture-write (swap): {v = x; x = expr;}
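// A minimal sketch ('loc' and 'gtid' as above):
//
//   kmp_int32 x = 7, v;
//   v = __kmpc_atomic_fixed4_swp( loc, gtid, &x, 3 ); // { v = x; x = 3; } -> v == 7, x == 3
//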
char __kmpc_atomic_fixed1_swp( ident_t *id_ref, int gtid, char * lhs, char rhs );
short __kmpc_atomic_fixed2_swp( ident_t *id_ref, int gtid, short * lhs, short rhs );
kmp_int32 __kmpc_atomic_fixed4_swp( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs );
kmp_int64 __kmpc_atomic_fixed8_swp( ident_t *id_ref, int gtid, kmp_int64 * lhs, kmp_int64 rhs );
float __kmpc_atomic_float4_swp( ident_t *id_ref, int gtid, float * lhs, float rhs );
double __kmpc_atomic_float8_swp( ident_t *id_ref, int gtid, double * lhs, double rhs );
long double __kmpc_atomic_float10_swp( ident_t *id_ref, int gtid, long double * lhs, long double rhs );
#if KMP_HAVE_QUAD
QUAD_LEGACY __kmpc_atomic_float16_swp( ident_t *id_ref, int gtid, QUAD_LEGACY * lhs, QUAD_LEGACY rhs );
#endif
// !!! TODO: check if we need a workaround here
void __kmpc_atomic_cmplx4_swp( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs, kmp_cmplx32 * out );
//kmp_cmplx32 __kmpc_atomic_cmplx4_swp( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs );

kmp_cmplx64 __kmpc_atomic_cmplx8_swp( ident_t *id_ref, int gtid, kmp_cmplx64 * lhs, kmp_cmplx64 rhs );
kmp_cmplx80 __kmpc_atomic_cmplx10_swp( ident_t *id_ref, int gtid, kmp_cmplx80 * lhs, kmp_cmplx80 rhs );
#if KMP_HAVE_QUAD
CPLX128_LEG __kmpc_atomic_cmplx16_swp( ident_t *id_ref, int gtid, CPLX128_LEG * lhs, CPLX128_LEG rhs );
#if ( KMP_ARCH_X86 )
    Quad_a16_t __kmpc_atomic_float16_a16_swp( ident_t *id_ref, int gtid, Quad_a16_t * lhs, Quad_a16_t rhs );
    kmp_cmplx128_a16_t __kmpc_atomic_cmplx16_a16_swp( ident_t *id_ref, int gtid, kmp_cmplx128_a16_t * lhs, kmp_cmplx128_a16_t rhs );
#endif
#endif

// End of OpenMP 4.0 capture

#endif //OMP_40_ENABLED

#endif //KMP_ARCH_X86 || KMP_ARCH_X86_64

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

#ifdef __cplusplus
    } // extern "C"
#endif

#endif /* KMP_ATOMIC_H */

// end of file