Headers: be a bit more careful about inline asm
Conditionally include x86intrin.h only when building for x86 or x86_64.
Conditionalise the definitions of routines that are implemented with x86
or x86_64 inline assembly. This is needed because clang can target
Windows on ARM, where these definitions may otherwise be included into
user code and fail to compile for the target.
llvm-svn: 211716
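
For reference, the guard pattern applied throughout the patch is sketched
below. This is a minimal, self-contained illustration (the routine
__example_pause is hypothetical, not part of Intrin.h): definitions that
contain x86 inline assembly are only emitted when the compiler predefines
__i386__ or __x86_64__, so the header stays safe to include when clang
targets ARM.

/* Minimal sketch of the guard pattern (hypothetical routine, not from
 * the patch): the x86 inline assembly is never seen by the compiler on
 * non-x86 targets such as Windows on ARM. */
#if defined(__i386__) || defined(__x86_64__)
static __inline__ void __attribute__((__always_inline__, __nodebug__))
__example_pause(void) {
  __asm__ volatile ("pause");
}
#endif
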
diff --git a/clang/lib/Headers/Intrin.h b/clang/lib/Headers/Intrin.h
index 37c0eb9..cad3a3a 100644
--- a/clang/lib/Headers/Intrin.h
+++ b/clang/lib/Headers/Intrin.h
@@ -30,7 +30,9 @@
#define __INTRIN_H

/* First include the standard intrinsics. */
+#if defined(__i386__) || defined(__x86_64__)
#include <x86intrin.h>
+#endif

/* For the definition of jmp_buf. */
#include <setjmp.h>
@@ -572,6 +574,7 @@
*a = *a | (1 << b);
return x;
}
+#if defined(__i386__) || defined(__x86_64__)
static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
_interlockedbittestandset(long volatile *__BitBase, long __BitPos) {
unsigned char __Res;
@@ -582,6 +585,7 @@
: "Ir"(__BitPos));
return __Res;
}
+#endif
#ifdef __x86_64__
static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
_BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask) {
@@ -813,6 +817,7 @@
/*----------------------------------------------------------------------------*\
|* Barriers
\*----------------------------------------------------------------------------*/
+#if defined(__i386__) || defined(__x86_64__)
static __inline__ void __attribute__((__always_inline__, __nodebug__))
__attribute__((deprecated("use other intrinsics or C++11 atomics instead")))
_ReadWriteBarrier(void) {
@@ -828,6 +833,7 @@
_WriteBarrier(void) {
__asm__ volatile ("" : : : "memory");
}
+#endif
#ifdef __x86_64__
static __inline__ void __attribute__((__always_inline__, __nodebug__))
__faststorefence(void) {
@@ -883,6 +889,7 @@
/*----------------------------------------------------------------------------*\
|* movs, stos
\*----------------------------------------------------------------------------*/
+#if defined(__i386__) || defined(__x86_64__)
static __inline__ void __attribute__((__always_inline__, __nodebug__))
__movsb(unsigned char *__dst, unsigned char const *__src, size_t __n) {
__asm__("rep movsb" : : "D"(__dst), "S"(__src), "c"(__n)
@@ -913,6 +920,7 @@
__asm__("rep stosh" : : "D"(__dst), "a"(__x), "c"(__n)
: "%edi", "%ecx");
}
+#endif
#ifdef __x86_64__
static __inline__ void __attribute__((__always_inline__, __nodebug__))
__movsq(unsigned long long *__dst, unsigned long long const *__src, size_t __n) {
@@ -937,6 +945,7 @@
_ReturnAddress(void) {
return __builtin_return_address(0);
}
+#if defined(__i386__) || defined(__x86_64__)
static __inline__ void __attribute__((__always_inline__, __nodebug__))
__cpuid(int __info[4], int __level) {
__asm__ ("cpuid" : "=a"(__info[0]), "=b" (__info[1]), "=c"(__info[2]), "=d"(__info[3])
@@ -957,10 +966,12 @@
__halt(void) {
__asm__ volatile ("hlt");
}
+#endif

/*----------------------------------------------------------------------------*\
|* Privileged intrinsics
\*----------------------------------------------------------------------------*/
+#if defined(__i386__) || defined(__x86_64__)
static __inline__ unsigned __int64 __attribute__((__always_inline__, __nodebug__))
__readmsr(unsigned long __register) {
// Loads the contents of a 64-bit model specific register (MSR) specified in
@@ -986,6 +997,7 @@
__writecr3(unsigned int __cr3_val) {
__asm__ ("mov %0, %%cr3" : : "q"(__cr3_val) : "memory");
}
+#endif

#ifdef __cplusplus
}
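
On the consumer side, code that includes Intrin.h can mirror the same
guard. A hypothetical caller-side sketch (full_compiler_barrier is not
part of the header), assuming the GCC/clang __atomic_signal_fence builtin
as the non-x86 fallback:

#include <Intrin.h>

/* Hypothetical user code: after this patch _ReadWriteBarrier is only
 * defined for x86/x86_64, so a portable compiler barrier needs its own
 * fallback on other targets such as Windows on ARM. */
static void full_compiler_barrier(void) {
#if defined(__i386__) || defined(__x86_64__)
  _ReadWriteBarrier();                      /* compiler-only barrier; deprecated here */
#else
  __atomic_signal_fence(__ATOMIC_SEQ_CST);  /* builtin equivalent */
#endif
}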