Add Mode64Bit feature and sink it down to MC layer.

Concretely, the X86Subtarget constructor loses its explicit is64Bit flag: the
mode is now derived from the target triple via X86_MC::ParseX86Triple (see the
sketch below) and folded into the feature string, the Is64Bit member is renamed
In64BitMode, and the GetCpuIDAndInfo/DetectFamilyModel helpers move out of
X86Subtarget.cpp into the X86_MC namespace in the MC layer.
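
For reference, a minimal sketch of what the triple parsing sunk into the MC
layer might look like. The body of X86_MC::ParseX86Triple is not part of this
diff, so the exact feature-string values below are assumptions:

    // Hypothetical sketch, not the committed implementation.
    // Turn the target triple into a mode feature so the subtarget no longer
    // needs an explicit is64Bit constructor argument. The "+64bit-mode" /
    // "-64bit-mode" feature names are assumed here.
    #include "llvm/ADT/Triple.h"
    #include <string>

    namespace llvm {
    namespace X86_MC {
      std::string ParseX86Triple(const std::string &TT) {
        Triple TheTriple(TT);
        if (TheTriple.getArch() == Triple::x86_64)
          return "+64bit-mode"; // enables Mode64Bit, which sets In64BitMode
        return "-64bit-mode";
      }
    } // end namespace X86_MC
    } // end namespace llvm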

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@134641 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp
index 1c1a10d..cba0484 100644
--- a/lib/Target/X86/X86Subtarget.cpp
+++ b/lib/Target/X86/X86Subtarget.cpp
@@ -158,7 +158,7 @@
 /// IsLegalToCallImmediateAddr - Return true if the subtarget allows calls
 /// to immediate address.
 bool X86Subtarget::IsLegalToCallImmediateAddr(const TargetMachine &TM) const {
-  if (Is64Bit)
+  if (In64BitMode)
     return false;
   return isTargetELF() || TM.getRelocationModel() == Reloc::Static;
 }
@@ -174,73 +174,6 @@
   return 200;
 }
 
-/// GetCpuIDAndInfo - Execute the specified cpuid and return the 4 values in the
-/// specified arguments.  If we can't run cpuid on the host, return true.
-static bool GetCpuIDAndInfo(unsigned value, unsigned *rEAX,
-                            unsigned *rEBX, unsigned *rECX, unsigned *rEDX) {
-#if defined(__x86_64__) || defined(_M_AMD64) || defined (_M_X64)
-  #if defined(__GNUC__)
-    // gcc doesn't know cpuid would clobber ebx/rbx. Preseve it manually.
-    asm ("movq\t%%rbx, %%rsi\n\t"
-         "cpuid\n\t"
-         "xchgq\t%%rbx, %%rsi\n\t"
-         : "=a" (*rEAX),
-           "=S" (*rEBX),
-           "=c" (*rECX),
-           "=d" (*rEDX)
-         :  "a" (value));
-    return false;
-  #elif defined(_MSC_VER)
-    int registers[4];
-    __cpuid(registers, value);
-    *rEAX = registers[0];
-    *rEBX = registers[1];
-    *rECX = registers[2];
-    *rEDX = registers[3];
-    return false;
-  #endif
-#elif defined(i386) || defined(__i386__) || defined(__x86__) || defined(_M_IX86)
-  #if defined(__GNUC__)
-    asm ("movl\t%%ebx, %%esi\n\t"
-         "cpuid\n\t"
-         "xchgl\t%%ebx, %%esi\n\t"
-         : "=a" (*rEAX),
-           "=S" (*rEBX),
-           "=c" (*rECX),
-           "=d" (*rEDX)
-         :  "a" (value));
-    return false;
-  #elif defined(_MSC_VER)
-    __asm {
-      mov   eax,value
-      cpuid
-      mov   esi,rEAX
-      mov   dword ptr [esi],eax
-      mov   esi,rEBX
-      mov   dword ptr [esi],ebx
-      mov   esi,rECX
-      mov   dword ptr [esi],ecx
-      mov   esi,rEDX
-      mov   dword ptr [esi],edx
-    }
-    return false;
-  #endif
-#endif
-  return true;
-}
-
-static void DetectFamilyModel(unsigned EAX, unsigned &Family, unsigned &Model) {
-  Family = (EAX >> 8) & 0xf; // Bits 8 - 11
-  Model  = (EAX >> 4) & 0xf; // Bits 4 - 7
-  if (Family == 6 || Family == 0xf) {
-    if (Family == 0xf)
-      // Examine extended family ID if family ID is F.
-      Family += (EAX >> 20) & 0xff;    // Bits 20 - 27
-    // Examine extended model ID if family ID is 6 or F.
-    Model += ((EAX >> 16) & 0xf) << 4; // Bits 16 - 19
-  }
-}
-
 void X86Subtarget::AutoDetectSubtargetFeatures() {
   unsigned EAX = 0, EBX = 0, ECX = 0, EDX = 0;
   union {
@@ -248,10 +181,10 @@
     char     c[12];
   } text;
   
-  if (GetCpuIDAndInfo(0, &EAX, text.u+0, text.u+2, text.u+1))
+  if (X86_MC::GetCpuIDAndInfo(0, &EAX, text.u+0, text.u+2, text.u+1))
     return;
 
-  GetCpuIDAndInfo(0x1, &EAX, &EBX, &ECX, &EDX);
+  X86_MC::GetCpuIDAndInfo(0x1, &EAX, &EBX, &ECX, &EDX);
   
   if ((EDX >> 15) & 1) HasCMov = true;
   if ((EDX >> 23) & 1) X86SSELevel = MMX;
@@ -276,13 +209,13 @@
     // Determine if bit test memory instructions are slow.
     unsigned Family = 0;
     unsigned Model  = 0;
-    DetectFamilyModel(EAX, Family, Model);
+    X86_MC::DetectFamilyModel(EAX, Family, Model);
     IsBTMemSlow = IsAMD || (Family == 6 && Model >= 13);
     // If it's Nehalem, unaligned memory access is fast.
     if (Family == 15 && Model == 26)
       IsUAMemFast = true;
 
-    GetCpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX);
+    X86_MC::GetCpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX);
     HasX86_64 = (EDX >> 29) & 0x1;
     HasSSE4A = IsAMD && ((ECX >> 6) & 0x1);
     HasFMA4 = IsAMD && ((ECX >> 16) & 0x1);
@@ -291,7 +224,7 @@
 
 X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
                            const std::string &FS, 
-                           bool is64Bit, unsigned StackAlignOverride)
+                           unsigned StackAlignOverride)
   : X86GenSubtargetInfo(TT, CPU, FS)
   , PICStyle(PICStyles::None)
   , X86SSELevel(NoMMXSSE)
@@ -312,15 +245,26 @@
   // FIXME: this is a known good value for Yonah. How about others?
   , MaxInlineSizeThreshold(128)
   , TargetTriple(TT)
-  , Is64Bit(is64Bit) {
+  , In64BitMode(false) {
+  // Insert the architecture feature derived from the target triple into the
+  // feature string. This is important for setting features that are implied
+  // based on the architecture version.
+  std::string ArchFS = X86_MC::ParseX86Triple(TT);
+  if (!FS.empty()) {
+    if (!ArchFS.empty())
+      ArchFS = ArchFS + "," + FS;
+    else
+      ArchFS = FS;
+  }
+
+  std::string CPUName = CPU;
+  if (CPUName.empty())
+    CPUName = sys::getHostCPUName();
 
   // Determine default and user specified characteristics
-  if (!CPU.empty() || !FS.empty()) {
+  if (!CPUName.empty() || !ArchFS.empty()) {
     // If feature string is not empty, parse features string.
-    std::string CPUName = CPU;
-    if (CPUName.empty())
-      CPUName = sys::getHostCPUName();
-    ParseSubtargetFeatures(CPUName, FS);
+    ParseSubtargetFeatures(CPUName, ArchFS);
     // All X86-64 CPUs also have SSE2, however user might request no SSE via 
     // -mattr, so don't force SSELevel here.
     if (HasAVX)
@@ -328,14 +272,19 @@
   } else {
     // Otherwise, use CPUID to auto-detect feature set.
     AutoDetectSubtargetFeatures();
+
+    // If CPU is 64-bit capable, default to 64-bit mode if not specified.
+    In64BitMode = HasX86_64;
+
     // Make sure SSE2 is enabled; it is available on all X86-64 CPUs.
-    if (Is64Bit && !HasAVX && X86SSELevel < SSE2)
+    if (In64BitMode && !HasAVX && X86SSELevel < SSE2)
       X86SSELevel = SSE2;
   }
 
   // If requesting codegen for X86-64, make sure that 64-bit features
   // are enabled.
-  if (Is64Bit) {
+  // FIXME: Remove this feature since it's not actually being used.
+  if (In64BitMode) {
     HasX86_64 = true;
 
     // All 64-bit cpus have cmov support.
@@ -345,7 +294,7 @@
   DEBUG(dbgs() << "Subtarget features: SSELevel " << X86SSELevel
                << ", 3DNowLevel " << X863DNowLevel
                << ", 64bit " << HasX86_64 << "\n");
-  assert((!Is64Bit || HasX86_64) &&
+  assert((!In64BitMode || HasX86_64) &&
          "64-bit code requested on a subtarget that doesn't support it!");
 
   // Stack alignment is 16 bytes on Darwin, FreeBSD, Linux and Solaris (both
@@ -353,6 +302,6 @@
   if (StackAlignOverride)
     stackAlignment = StackAlignOverride;
   else if (isTargetDarwin() || isTargetFreeBSD() || isTargetLinux() ||
-           isTargetSolaris() || Is64Bit)
+           isTargetSolaris() || In64BitMode)
     stackAlignment = 16;
 }
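
The cpuid helpers removed above now live behind the MC layer's X86_MC
namespace. A plausible set of declarations, with signatures taken from the
removed definitions (the header name is an assumption, e.g. somewhere like
MCTargetDesc/X86MCTargetDesc.h):

    // Assumed declarations for the relocated helpers; only the signatures
    // are known from this diff, their new home is a guess.
    #include <string>

    namespace llvm {
    namespace X86_MC {
      std::string ParseX86Triple(const std::string &TT);

      /// GetCpuIDAndInfo - Execute the specified cpuid and return the 4
      /// values in the specified arguments. If we can't run cpuid on the
      /// host, return true.
      bool GetCpuIDAndInfo(unsigned value, unsigned *rEAX, unsigned *rEBX,
                           unsigned *rECX, unsigned *rEDX);

      void DetectFamilyModel(unsigned EAX, unsigned &Family, unsigned &Model);
    } // end namespace X86_MC
    } // end namespace llvm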