Fix the ridiculous SubtargetFeatures API where it implicitly expects the CPU
name to be encoded as the first feature. It then uses that CPU name to look up
features / the scheduling itinerary, even though clients know full well which
CPU name is being used to query these properties.

The fix is to just have the clients explicitly pass the CPU name!
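
For illustration, a client-side construction changes roughly as follows
(a sketch, not an actual call site; the variable name ST is invented, and
the fallback behavior is the one shown in the X86Subtarget hunk below):

  // Before: the CPU name had to be smuggled into the feature string FS.
  X86Subtarget ST(TT, FS, is64Bit, StackAlignOverride);

  // After: the client passes the CPU name explicitly. An empty CPU string
  // still falls back to sys::getHostCPUName() inside the subtarget.
  X86Subtarget ST(TT, CPU, FS, is64Bit, StackAlignOverride);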


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@134127 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp
index adcf69a..d7f630c 100644
--- a/lib/Target/X86/X86Subtarget.cpp
+++ b/lib/Target/X86/X86Subtarget.cpp
@@ -284,7 +284,8 @@
   }
 }
 
-X86Subtarget::X86Subtarget(const std::string &TT, const std::string &FS, 
+X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
+                           const std::string &FS, 
                            bool is64Bit, unsigned StackAlignOverride)
   : PICStyle(PICStyles::None)
   , X86SSELevel(NoMMXSSE)
@@ -308,10 +309,12 @@
   , Is64Bit(is64Bit) {
 
   // Determine default and user specified characteristics
-  if (!FS.empty()) {
+  if (!CPU.empty() || !FS.empty()) {
     // If feature string is not empty, parse features string.
-    std::string CPU = sys::getHostCPUName();
-    ParseSubtargetFeatures(FS, CPU);
+    std::string CPUName = CPU;
+    if (CPUName.empty())
+      CPUName = sys::getHostCPUName();
+    ParseSubtargetFeatures(FS, CPUName);
     // All X86-64 CPUs also have SSE2, however user might request no SSE via 
     // -mattr, so don't force SSELevel here.
     if (HasAVX)