AMDGPU: Separate R600 and GCN TableGen files

Summary:
We now have two sets of generated TableGen files, one for R600 and one
for GCN, so each sub-target has its own tables of instructions,
registers, ISel patterns, etc.  This should help reduce compile time,
since each sub-target only has to consider information that is
specific to itself.  It will also help keep the R600 sub-target from
slowing down new features for GCN, like disassembler support,
GlobalISel, etc.
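
As a rough illustration of the effect, here is a minimal C++ sketch of
the shape of the generated headers after the split (not the actual
TableGen output; the enum contents are illustrative):

  // Before: one shared set of generated tables for both sub-targets.
  namespace llvm {
  namespace AMDGPU {
    enum { COPY, PRED_X, DOT_4 /* ...all R600 and GCN opcodes... */ };
  } // namespace AMDGPU
  } // namespace llvm

  // After: R600 gets its own namespace and tables, so R600 passes
  // reference R600::COPY, R600::PRED_X, etc., as in the diff below.
  namespace llvm {
  namespace R600 {
    enum { COPY, PRED_X, DOT_4 /* ...R600-only opcodes... */ };
  } // namespace R600
  } // namespace llvm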

Reviewers: arsenm, nhaehnle, jvesely

Reviewed By: arsenm

Subscribers: MatzeB, kzhuravl, wdng, mgorny, yaxunl, dstuttard, tpr, t-tye, javed.absar, llvm-commits

Differential Revision: https://reviews.llvm.org/D46365

llvm-svn: 335942
diff --git a/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp b/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp
index dbe83bc..afded91 100644
--- a/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp
+++ b/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp
@@ -162,7 +162,7 @@
       for (MachineInstr::mop_iterator It = SU->getInstr()->operands_begin(),
           E = SU->getInstr()->operands_end(); It != E; ++It) {
         MachineOperand &MO = *It;
-        if (MO.isReg() && MO.getReg() == AMDGPU::ALU_LITERAL_X)
+        if (MO.isReg() && MO.getReg() == R600::ALU_LITERAL_X)
           ++CurEmitted;
       }
     }
@@ -181,7 +181,7 @@
 
 static bool
 isPhysicalRegCopy(MachineInstr *MI) {
-  if (MI->getOpcode() != AMDGPU::COPY)
+  if (MI->getOpcode() != R600::COPY)
     return false;
 
   return !TargetRegisterInfo::isVirtualRegister(MI->getOperand(1).getReg());
@@ -224,14 +224,14 @@
     return AluTrans;
 
   switch (MI->getOpcode()) {
-  case AMDGPU::PRED_X:
+  case R600::PRED_X:
     return AluPredX;
-  case AMDGPU::INTERP_PAIR_XY:
-  case AMDGPU::INTERP_PAIR_ZW:
-  case AMDGPU::INTERP_VEC_LOAD:
-  case AMDGPU::DOT_4:
+  case R600::INTERP_PAIR_XY:
+  case R600::INTERP_PAIR_ZW:
+  case R600::INTERP_VEC_LOAD:
+  case R600::DOT_4:
     return AluT_XYZW;
-  case AMDGPU::COPY:
+  case R600::COPY:
     if (MI->getOperand(1).isUndef()) {
       // MI will become a KILL, don't consider it in scheduling
       return AluDiscarded;
@@ -246,7 +246,7 @@
   if(TII->isVector(*MI) ||
      TII->isCubeOp(MI->getOpcode()) ||
      TII->isReductionOp(MI->getOpcode()) ||
-     MI->getOpcode() == AMDGPU::GROUP_BARRIER) {
+     MI->getOpcode() == R600::GROUP_BARRIER) {
     return AluT_XYZW;
   }
 
@@ -257,13 +257,13 @@
   // Is the result already assigned to a channel?
   unsigned DestSubReg = MI->getOperand(0).getSubReg();
   switch (DestSubReg) {
-  case AMDGPU::sub0:
+  case R600::sub0:
     return AluT_X;
-  case AMDGPU::sub1:
+  case R600::sub1:
     return AluT_Y;
-  case AMDGPU::sub2:
+  case R600::sub2:
     return AluT_Z;
-  case AMDGPU::sub3:
+  case R600::sub3:
     return AluT_W;
   default:
     break;
@@ -271,16 +271,16 @@
 
   // Is the result already a member of an X/Y/Z/W class?
   unsigned DestReg = MI->getOperand(0).getReg();
-  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_XRegClass) ||
-      regBelongsToClass(DestReg, &AMDGPU::R600_AddrRegClass))
+  if (regBelongsToClass(DestReg, &R600::R600_TReg32_XRegClass) ||
+      regBelongsToClass(DestReg, &R600::R600_AddrRegClass))
     return AluT_X;
-  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_YRegClass))
+  if (regBelongsToClass(DestReg, &R600::R600_TReg32_YRegClass))
     return AluT_Y;
-  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass))
+  if (regBelongsToClass(DestReg, &R600::R600_TReg32_ZRegClass))
     return AluT_Z;
-  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_WRegClass))
+  if (regBelongsToClass(DestReg, &R600::R600_TReg32_WRegClass))
     return AluT_W;
-  if (regBelongsToClass(DestReg, &AMDGPU::R600_Reg128RegClass))
+  if (regBelongsToClass(DestReg, &R600::R600_Reg128RegClass))
     return AluT_XYZW;
 
   // LDS src registers cannot be used in the Trans slot.
@@ -301,13 +301,13 @@
   }
 
   switch (Opcode) {
-  case AMDGPU::PRED_X:
-  case AMDGPU::COPY:
-  case AMDGPU::CONST_COPY:
-  case AMDGPU::INTERP_PAIR_XY:
-  case AMDGPU::INTERP_PAIR_ZW:
-  case AMDGPU::INTERP_VEC_LOAD:
-  case AMDGPU::DOT_4:
+  case R600::PRED_X:
+  case R600::COPY:
+  case R600::CONST_COPY:
+  case R600::INTERP_PAIR_XY:
+  case R600::INTERP_PAIR_ZW:
+  case R600::INTERP_VEC_LOAD:
+  case R600::DOT_4:
     return IDAlu;
   default:
     return IDOther;
@@ -353,7 +353,7 @@
 }
 
 void R600SchedStrategy::AssignSlot(MachineInstr* MI, unsigned Slot) {
-  int DstIndex = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
+  int DstIndex = TII->getOperandIdx(MI->getOpcode(), R600::OpName::dst);
   if (DstIndex == -1) {
     return;
   }
@@ -370,16 +370,16 @@
   // Constrains the regclass of DestReg to assign it to Slot
   switch (Slot) {
   case 0:
-    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_XRegClass);
+    MRI->constrainRegClass(DestReg, &R600::R600_TReg32_XRegClass);
     break;
   case 1:
-    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_YRegClass);
+    MRI->constrainRegClass(DestReg, &R600::R600_TReg32_YRegClass);
     break;
   case 2:
-    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass);
+    MRI->constrainRegClass(DestReg, &R600::R600_TReg32_ZRegClass);
     break;
   case 3:
-    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_WRegClass);
+    MRI->constrainRegClass(DestReg, &R600::R600_TReg32_WRegClass);
     break;
   }
 }
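
For reference, the R600:: names used throughout the diff come from the
new R600-specific generated headers.  A hedged sketch of how a file
like this one picks them up, following the usual TableGen backend
conventions (exact macro placement varies; treat as illustrative):

  // Opcode enums: provides llvm::R600::COPY, R600::PRED_X, R600::DOT_4, ...
  #define GET_INSTRINFO_ENUM
  #include "R600GenInstrInfo.inc"

  // Register and sub-register index enums: provides llvm::R600::sub0..sub3
  // (register class declarations such as R600_TReg32_XRegClass live in the
  // same .inc under a different GET_REGINFO_* guard).
  #define GET_REGINFO_ENUM
  #include "R600GenRegisterInfo.inc"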