diff --git a/lib/Target/AArch64/AArch64InstrInfo.cpp b/lib/Target/AArch64/AArch64InstrInfo.cpp
index 1e19eb0c741736e6c3f000ee3e518a4c1359a9bb..ba185a436f1625e3ced226c21eb73bd06d71d97d 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -26,7 +26,6 @@
 #include "llvm/IR/Function.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/TargetRegistry.h"
-
 #include <algorithm>
 
 #define GET_INSTRINFO_CTOR_DTOR
@@ -133,8 +132,19 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
         .addImm(16);
       return;
     }
+  } else if (AArch64::FPR16RegClass.contains(DestReg, SrcReg)) {
+    // A copy between two FPR16 registers is implemented as a copy between
+    // their containing FPR32 super-registers.
+    const TargetRegisterInfo *TRI = &getRegisterInfo();
+    unsigned Dst = TRI->getMatchingSuperReg(DestReg, AArch64::sub_16,
+                                            &AArch64::FPR32RegClass);
+    unsigned Src = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_16,
+                                            &AArch64::FPR32RegClass);
+    BuildMI(MBB, I, DL, get(AArch64::FMOVss), Dst)
+      .addReg(Src);
+    return;
   } else {
-    llvm_unreachable("Unknown register class in copyPhysReg");
+    CopyPhysRegTuple(MBB, I, DL, DestReg, SrcReg);
+    return;
   }
 
   // E.g. ORR xDst, xzr, xSrc, lsl #0
@@ -144,6 +154,55 @@ void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
     .addImm(0);
 }
 
+void AArch64InstrInfo::CopyPhysRegTuple(MachineBasicBlock &MBB,
+                                        MachineBasicBlock::iterator I,
+                                        DebugLoc DL, unsigned DestReg,
+                                        unsigned SrcReg) const {
+  unsigned SubRegs;
+  bool IsQRegs;
+  if (AArch64::DPairRegClass.contains(DestReg, SrcReg)) {
+    SubRegs = 2;
+    IsQRegs = false;
+  } else if (AArch64::DTripleRegClass.contains(DestReg, SrcReg)) {
+    SubRegs = 3;
+    IsQRegs = false;
+  } else if (AArch64::DQuadRegClass.contains(DestReg, SrcReg)) {
+    SubRegs = 4;
+    IsQRegs = false;
+  } else if (AArch64::QPairRegClass.contains(DestReg, SrcReg)) {
+    SubRegs = 2;
+    IsQRegs = true;
+  } else if (AArch64::QTripleRegClass.contains(DestReg, SrcReg)) {
+    SubRegs = 3;
+    IsQRegs = true;
+  } else if (AArch64::QQuadRegClass.contains(DestReg, SrcReg)) {
+    SubRegs = 4;
+    IsQRegs = true;
+  } else
+    llvm_unreachable("Unknown register class");
+
+  unsigned BeginIdx = IsQRegs ? AArch64::qsub_0 : AArch64::dsub_0;
+  int Spacing = 1;
+  const TargetRegisterInfo *TRI = &getRegisterInfo();
+  // Copy the register tuple backward when the first destination sub-register
+  // overlaps with SrcReg.
+  if (TRI->regsOverlap(SrcReg, TRI->getSubReg(DestReg, BeginIdx))) {
+    BeginIdx = BeginIdx + (SubRegs - 1);
+    Spacing = -1;
+  }
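+  // For example, copying the pair (D1, D2) from (D0, D1) forwards would
+  // clobber D1 before it is read; instead emit D2 <- D1, then D1 <- D0.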
+
+  unsigned Opc = IsQRegs ? AArch64::ORRvvv_16B : AArch64::ORRvvv_8B;
+  for (unsigned i = 0; i != SubRegs; ++i) {
+    unsigned Dst = TRI->getSubReg(DestReg, BeginIdx + i * Spacing);
+    unsigned Src = TRI->getSubReg(SrcReg, BeginIdx + i * Spacing);
+    assert(Dst && Src && "Bad sub-register");
+    BuildMI(MBB, I, DL, get(Opc), Dst)
+        .addReg(Src)
+        .addReg(Src);
+  }
+  return;
+}
+
 /// Does the Opcode represent a conditional branch that we can remove and re-add
 /// at the end of a basic block?
 static bool isCondBranch(unsigned Opc) {
@@ -427,12 +486,18 @@ AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
     default:
       llvm_unreachable("Unknown size for regclass");
     }
-  } else { // The spill of D tuples is implemented by Q tuples
-    if (RC == &AArch64::QPairRegClass)
+  } else { // A super-register (tuple) class with more than one sub-register.
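+    // Tuples of D registers are spilled with the 64-bit (8B) ST1 variants,
+    // tuples of Q registers with the 128-bit (16B) ones.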
+    if (AArch64::DPairRegClass.hasSubClassEq(RC))
+      StoreOp = AArch64::ST1x2_8B;
+    else if (AArch64::DTripleRegClass.hasSubClassEq(RC))
+      StoreOp = AArch64::ST1x3_8B;
+    else if (AArch64::DQuadRegClass.hasSubClassEq(RC))
+      StoreOp = AArch64::ST1x4_8B;
+    else if (AArch64::QPairRegClass.hasSubClassEq(RC))
       StoreOp = AArch64::ST1x2_16B;
-    else if (RC == &AArch64::QTripleRegClass)
+    else if (AArch64::QTripleRegClass.hasSubClassEq(RC))
       StoreOp = AArch64::ST1x3_16B;
-    else if (RC == &AArch64::QQuadRegClass)
+    else if (AArch64::QQuadRegClass.hasSubClassEq(RC))
       StoreOp = AArch64::ST1x4_16B;
     else
       llvm_unreachable("Unknown reg class");
@@ -487,12 +552,18 @@ AArch64InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
     default:
       llvm_unreachable("Unknown size for regclass");
     }
-  } else { // The spill of D tuples is implemented by Q tuples
-    if (RC == &AArch64::QPairRegClass)
+  } else { // A super-register (tuple) class with more than one sub-register.
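+    // Tuples of D registers are reloaded with the 64-bit (8B) LD1 variants,
+    // tuples of Q registers with the 128-bit (16B) ones.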
+    if (AArch64::DPairRegClass.hasSubClassEq(RC))
+      LoadOp = AArch64::LD1x2_8B;
+    else if (AArch64::DTripleRegClass.hasSubClassEq(RC))
+      LoadOp = AArch64::LD1x3_8B;
+    else if (AArch64::DQuadRegClass.hasSubClassEq(RC))
+      LoadOp = AArch64::LD1x4_8B;
+    else if (AArch64::QPairRegClass.hasSubClassEq(RC))
       LoadOp = AArch64::LD1x2_16B;
-    else if (RC == &AArch64::QTripleRegClass)
+    else if (AArch64::QTripleRegClass.hasSubClassEq(RC))
       LoadOp = AArch64::LD1x3_16B;
-    else if (RC == &AArch64::QQuadRegClass)
+    else if (AArch64::QQuadRegClass.hasSubClassEq(RC))
       LoadOp = AArch64::LD1x4_16B;
     else
       llvm_unreachable("Unknown reg class");
@@ -540,7 +611,8 @@ void AArch64InstrInfo::getAddressConstraints(const MachineInstr &MI,
                                              int &AccessScale, int &MinOffset,
                                              int &MaxOffset) const {
   switch (MI.getOpcode()) {
-  default: llvm_unreachable("Unkown load/store kind");
+  default:
+    llvm_unreachable("Unknown load/store kind");
   case TargetOpcode::DBG_VALUE:
     AccessScale = 1;
     MinOffset = INT_MIN;
@@ -599,6 +671,17 @@ void AArch64InstrInfo::getAddressConstraints(const MachineInstr &MI,
     MinOffset = -0x40 * AccessScale;
     MaxOffset = 0x3f * AccessScale;
     return;
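+  // For the LD1/ST1 multi-register forms the access size is the total
+  // transfer: number of registers times the register width in bytes.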
+  case AArch64::LD1x2_8B: case AArch64::ST1x2_8B:
+    AccessScale = 16;
+    MinOffset = 0;
+    MaxOffset = 0xfff * AccessScale;
+    return;
+  case AArch64::LD1x3_8B: case AArch64::ST1x3_8B:
+    AccessScale = 24;
+    MinOffset = 0;
+    MaxOffset = 0xfff * AccessScale;
+    return;
+  case AArch64::LD1x4_8B: case AArch64::ST1x4_8B:
   case AArch64::LD1x2_16B: case AArch64::ST1x2_16B:
     AccessScale = 32;
     MinOffset = 0;