//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "arm64-isel"
#include "ARM64TargetMachine.h"
#include "MCTargetDesc/ARM64AddressingModes.h"
#include "llvm/ADT/APSInt.h"
using namespace llvm;
+#define DEBUG_TYPE "arm64-isel"
+
//===--------------------------------------------------------------------===//
/// ARM64DAGToDAGISel - ARM64 specific code to select ARM64 machine
/// instructions for SelectionDAG operations.
public:
explicit ARM64DAGToDAGISel(ARM64TargetMachine &tm, CodeGenOpt::Level OptLevel)
: SelectionDAGISel(tm, OptLevel), TM(tm),
- Subtarget(&TM.getSubtarget<ARM64Subtarget>()), ForCodeSize(false) {}
+ Subtarget(nullptr), ForCodeSize(false) {}
- virtual const char *getPassName() const {
+ const char *getPassName() const override {
return "ARM64 Instruction Selection";
}
- virtual bool runOnMachineFunction(MachineFunction &MF) {
+ bool runOnMachineFunction(MachineFunction &MF) override {
AttributeSet FnAttrs = MF.getFunction()->getAttributes();
ForCodeSize =
FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
Attribute::OptimizeForSize) ||
FnAttrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
+ Subtarget = &TM.getSubtarget<ARM64Subtarget>();
return SelectionDAGISel::runOnMachineFunction(MF);
}
- SDNode *Select(SDNode *Node);
+ SDNode *Select(SDNode *Node) override;
/// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
/// inline asm expressions.
- virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
- char ConstraintCode,
- std::vector<SDValue> &OutOps);
+ bool SelectInlineAsmMemoryOperand(const SDValue &Op,
+ char ConstraintCode,
+ std::vector<SDValue> &OutOps) override;
SDNode *SelectMLAV64LaneV128(SDNode *N);
SDNode *SelectMULLV64LaneV128(unsigned IntNo, SDNode *N);
return SelectAddrModeUnscaled(N, 16, Base, OffImm);
}
- bool SelectAddrModeRO8(SDValue N, SDValue &Base, SDValue &Offset,
- SDValue &Imm) {
- return SelectAddrModeRO(N, 1, Base, Offset, Imm);
- }
- bool SelectAddrModeRO16(SDValue N, SDValue &Base, SDValue &Offset,
- SDValue &Imm) {
- return SelectAddrModeRO(N, 2, Base, Offset, Imm);
- }
- bool SelectAddrModeRO32(SDValue N, SDValue &Base, SDValue &Offset,
- SDValue &Imm) {
- return SelectAddrModeRO(N, 4, Base, Offset, Imm);
+ template<int Width>
+ bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
+ SDValue &SignExtend, SDValue &DoShift) {
+ return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
}
- bool SelectAddrModeRO64(SDValue N, SDValue &Base, SDValue &Offset,
- SDValue &Imm) {
- return SelectAddrModeRO(N, 8, Base, Offset, Imm);
- }
- bool SelectAddrModeRO128(SDValue N, SDValue &Base, SDValue &Offset,
- SDValue &Imm) {
- return SelectAddrModeRO(N, 16, Base, Offset, Imm);
+
+ template<int Width>
+ bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
+ SDValue &SignExtend, SDValue &DoShift) {
+ return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
}
- bool SelectAddrModeNoIndex(SDValue N, SDValue &Val);
+
/// Form sequences of consecutive 64/128-bit registers for use in NEON
/// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
SDNode *SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
unsigned SubRegIdx);
+ SDNode *SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
+ unsigned SubRegIdx);
SDNode *SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
+ SDNode *SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
SDNode *SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
+ SDNode *SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
SDNode *SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
+ SDNode *SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
SDNode *SelectSIMDAddSubNarrowing(unsigned IntNo, SDNode *Node);
SDNode *SelectSIMDXtnNarrowing(unsigned IntNo, SDNode *Node);
- SDNode *SelectAtomic(SDNode *Node, unsigned Op8, unsigned Op16, unsigned Op32,
- unsigned Op64);
-
SDNode *SelectBitfieldExtractOp(SDNode *N);
SDNode *SelectBitfieldInsertOp(SDNode *N);
SDValue &OffImm);
bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
SDValue &OffImm);
- bool SelectAddrModeRO(SDValue N, unsigned Size, SDValue &Base,
- SDValue &Offset, SDValue &Imm);
+ bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
+ SDValue &Offset, SDValue &SignExtend,
+ SDValue &DoShift);
+ bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
+ SDValue &Offset, SDValue &SignExtend,
+ SDValue &DoShift);
bool isWorthFolding(SDValue V) const;
- bool SelectExtendedSHL(SDValue N, unsigned Size, SDValue &Offset,
- SDValue &Imm);
+ bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
+ SDValue &Offset, SDValue &SignExtend);
template<unsigned RegWidth>
bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
isIntImmediate(N->getOperand(1).getNode(), Imm);
}
-bool ARM64DAGToDAGISel::SelectAddrModeNoIndex(SDValue N, SDValue &Val) {
- EVT ValTy = N.getValueType();
- if (ValTy != MVT::i64)
- return false;
- Val = N;
- return true;
-}
-
bool ARM64DAGToDAGISel::SelectInlineAsmMemoryOperand(
const SDValue &Op, char ConstraintCode, std::vector<SDValue> &OutOps) {
assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
/// getShiftTypeForNode - Translate a shift node to the corresponding
-/// ShiftType value.
+/// ShiftExtendType value.
-static ARM64_AM::ShiftType getShiftTypeForNode(SDValue N) {
+static ARM64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
switch (N.getOpcode()) {
default:
- return ARM64_AM::InvalidShift;
+ return ARM64_AM::InvalidShiftExtend;
case ISD::SHL:
return ARM64_AM::LSL;
case ISD::SRL:
/// supported.
bool ARM64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
SDValue &Reg, SDValue &Shift) {
- ARM64_AM::ShiftType ShType = getShiftTypeForNode(N);
- if (ShType == ARM64_AM::InvalidShift)
+ ARM64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
+ if (ShType == ARM64_AM::InvalidShiftExtend)
return false;
if (!AllowROR && ShType == ARM64_AM::ROR)
return false;
/// getExtendTypeForNode - Translate an extend node to the corresponding
-/// ExtendType value.
+/// ShiftExtendType value.
-static ARM64_AM::ExtendType getExtendTypeForNode(SDValue N,
- bool IsLoadStore = false) {
+static ARM64_AM::ShiftExtendType
+getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
if (N.getOpcode() == ISD::SIGN_EXTEND ||
N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
EVT SrcVT;
return ARM64_AM::SXTH;
else if (SrcVT == MVT::i32)
return ARM64_AM::SXTW;
- else if (SrcVT == MVT::i64)
- return ARM64_AM::SXTX;
+ assert(SrcVT != MVT::i64 && "extend from 64-bits?");
- return ARM64_AM::InvalidExtend;
+ return ARM64_AM::InvalidShiftExtend;
} else if (N.getOpcode() == ISD::ZERO_EXTEND ||
N.getOpcode() == ISD::ANY_EXTEND) {
EVT SrcVT = N.getOperand(0).getValueType();
return ARM64_AM::UXTH;
else if (SrcVT == MVT::i32)
return ARM64_AM::UXTW;
- else if (SrcVT == MVT::i64)
- return ARM64_AM::UXTX;
+ assert(SrcVT != MVT::i64 && "extend from 64-bits?");
- return ARM64_AM::InvalidExtend;
+ return ARM64_AM::InvalidShiftExtend;
} else if (N.getOpcode() == ISD::AND) {
ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
if (!CSD)
- return ARM64_AM::InvalidExtend;
+ return ARM64_AM::InvalidShiftExtend;
uint64_t AndMask = CSD->getZExtValue();
switch (AndMask) {
default:
- return ARM64_AM::InvalidExtend;
+ return ARM64_AM::InvalidShiftExtend;
case 0xFF:
- return !IsLoadStore ? ARM64_AM::UXTB : ARM64_AM::InvalidExtend;
+ return !IsLoadStore ? ARM64_AM::UXTB : ARM64_AM::InvalidShiftExtend;
case 0xFFFF:
- return !IsLoadStore ? ARM64_AM::UXTH : ARM64_AM::InvalidExtend;
+ return !IsLoadStore ? ARM64_AM::UXTH : ARM64_AM::InvalidShiftExtend;
case 0xFFFFFFFF:
return ARM64_AM::UXTW;
}
}
- return ARM64_AM::InvalidExtend;
+ return ARM64_AM::InvalidShiftExtend;
}
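// For illustration (an editorial sketch, not part of this patch): under the
// merged naming, extend recognition maps e.g.
//   (and x, 0xFF)       -> UXTB   (rejected when IsLoadStore)
//   (and x, 0xFFFF)     -> UXTH   (rejected when IsLoadStore)
//   (and x, 0xFFFFFFFF) -> UXTW   (accepted in both contexts)
// because the register-offset load/store forms can only encode the
// UXTW/SXTW/SXTX/LSL extend options, not the byte/halfword ones.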
// Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
if (Op1.getOpcode() != ISD::MUL ||
!checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
LaneIdx))
- return 0;
+ return nullptr;
}
SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);
if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
LaneIdx))
- return 0;
+ return nullptr;
SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);
return CurDAG->getMachineNode(SMULLOpc, SDLoc(N), N->getValueType(0), Ops);
}
+/// Instructions that accept extend modifiers like UXTW expect the register
+/// being extended to be a GPR32, but the incoming DAG might be acting on a
+/// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
+/// this is the case.
+static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
+ if (N.getValueType() == MVT::i32)
+ return N;
+
+ SDValue SubReg = CurDAG->getTargetConstant(ARM64::sub_32, MVT::i32);
+ MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
+ SDLoc(N), MVT::i32, N, SubReg);
+ return SDValue(Node, 0);
+}
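// A minimal value-level sketch of what narrowIfNeeded does (illustrative,
// not LLVM API): extracting sub_32 from a GPR64 just takes the low 32 bits,
// since W-registers alias the low halves of X-registers.
//
//   uint32_t narrowIfNeededModel(uint64_t X) {
//     return (uint32_t)X; // EXTRACT_SUBREG sub_32 == truncate to i32
//   }
//
// No runtime instruction is required; the node only satisfies the register
// class expected by the extend-capable instructions.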
+
+
/// SelectArithExtendedRegister - Select an "extended register" operand. This
/// operand folds in an extend followed by an optional left shift.
bool ARM64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
SDValue &Shift) {
unsigned ShiftVal = 0;
- ARM64_AM::ExtendType Ext;
+ ARM64_AM::ShiftExtendType Ext;
if (N.getOpcode() == ISD::SHL) {
ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
return false;
Ext = getExtendTypeForNode(N.getOperand(0));
- if (Ext == ARM64_AM::InvalidExtend)
+ if (Ext == ARM64_AM::InvalidShiftExtend)
return false;
Reg = N.getOperand(0).getOperand(0);
} else {
Ext = getExtendTypeForNode(N);
- if (Ext == ARM64_AM::InvalidExtend)
+ if (Ext == ARM64_AM::InvalidShiftExtend)
return false;
Reg = N.getOperand(0);
// if we're folding a (sext i8), we need the RHS to be a GPR32, even though
// there might not be an actual 32-bit value in the program. We can
// (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
- if (Reg.getValueType() == MVT::i64 && Ext != ARM64_AM::UXTX &&
- Ext != ARM64_AM::SXTX) {
- SDValue SubReg = CurDAG->getTargetConstant(ARM64::sub_32, MVT::i32);
- MachineSDNode *Node = CurDAG->getMachineNode(
- TargetOpcode::EXTRACT_SUBREG, SDLoc(N), MVT::i32, Reg, SubReg);
- Reg = SDValue(Node, 0);
- }
-
+ assert(Ext != ARM64_AM::UXTX && Ext != ARM64_AM::SXTX);
+ Reg = narrowIfNeeded(CurDAG, Reg);
Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), MVT::i32);
return isWorthFolding(N);
}
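// Example of the fold performed above (illustrative asm, not from this
// patch): for a DAG shaped like
//   (add GPR64:%x1, (shl (sext GPR32:%w2 to i64), 2))
// this returns Reg = %w2 and Shift = getArithExtendImm(SXTW, 2), so the
// whole right-hand side folds into a single "add x0, x1, w2, sxtw #2"
// rather than a separate extend, shift and add.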
return SDValue(Node, 0);
}
-static SDValue WidenIfNeeded(SelectionDAG *CurDAG, SDValue N) {
- if (N.getValueType() == MVT::i32) {
- return Widen(CurDAG, N);
- }
-
- return N;
-}
-
/// \brief Check if the given SHL node (\p N) can be used to form an
/// extended register for an addressing mode.
bool ARM64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
- SDValue &Offset, SDValue &Imm) {
+ bool WantExtend, SDValue &Offset,
+ SDValue &SignExtend) {
assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
- if (CSD && (CSD->getZExtValue() & 0x7) == CSD->getZExtValue()) {
-
- ARM64_AM::ExtendType Ext = getExtendTypeForNode(N.getOperand(0), true);
- if (Ext == ARM64_AM::InvalidExtend) {
- Ext = ARM64_AM::UXTX;
- Offset = WidenIfNeeded(CurDAG, N.getOperand(0));
- } else {
- Offset = WidenIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
- }
-
- unsigned LegalShiftVal = Log2_32(Size);
- unsigned ShiftVal = CSD->getZExtValue();
+ if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
+ return false;
- if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
+ if (WantExtend) {
+ ARM64_AM::ShiftExtendType Ext = getExtendTypeForNode(N.getOperand(0), true);
+ if (Ext == ARM64_AM::InvalidShiftExtend)
return false;
- Imm = CurDAG->getTargetConstant(
- ARM64_AM::getMemExtendImm(Ext, ShiftVal != 0), MVT::i32);
- if (isWorthFolding(N))
- return true;
+ Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
+ SignExtend = CurDAG->getTargetConstant(Ext == ARM64_AM::SXTW, MVT::i32);
+ } else {
+ Offset = N.getOperand(0);
+ SignExtend = CurDAG->getTargetConstant(0, MVT::i32);
}
+
+ unsigned LegalShiftVal = Log2_32(Size);
+ unsigned ShiftVal = CSD->getZExtValue();
+
+ if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
+ return false;
+
+ if (isWorthFolding(N))
+ return true;
+
return false;
}
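// Worked case (a sketch under assumed operands): with Size == 4 we have
// LegalShiftVal == 2, so a node such as
//   (shl (sext_inreg GPR64:%x, i32), 2)
// matches when WantExtend is true: Offset becomes the low 32 bits of %x
// (via narrowIfNeeded) and SignExtend == 1, i.e. the "[base, wN, sxtw #2]"
// form. Shift amounts other than 0 or log2(Size) are rejected because the
// addressing mode cannot encode them.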
-bool ARM64DAGToDAGISel::SelectAddrModeRO(SDValue N, unsigned Size,
+bool ARM64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
SDValue &Base, SDValue &Offset,
- SDValue &Imm) {
+ SDValue &SignExtend,
+ SDValue &DoShift) {
if (N.getOpcode() != ISD::ADD)
return false;
SDValue LHS = N.getOperand(0);
// Try to match a shifted extend on the RHS.
if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
- SelectExtendedSHL(RHS, Size, Offset, Imm)) {
+ SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
Base = LHS;
+ DoShift = CurDAG->getTargetConstant(true, MVT::i32);
return true;
}
// Try to match a shifted extend on the LHS.
if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
- SelectExtendedSHL(LHS, Size, Offset, Imm)) {
+ SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
Base = RHS;
+ DoShift = CurDAG->getTargetConstant(true, MVT::i32);
return true;
}
- ARM64_AM::ExtendType Ext = ARM64_AM::UXTX;
+ // There was no shift, whatever else we find.
+ DoShift = CurDAG->getTargetConstant(false, MVT::i32);
+
+ ARM64_AM::ShiftExtendType Ext = ARM64_AM::InvalidShiftExtend;
// Try to match an unshifted extend on the LHS.
if (IsExtendedRegisterWorthFolding &&
- (Ext = getExtendTypeForNode(LHS, true)) != ARM64_AM::InvalidExtend) {
+ (Ext = getExtendTypeForNode(LHS, true)) != ARM64_AM::InvalidShiftExtend) {
Base = RHS;
- Offset = WidenIfNeeded(CurDAG, LHS.getOperand(0));
- Imm = CurDAG->getTargetConstant(ARM64_AM::getMemExtendImm(Ext, false),
- MVT::i32);
+ Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
+ SignExtend = CurDAG->getTargetConstant(Ext == ARM64_AM::SXTW, MVT::i32);
if (isWorthFolding(LHS))
return true;
}
// Try to match an unshifted extend on the RHS.
if (IsExtendedRegisterWorthFolding &&
- (Ext = getExtendTypeForNode(RHS, true)) != ARM64_AM::InvalidExtend) {
+ (Ext = getExtendTypeForNode(RHS, true)) != ARM64_AM::InvalidShiftExtend) {
Base = LHS;
- Offset = WidenIfNeeded(CurDAG, RHS.getOperand(0));
- Imm = CurDAG->getTargetConstant(ARM64_AM::getMemExtendImm(Ext, false),
- MVT::i32);
+ Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
+ SignExtend = CurDAG->getTargetConstant(Ext == ARM64_AM::SXTW, MVT::i32);
if (isWorthFolding(RHS))
return true;
}
+ return false;
+}
+
+bool ARM64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
+ SDValue &Base, SDValue &Offset,
+ SDValue &SignExtend,
+ SDValue &DoShift) {
+ if (N.getOpcode() != ISD::ADD)
+ return false;
+ SDValue LHS = N.getOperand(0);
+ SDValue RHS = N.getOperand(1);
+
+ // We don't want to match immediate adds here, because they are better lowered
+ // to the register-immediate addressing modes.
+ if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
+ return false;
+
+ // Check if this particular node is reused in any non-memory related
+ // operation. If yes, do not try to fold this node into the address
+ // computation, since the computation will be kept.
+ const SDNode *Node = N.getNode();
+ for (SDNode *UI : Node->uses()) {
+ if (!isa<MemSDNode>(*UI))
+ return false;
+ }
+
+ // Remember if it is worth folding N when it produces extended register.
+ bool IsExtendedRegisterWorthFolding = isWorthFolding(N);
+
+ // Try to match a shifted extend on the RHS.
+ if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
+ SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
+ Base = LHS;
+ DoShift = CurDAG->getTargetConstant(true, MVT::i32);
+ return true;
+ }
+
+ // Try to match a shifted extend on the LHS.
+ if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
+ SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
+ Base = RHS;
+ DoShift = CurDAG->getTargetConstant(true, MVT::i32);
+ return true;
+ }
+
// Match any non-shifted, non-extend, non-immediate add expression.
Base = LHS;
- Offset = WidenIfNeeded(CurDAG, RHS);
- Ext = ARM64_AM::UXTX;
- Imm = CurDAG->getTargetConstant(ARM64_AM::getMemExtendImm(Ext, false),
- MVT::i32);
+ Offset = RHS;
+ SignExtend = CurDAG->getTargetConstant(false, MVT::i32);
+ DoShift = CurDAG->getTargetConstant(false, MVT::i32);
// Reg1 + Reg2 is free: no check needed.
return true;
}
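// The WRO/XRO split introduced here, in terms of the final assembly
// (illustrative, not from the patch):
//   ldr x0, [x1, w2, sxtw #3]   ; WRO: 32-bit offset register + extend
//   ldr x0, [x1, x2, lsl #3]    ; XRO: 64-bit offset register, no extend
// WRO therefore carries a SignExtend flag (SXTW vs. UXTW), while XRO only
// records whether the LSL by log2(Size) is applied (DoShift).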
SDNode *ARM64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
LoadSDNode *LD = cast<LoadSDNode>(N);
if (LD->isUnindexed())
- return NULL;
+ return nullptr;
EVT VT = LD->getMemoryVT();
EVT DstVT = N->getValueType(0);
ISD::MemIndexedMode AM = LD->getAddressingMode();
ISD::LoadExtType ExtType = LD->getExtensionType();
bool InsertTo64 = false;
if (VT == MVT::i64)
- Opcode = IsPre ? ARM64::LDRXpre_isel : ARM64::LDRXpost_isel;
+ Opcode = IsPre ? ARM64::LDRXpre : ARM64::LDRXpost;
else if (VT == MVT::i32) {
if (ExtType == ISD::NON_EXTLOAD)
- Opcode = IsPre ? ARM64::LDRWpre_isel : ARM64::LDRWpost_isel;
+ Opcode = IsPre ? ARM64::LDRWpre : ARM64::LDRWpost;
else if (ExtType == ISD::SEXTLOAD)
- Opcode = IsPre ? ARM64::LDRSWpre_isel : ARM64::LDRSWpost_isel;
+ Opcode = IsPre ? ARM64::LDRSWpre : ARM64::LDRSWpost;
else {
- Opcode = IsPre ? ARM64::LDRWpre_isel : ARM64::LDRWpost_isel;
+ Opcode = IsPre ? ARM64::LDRWpre : ARM64::LDRWpost;
InsertTo64 = true;
// The result of the load is only i32. It's the subreg_to_reg that makes
// it into an i64.
} else if (VT == MVT::i16) {
if (ExtType == ISD::SEXTLOAD) {
if (DstVT == MVT::i64)
- Opcode = IsPre ? ARM64::LDRSHXpre_isel : ARM64::LDRSHXpost_isel;
+ Opcode = IsPre ? ARM64::LDRSHXpre : ARM64::LDRSHXpost;
else
- Opcode = IsPre ? ARM64::LDRSHWpre_isel : ARM64::LDRSHWpost_isel;
+ Opcode = IsPre ? ARM64::LDRSHWpre : ARM64::LDRSHWpost;
} else {
- Opcode = IsPre ? ARM64::LDRHHpre_isel : ARM64::LDRHHpost_isel;
+ Opcode = IsPre ? ARM64::LDRHHpre : ARM64::LDRHHpost;
InsertTo64 = DstVT == MVT::i64;
// The result of the load is only i32. It's the subreg_to_reg that makes
// it into an i64.
} else if (VT == MVT::i8) {
if (ExtType == ISD::SEXTLOAD) {
if (DstVT == MVT::i64)
- Opcode = IsPre ? ARM64::LDRSBXpre_isel : ARM64::LDRSBXpost_isel;
+ Opcode = IsPre ? ARM64::LDRSBXpre : ARM64::LDRSBXpost;
else
- Opcode = IsPre ? ARM64::LDRSBWpre_isel : ARM64::LDRSBWpost_isel;
+ Opcode = IsPre ? ARM64::LDRSBWpre : ARM64::LDRSBWpost;
} else {
- Opcode = IsPre ? ARM64::LDRBBpre_isel : ARM64::LDRBBpost_isel;
+ Opcode = IsPre ? ARM64::LDRBBpre : ARM64::LDRBBpost;
InsertTo64 = DstVT == MVT::i64;
// The result of the load is only i32. It's the subreg_to_reg that makes
// it into an i64.
DstVT = MVT::i32;
}
} else if (VT == MVT::f32) {
- Opcode = IsPre ? ARM64::LDRSpre_isel : ARM64::LDRSpost_isel;
- } else if (VT == MVT::f64) {
- Opcode = IsPre ? ARM64::LDRDpre_isel : ARM64::LDRDpost_isel;
+ Opcode = IsPre ? ARM64::LDRSpre : ARM64::LDRSpost;
+ } else if (VT == MVT::f64 || VT.is64BitVector()) {
+ Opcode = IsPre ? ARM64::LDRDpre : ARM64::LDRDpost;
+ } else if (VT.is128BitVector()) {
+ Opcode = IsPre ? ARM64::LDRQpre : ARM64::LDRQpost;
} else
- return NULL;
+ return nullptr;
SDValue Chain = LD->getChain();
SDValue Base = LD->getBasePtr();
ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
int OffsetVal = (int)OffsetOp->getZExtValue();
SDValue Offset = CurDAG->getTargetConstant(OffsetVal, MVT::i64);
SDValue Ops[] = { Base, Offset, Chain };
- SDNode *Res = CurDAG->getMachineNode(Opcode, SDLoc(N), DstVT, MVT::i64,
+ SDNode *Res = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i64, DstVT,
MVT::Other, Ops);
// Either way, we're replacing the node, so tell the caller that.
Done = true;
+ SDValue LoadedVal = SDValue(Res, 1);
if (InsertTo64) {
SDValue SubReg = CurDAG->getTargetConstant(ARM64::sub_32, MVT::i32);
- SDNode *Sub = CurDAG->getMachineNode(
- ARM64::SUBREG_TO_REG, SDLoc(N), MVT::i64,
- CurDAG->getTargetConstant(0, MVT::i64), SDValue(Res, 0), SubReg);
- ReplaceUses(SDValue(N, 0), SDValue(Sub, 0));
- ReplaceUses(SDValue(N, 1), SDValue(Res, 1));
- ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
- return 0;
- }
- return Res;
+ LoadedVal =
+ SDValue(CurDAG->getMachineNode(ARM64::SUBREG_TO_REG, SDLoc(N), MVT::i64,
+ CurDAG->getTargetConstant(0, MVT::i64),
+ LoadedVal, SubReg),
+ 0);
+ }
+
+ ReplaceUses(SDValue(N, 0), LoadedVal);
+ ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
+ ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
+
+ return nullptr;
}
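// Result numbering after the switch to the real write-back instructions
// (a sketch of the convention assumed above): for "ldr x0, [x1], #8" the
// selected LDRXpost node produces
//   result 0: updated base register (i64)
//   result 1: loaded value
//   result 2: chain
// which is why LoadedVal is SDValue(Res, 1) and the base update maps to
// SDValue(Res, 0), the reverse of the retired *_isel pseudo-instructions.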
SDNode *ARM64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
SDValue SuperReg = SDValue(Ld, 0);
-
- // MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
- // MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
- // cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
-
- switch (NumVecs) {
- case 4:
- ReplaceUses(SDValue(N, 3), CurDAG->getTargetExtractSubreg(SubRegIdx + 3, dl,
- VT, SuperReg));
- // FALLTHROUGH
- case 3:
- ReplaceUses(SDValue(N, 2), CurDAG->getTargetExtractSubreg(SubRegIdx + 2, dl,
- VT, SuperReg));
- // FALLTHROUGH
- case 2:
- ReplaceUses(SDValue(N, 1), CurDAG->getTargetExtractSubreg(SubRegIdx + 1, dl,
- VT, SuperReg));
- ReplaceUses(SDValue(N, 0),
- CurDAG->getTargetExtractSubreg(SubRegIdx, dl, VT, SuperReg));
- break;
- case 1:
- ReplaceUses(SDValue(N, 0), SuperReg);
- break;
- }
+ for (unsigned i = 0; i < NumVecs; ++i)
+ ReplaceUses(SDValue(N, i),
+ CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
+ return nullptr;
+}
+
+SDNode *ARM64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
+ unsigned Opc, unsigned SubRegIdx) {
+ SDLoc dl(N);
+ EVT VT = N->getValueType(0);
+ SDValue Chain = N->getOperand(0);
+
+ SmallVector<SDValue, 6> Ops;
+ Ops.push_back(N->getOperand(1)); // Mem operand
+ Ops.push_back(N->getOperand(2)); // Incremental
+ Ops.push_back(Chain);
- return 0;
+ std::vector<EVT> ResTys;
+ ResTys.push_back(MVT::i64); // Type of the write back register
+ ResTys.push_back(MVT::Untyped);
+ ResTys.push_back(MVT::Other);
+
+ SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
+
+ // Update uses of write back register
+ ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
+
+ // Update uses of vector list
+ SDValue SuperReg = SDValue(Ld, 1);
+ if (NumVecs == 1)
+ ReplaceUses(SDValue(N, 0), SuperReg);
+ else
+ for (unsigned i = 0; i < NumVecs; ++i)
+ ReplaceUses(SDValue(N, i),
+ CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
+
+ // Update the chain
+ ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
+ return nullptr;
}
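// Illustrative shape of a node this handles: a post-incremented structure
// load such as "ld2 { v0.4s, v1.4s }, [x0], #32" arrives as an
// ARM64ISD::LD2post whose results are (vec0, vec1, updated base, chain).
// The machine node instead returns (updated base, Untyped tuple, chain),
// and the loop above unpacks the tuple with consecutive qsub0/qsub1 (or
// dsub0/dsub1) subregister extracts.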
SDNode *ARM64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
return St;
}
+SDNode *ARM64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
+ unsigned Opc) {
+ SDLoc dl(N);
+ EVT VT = N->getOperand(2)->getValueType(0);
+ SmallVector<EVT, 2> ResTys;
+ ResTys.push_back(MVT::i64); // Type of the write back register
+ ResTys.push_back(MVT::Other); // Type for the Chain
+
+ // Form a REG_SEQUENCE to force register allocation.
+ bool Is128Bit = VT.getSizeInBits() == 128;
+ SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
+ SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
+
+ SmallVector<SDValue, 6> Ops;
+ Ops.push_back(RegSeq);
+ Ops.push_back(N->getOperand(NumVecs + 1)); // base register
+ Ops.push_back(N->getOperand(NumVecs + 2)); // Incremental
+ Ops.push_back(N->getOperand(0)); // Chain
+ SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
+
+ return St;
+}
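// Sketch of the operand layout consumed above (assumed from the ISD node
// definition): an ARM64ISD::ST2post node looks like
//   (st2post chain, vec0, vec1, base, increment)
// so Regs takes operands [1, 1 + NumVecs), the base register is operand
// NumVecs + 1 and the increment operand NumVecs + 2, matching the
// Ops.push_back calls.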
+
/// WidenVector - Given a value in the V64 register class, produce the
/// equivalent value in the V128 register class.
class WidenVector {
SDValue SuperReg = SDValue(Ld, 0);
EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
- switch (NumVecs) {
- case 4: {
- SDValue NV3 =
- CurDAG->getTargetExtractSubreg(ARM64::qsub3, dl, WideVT, SuperReg);
+ static unsigned QSubs[] = { ARM64::qsub0, ARM64::qsub1, ARM64::qsub2,
+ ARM64::qsub3 };
+ for (unsigned i = 0; i < NumVecs; ++i) {
+ SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
if (Narrow)
- ReplaceUses(SDValue(N, 3), NarrowVector(NV3, *CurDAG));
- else
- ReplaceUses(SDValue(N, 3), NV3);
+ NV = NarrowVector(NV, *CurDAG);
+ ReplaceUses(SDValue(N, i), NV);
}
- // FALLTHROUGH
- case 3: {
- SDValue NV2 =
- CurDAG->getTargetExtractSubreg(ARM64::qsub2, dl, WideVT, SuperReg);
- if (Narrow)
- ReplaceUses(SDValue(N, 2), NarrowVector(NV2, *CurDAG));
- else
- ReplaceUses(SDValue(N, 2), NV2);
- }
- // FALLTHROUGH
- case 2: {
- SDValue NV1 =
- CurDAG->getTargetExtractSubreg(ARM64::qsub1, dl, WideVT, SuperReg);
- SDValue NV0 =
- CurDAG->getTargetExtractSubreg(ARM64::qsub0, dl, WideVT, SuperReg);
- if (Narrow) {
- ReplaceUses(SDValue(N, 1), NarrowVector(NV1, *CurDAG));
- ReplaceUses(SDValue(N, 0), NarrowVector(NV0, *CurDAG));
- } else {
- ReplaceUses(SDValue(N, 1), NV1);
- ReplaceUses(SDValue(N, 0), NV0);
+
+ ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
+
+ return Ld;
+}
+
+SDNode *ARM64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
+ unsigned Opc) {
+ SDLoc dl(N);
+ EVT VT = N->getValueType(0);
+ bool Narrow = VT.getSizeInBits() == 64;
+
+ // Form a REG_SEQUENCE to force register allocation.
+ SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
+
+ if (Narrow)
+ std::transform(Regs.begin(), Regs.end(), Regs.begin(),
+ WidenVector(*CurDAG));
+
+ SDValue RegSeq = createQTuple(Regs);
+
+ std::vector<EVT> ResTys;
+ ResTys.push_back(MVT::i64); // Type of the write back register
+ ResTys.push_back(MVT::Untyped);
+ ResTys.push_back(MVT::Other);
+
+ unsigned LaneNo =
+ cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
+
+ SmallVector<SDValue, 6> Ops;
+ Ops.push_back(RegSeq);
+ Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64)); // Lane Number
+ Ops.push_back(N->getOperand(NumVecs + 2)); // Base register
+ Ops.push_back(N->getOperand(NumVecs + 3)); // Incremental
+ Ops.push_back(N->getOperand(0)); // Chain
+ SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
+
+ // Update uses of the write back register
+ ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
+
+ // Update uses of the vector list
+ SDValue SuperReg = SDValue(Ld, 1);
+ if (NumVecs == 1) {
+ ReplaceUses(SDValue(N, 0),
+ Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg);
+ } else {
+ EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
+ static unsigned QSubs[] = { ARM64::qsub0, ARM64::qsub1, ARM64::qsub2,
+ ARM64::qsub3 };
+ for (unsigned i = 0; i < NumVecs; ++i) {
+ SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
+ SuperReg);
+ if (Narrow)
+ NV = NarrowVector(NV, *CurDAG);
+ ReplaceUses(SDValue(N, i), NV);
}
- break;
- }
}
- ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
+ // Update the Chain
+ ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
return Ld;
}
return St;
}
-SDNode *ARM64DAGToDAGISel::SelectAtomic(SDNode *Node, unsigned Op8,
- unsigned Op16, unsigned Op32,
- unsigned Op64) {
- // Mostly direct translation to the given operations, except that we preserve
- // the AtomicOrdering for use later on.
- AtomicSDNode *AN = cast<AtomicSDNode>(Node);
- EVT VT = AN->getMemoryVT();
-
- unsigned Op;
- if (VT == MVT::i8)
- Op = Op8;
- else if (VT == MVT::i16)
- Op = Op16;
- else if (VT == MVT::i32)
- Op = Op32;
- else if (VT == MVT::i64)
- Op = Op64;
- else
- llvm_unreachable("Unexpected atomic operation");
+SDNode *ARM64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
+ unsigned Opc) {
+ SDLoc dl(N);
+ EVT VT = N->getOperand(2)->getValueType(0);
+ bool Narrow = VT.getSizeInBits() == 64;
- SmallVector<SDValue, 4> Ops;
- for (unsigned i = 1; i < AN->getNumOperands(); ++i)
- Ops.push_back(AN->getOperand(i));
+ // Form a REG_SEQUENCE to force register allocation.
+ SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
- Ops.push_back(CurDAG->getTargetConstant(AN->getOrdering(), MVT::i32));
- Ops.push_back(AN->getOperand(0)); // Chain moves to the end
+ if (Narrow)
+ std::transform(Regs.begin(), Regs.end(), Regs.begin(),
+ WidenVector(*CurDAG));
- return CurDAG->SelectNodeTo(Node, Op, AN->getValueType(0), MVT::Other,
- &Ops[0], Ops.size());
+ SDValue RegSeq = createQTuple(Regs);
+
+ SmallVector<EVT, 2> ResTys;
+ ResTys.push_back(MVT::i64); // Type of the write back register
+ ResTys.push_back(MVT::Other);
+
+ unsigned LaneNo =
+ cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
+
+ SmallVector<SDValue, 6> Ops;
+ Ops.push_back(RegSeq);
+ Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64)); // Lane number
+ Ops.push_back(N->getOperand(NumVecs + 2)); // Base register
+ Ops.push_back(N->getOperand(NumVecs + 3)); // Incremental
+ Ops.push_back(N->getOperand(0)); // Chain
+ SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
+
+ // Transfer memoperands.
+ MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+ MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
+ cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
+
+ return St;
}
static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
// Make sure to clamp the MSB so that we preserve the semantics of the
// original operations.
ClampMSB = true;
+ } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
+ isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
+ Srl_imm)) {
+ // If the shift result was truncated, we can still combine them.
+ Opd0 = Op0->getOperand(0).getOperand(0);
+
+ // Use the type of SRL node.
+ VT = Opd0->getValueType(0);
} else if (isOpcWithIntImmediate(Op0, ISD::SRL, Srl_imm)) {
Opd0 = Op0->getOperand(0);
} else if (BiggerPattern) {
// we're looking for a shift of a shift
uint64_t Shl_imm = 0;
+ uint64_t Trunc_bits = 0;
if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
Opd0 = N->getOperand(0).getOperand(0);
+ } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
+ N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
+ // We are looking for a shift of a truncate. A truncate from i64 to i32
+ // can be seen as zeroing the high 32 bits. Our strategy here is to
+ // always generate a 64-bit UBFM; this consistency will help the CSE pass
+ // later find more redundancy.
+ Opd0 = N->getOperand(0).getOperand(0);
+ Trunc_bits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
+ VT = Opd0->getValueType(0);
+ assert(VT == MVT::i64 && "the promoted type should be i64");
} else if (BiggerPattern) {
// Let's pretend a 0 shift left has been performed.
// FIXME: Currently we limit this to the bigger pattern case,
assert(Srl_imm > 0 && Srl_imm < VT.getSizeInBits() &&
"bad amount in shift node!");
// Note: The width operand is encoded as width-1.
- unsigned Width = VT.getSizeInBits() - Srl_imm - 1;
+ unsigned Width = VT.getSizeInBits() - Trunc_bits - Srl_imm - 1;
int sLSB = Srl_imm - Shl_imm;
if (sLSB < 0)
return false;
unsigned Opc, LSB, MSB;
SDValue Opd0;
if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, LSB, MSB))
- return NULL;
+ return nullptr;
EVT VT = N->getValueType(0);
- SDValue Ops[] = { Opd0, CurDAG->getTargetConstant(LSB, VT),
- CurDAG->getTargetConstant(MSB, VT) };
- return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 3);
+
+ // If the bit extract operation is 64bit but the original type is 32bit, we
+ // need to add one EXTRACT_SUBREG.
+ if ((Opc == ARM64::SBFMXri || Opc == ARM64::UBFMXri) && VT == MVT::i32) {
+ SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(LSB, MVT::i64),
+ CurDAG->getTargetConstant(MSB, MVT::i64)};
+
+ SDNode *BFM = CurDAG->getMachineNode(Opc, SDLoc(N), MVT::i64, Ops64);
+ SDValue SubReg = CurDAG->getTargetConstant(ARM64::sub_32, MVT::i32);
+ MachineSDNode *Node =
+ CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, SDLoc(N), MVT::i32,
+ SDValue(BFM, 0), SubReg);
+ return Node;
+ }
+
+ SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(LSB, VT),
+ CurDAG->getTargetConstant(MSB, VT)};
+ return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
}
-// Is mask a i32 or i64 binary sequence 1..10..0 and
-// CountTrailingZeros(mask) == ExpectedTrailingZeros
-static bool isHighMask(uint64_t Mask, unsigned ExpectedTrailingZeros,
- unsigned NumberOfIgnoredHighBits, EVT VT) {
+/// Does DstMask form a complementary pair with the mask provided by
+/// BitsToBeInserted, suitable for use in a BFI instruction. Roughly speaking,
+/// this asks whether DstMask zeroes precisely those bits that will be set by
+/// the other half.
+static bool isBitfieldDstMask(uint64_t DstMask, APInt BitsToBeInserted,
+ unsigned NumberOfIgnoredHighBits, EVT VT) {
assert((VT == MVT::i32 || VT == MVT::i64) &&
"i32 or i64 mask type expected!");
+ unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;
- uint64_t ExpectedMask;
- if (VT == MVT::i32) {
- uint32_t ExpectedMaski32 = ~0 << ExpectedTrailingZeros;
- ExpectedMask = ExpectedMaski32;
- if (NumberOfIgnoredHighBits) {
- uint32_t highMask = ~0 << (32 - NumberOfIgnoredHighBits);
- Mask |= highMask;
- }
- } else {
- ExpectedMask = ((uint64_t) ~0) << ExpectedTrailingZeros;
- if (NumberOfIgnoredHighBits)
- Mask |= ((uint64_t) ~0) << (64 - NumberOfIgnoredHighBits);
- }
+ APInt SignificantDstMask = APInt(BitWidth, DstMask);
+ APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);
- return Mask == ExpectedMask;
+ return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
+ (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue();
}
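// Concrete check with illustrative values: for VT == MVT::i32,
// NumberOfIgnoredHighBits == 0, DstMask == 0xFFFF0000 and
// BitsToBeInserted == 0x0000FFFF,
//   (DstMask & Bits) == 0           // the two halves do not overlap
//   (DstMask | Bits) == 0xFFFFFFFF  // together they cover every bit
// so the masks are complementary and the AND can be folded into the BFM.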
// Look for bits that will be useful for later uses.
UsefulBits &= UsersUsefulBits;
}
+/// Create a machine node performing a notional SHL of Op by ShlAmount. If
+/// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
+/// 0, return Op unchanged.
+static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
+ if (ShlAmount == 0)
+ return Op;
+
+ EVT VT = Op.getValueType();
+ unsigned BitWidth = VT.getSizeInBits();
+ unsigned UBFMOpc = BitWidth == 32 ? ARM64::UBFMWri : ARM64::UBFMXri;
+
+ SDNode *ShiftNode;
+ if (ShlAmount > 0) {
+ // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
+ ShiftNode = CurDAG->getMachineNode(
+ UBFMOpc, SDLoc(Op), VT, Op,
+ CurDAG->getTargetConstant(BitWidth - ShlAmount, VT),
+ CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, VT));
+ } else {
+ // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
+ assert(ShlAmount < 0 && "expected right shift");
+ int ShrAmount = -ShlAmount;
+ ShiftNode = CurDAG->getMachineNode(
+ UBFMOpc, SDLoc(Op), VT, Op, CurDAG->getTargetConstant(ShrAmount, VT),
+ CurDAG->getTargetConstant(BitWidth - 1, VT));
+ }
+
+ return SDValue(ShiftNode, 0);
+}
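// Worked instances of the aliases cited in the comments above (32-bit
// case): for ShlAmount == 3,
//   lsl w0, w1, #3  ==  ubfm w0, w1, #29, #28   ; (32-3, 31-3)
// and for ShlAmount == -3,
//   lsr w0, w1, #3  ==  ubfm w0, w1, #3, #31    ; (3, 32-1)
// exactly the immediates the two getMachineNode calls construct.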
+
+/// Does this tree qualify as an attempt to move a bitfield into position,
+/// essentially "(and (shl VAL, N), Mask)".
+static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
+ SDValue &Src, int &ShiftAmount,
+ int &MaskWidth) {
+ EVT VT = Op.getValueType();
+ unsigned BitWidth = VT.getSizeInBits();
+ (void)BitWidth;
+ assert(BitWidth == 32 || BitWidth == 64);
+
+ APInt KnownZero, KnownOne;
+ CurDAG->computeKnownBits(Op, KnownZero, KnownOne);
+
+ // Non-zero in the sense that they're not provably zero, which is the key
+ // point if we want to use this value.
+ uint64_t NonZeroBits = (~KnownZero).getZExtValue();
+
+ // Discard a constant AND mask if present. It's safe because the node will
+ // already have been factored into the computeKnownBits calculation above.
+ uint64_t AndImm;
+ if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
+ assert((~APInt(BitWidth, AndImm) & ~KnownZero) == 0);
+ Op = Op.getOperand(0);
+ }
+
+ uint64_t ShlImm;
+ if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm))
+ return false;
+ Op = Op.getOperand(0);
+
+ if (!isShiftedMask_64(NonZeroBits))
+ return false;
+
+ ShiftAmount = countTrailingZeros(NonZeroBits);
+ MaskWidth = CountTrailingOnes_64(NonZeroBits >> ShiftAmount);
+
+ // BFI encompasses sufficiently many nodes that it's worth inserting an extra
+ // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
+ // amount.
+ Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount);
+
+ return true;
+}
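// Example (a sketch, assuming no other known bits): for the i32 tree
//   Op = (and (shl %v, 3), 0xF8)
// computeKnownBits leaves NonZeroBits == 0xF8, a shifted mask, giving
// ShiftAmount == 3 and MaskWidth == 5. Since ShlImm == ShiftAmount, Src is
// just %v and getLeftShift inserts no compensating UBFM.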
+
// Given an OR operation, check if we have the following pattern
// ubfm c, b, imm, imm2 (or something that does the same job, see
// isBitfieldExtractOp)
// if yes, the given reference arguments will be updated so that one can replace
// the OR instruction with:
// f = Opc Opd0, Opd1, LSB, MSB ; where Opc is a BFM, LSB = imm, and MSB = imm2
-static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Opd0,
- SDValue &Opd1, unsigned &LSB,
- unsigned &MSB, SelectionDAG *CurDAG) {
+static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Dst,
+ SDValue &Src, unsigned &ImmR,
+ unsigned &ImmS, SelectionDAG *CurDAG) {
assert(N->getOpcode() == ISD::OR && "Expect a OR operation");
// Set Opc
for (int i = 0; i < 2;
++i, std::swap(OrOpd0, OrOpd1), OrOpd1Val = N->getOperand(0)) {
unsigned BFXOpc;
- // Set Opd1, LSB and MSB arguments by looking for
- // c = ubfm b, imm, imm2
- if (!isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Opd1, LSB, MSB,
- NumberOfIgnoredLowBits, true))
- continue;
-
- // Check that the returned opcode is compatible with the pattern,
- // i.e., same type and zero extended (U and not S)
- if ((BFXOpc != ARM64::UBFMXri && VT == MVT::i64) ||
- (BFXOpc != ARM64::UBFMWri && VT == MVT::i32))
- continue;
-
- // Compute the width of the bitfield insertion
- int sMSB = MSB - LSB + 1;
- // FIXME: This constraints is to catch bitfield insertion we may
- // want to widen the pattern if we want to grab general bitfied
- // move case
- if (sMSB <= 0)
+ int DstLSB, Width;
+ if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
+ NumberOfIgnoredLowBits, true)) {
+ // Check that the returned opcode is compatible with the pattern,
+ // i.e., same type and zero extended (U and not S)
+ if ((BFXOpc != ARM64::UBFMXri && VT == MVT::i64) ||
+ (BFXOpc != ARM64::UBFMWri && VT == MVT::i32))
+ continue;
+
+ // Compute the width of the bitfield insertion
+ DstLSB = 0;
+ Width = ImmS - ImmR + 1;
+ // FIXME: This constraint is to catch bitfield insertion; we may
+ // want to widen the pattern if we want to grab the general bitfield
+ // move case.
+ if (Width <= 0)
+ continue;
+
+ // If the mask on the insertee is correct, we have a BFXIL operation. We
+ // can share the ImmR and ImmS values from the already-computed UBFM.
+ } else if (isBitfieldPositioningOp(CurDAG, SDValue(OrOpd0, 0), Src,
+ DstLSB, Width)) {
+ ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
+ ImmS = Width - 1;
+ } else
continue;
// Check the second part of the pattern
EVT VT = OrOpd1->getValueType(0);
- if (VT != MVT::i32 && VT != MVT::i64)
- continue;
+ assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");
// Compute the Known Zero for the candidate of the first operand.
// This allows to catch more general case than just looking for
// AND with imm. Indeed, simplify-demanded-bits may have removed
// the AND instruction because it proves it was useless.
APInt KnownZero, KnownOne;
- CurDAG->ComputeMaskedBits(OrOpd1Val, KnownZero, KnownOne);
+ CurDAG->computeKnownBits(OrOpd1Val, KnownZero, KnownOne);
// Check if there is enough room for the second operand to appear
// in the first one
- if (KnownZero.countTrailingOnes() < (unsigned)sMSB)
+ APInt BitsToBeInserted =
+ APInt::getBitsSet(KnownZero.getBitWidth(), DstLSB, DstLSB + Width);
+
+ if ((BitsToBeInserted & ~KnownZero) != 0)
continue;
// Set the first operand
uint64_t Imm;
if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
- isHighMask(Imm, sMSB, NumberOfIgnoredHighBits, VT))
+ isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
// In that case, we can eliminate the AND
- Opd0 = OrOpd1->getOperand(0);
+ Dst = OrOpd1->getOperand(0);
else
// Maybe the AND has been removed by simplify-demanded-bits
// or is useful because it discards more bits
- Opd0 = OrOpd1Val;
+ Dst = OrOpd1Val;
// both parts match
return true;
SDNode *ARM64DAGToDAGISel::SelectBitfieldInsertOp(SDNode *N) {
if (N->getOpcode() != ISD::OR)
- return NULL;
+ return nullptr;
unsigned Opc;
unsigned LSB, MSB;
SDValue Opd0, Opd1;
if (!isBitfieldInsertOpFromOr(N, Opc, Opd0, Opd1, LSB, MSB, CurDAG))
- return NULL;
+ return nullptr;
EVT VT = N->getValueType(0);
SDValue Ops[] = { Opd0,
Opd1,
CurDAG->getTargetConstant(LSB, VT),
CurDAG->getTargetConstant(MSB, VT) };
- return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 4);
+ return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
}
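// End-to-end examples of OR trees now covered (illustrative, with %c
// assumed to be masked to 8 bits):
//   (or (and %a, 0xFFFFFF00), (and (srl %b, 16), 0xFF))
//     -> "bfxil w_a, w_b, #16, #8" via the bitfield-extract arm, and
//   (or (and %a, 0xFFFF00FF), (shl (and %c, 0xFF), 8))
//     -> "bfi w_a, w_c, #8, #8" via the new positioning arm, where
//        ImmR == (32 - 8) % 32 == 24 and ImmS == 8 - 1 == 7.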
SDNode *ARM64DAGToDAGISel::SelectLIBM(SDNode *N) {
} else if (VT == MVT::f64) {
Variant = 1;
} else
- return 0; // Unrecognized argument type. Fall back on default codegen.
+ return nullptr; // Unrecognized argument type. Fall back on default codegen.
// Pick the FRINTX variant needed to set the flags.
unsigned FRINTXOpc = FRINTXOpcs[Variant];
switch (N->getOpcode()) {
default:
- return 0; // Unrecognized libm ISD node. Fall back on default codegen.
+ return nullptr; // Unrecognized libm ISD node. Fall back on default codegen.
case ISD::FCEIL: {
unsigned FRINTPOpcs[] = { ARM64::FRINTPSr, ARM64::FRINTPDr };
Opc = FRINTPOpcs[Variant];
if (Node->isMachineOpcode()) {
DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
Node->setNodeId(-1);
- return NULL;
+ return nullptr;
}
// Few custom selection stuff.
- SDNode *ResNode = 0;
+ SDNode *ResNode = nullptr;
EVT VT = Node->getValueType(0);
switch (Node->getOpcode()) {
return I;
break;
- case ISD::ATOMIC_LOAD_ADD:
- return SelectAtomic(Node, ARM64::ATOMIC_LOAD_ADD_I8,
- ARM64::ATOMIC_LOAD_ADD_I16, ARM64::ATOMIC_LOAD_ADD_I32,
- ARM64::ATOMIC_LOAD_ADD_I64);
- case ISD::ATOMIC_LOAD_SUB:
- return SelectAtomic(Node, ARM64::ATOMIC_LOAD_SUB_I8,
- ARM64::ATOMIC_LOAD_SUB_I16, ARM64::ATOMIC_LOAD_SUB_I32,
- ARM64::ATOMIC_LOAD_SUB_I64);
- case ISD::ATOMIC_LOAD_AND:
- return SelectAtomic(Node, ARM64::ATOMIC_LOAD_AND_I8,
- ARM64::ATOMIC_LOAD_AND_I16, ARM64::ATOMIC_LOAD_AND_I32,
- ARM64::ATOMIC_LOAD_AND_I64);
- case ISD::ATOMIC_LOAD_OR:
- return SelectAtomic(Node, ARM64::ATOMIC_LOAD_OR_I8,
- ARM64::ATOMIC_LOAD_OR_I16, ARM64::ATOMIC_LOAD_OR_I32,
- ARM64::ATOMIC_LOAD_OR_I64);
- case ISD::ATOMIC_LOAD_XOR:
- return SelectAtomic(Node, ARM64::ATOMIC_LOAD_XOR_I8,
- ARM64::ATOMIC_LOAD_XOR_I16, ARM64::ATOMIC_LOAD_XOR_I32,
- ARM64::ATOMIC_LOAD_XOR_I64);
- case ISD::ATOMIC_LOAD_NAND:
- return SelectAtomic(
- Node, ARM64::ATOMIC_LOAD_NAND_I8, ARM64::ATOMIC_LOAD_NAND_I16,
- ARM64::ATOMIC_LOAD_NAND_I32, ARM64::ATOMIC_LOAD_NAND_I64);
- case ISD::ATOMIC_LOAD_MIN:
- return SelectAtomic(Node, ARM64::ATOMIC_LOAD_MIN_I8,
- ARM64::ATOMIC_LOAD_MIN_I16, ARM64::ATOMIC_LOAD_MIN_I32,
- ARM64::ATOMIC_LOAD_MIN_I64);
- case ISD::ATOMIC_LOAD_MAX:
- return SelectAtomic(Node, ARM64::ATOMIC_LOAD_MAX_I8,
- ARM64::ATOMIC_LOAD_MAX_I16, ARM64::ATOMIC_LOAD_MAX_I32,
- ARM64::ATOMIC_LOAD_MAX_I64);
- case ISD::ATOMIC_LOAD_UMIN:
- return SelectAtomic(
- Node, ARM64::ATOMIC_LOAD_UMIN_I8, ARM64::ATOMIC_LOAD_UMIN_I16,
- ARM64::ATOMIC_LOAD_UMIN_I32, ARM64::ATOMIC_LOAD_UMIN_I64);
- case ISD::ATOMIC_LOAD_UMAX:
- return SelectAtomic(
- Node, ARM64::ATOMIC_LOAD_UMAX_I8, ARM64::ATOMIC_LOAD_UMAX_I16,
- ARM64::ATOMIC_LOAD_UMAX_I32, ARM64::ATOMIC_LOAD_UMAX_I64);
- case ISD::ATOMIC_SWAP:
- return SelectAtomic(Node, ARM64::ATOMIC_SWAP_I8, ARM64::ATOMIC_SWAP_I16,
- ARM64::ATOMIC_SWAP_I32, ARM64::ATOMIC_SWAP_I64);
- case ISD::ATOMIC_CMP_SWAP:
- return SelectAtomic(Node, ARM64::ATOMIC_CMP_SWAP_I8,
- ARM64::ATOMIC_CMP_SWAP_I16, ARM64::ATOMIC_CMP_SWAP_I32,
- ARM64::ATOMIC_CMP_SWAP_I64);
-
case ISD::LOAD: {
// Try to select as an indexed load. Fall through to normal processing
// if we can't.
SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
CurDAG->getTargetConstant(Shifter, MVT::i32) };
- return CurDAG->SelectNodeTo(Node, ARM64::ADDXri, MVT::i64, Ops, 3);
+ return CurDAG->SelectNodeTo(Node, ARM64::ADDXri, MVT::i64, Ops);
}
case ISD::INTRINSIC_W_CHAIN: {
unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
switch (IntNo) {
default:
break;
+ case Intrinsic::arm64_ldaxp:
case Intrinsic::arm64_ldxp: {
+ unsigned Op =
+ IntNo == Intrinsic::arm64_ldaxp ? ARM64::LDAXPX : ARM64::LDXPX;
SDValue MemAddr = Node->getOperand(2);
SDLoc DL(Node);
SDValue Chain = Node->getOperand(0);
- SDNode *Ld = CurDAG->getMachineNode(ARM64::LDXPX, DL, MVT::i64, MVT::i64,
+ SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
MVT::Other, MemAddr, Chain);
// Transfer memoperands.
cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
return Ld;
}
+ case Intrinsic::arm64_stlxp:
case Intrinsic::arm64_stxp: {
+ unsigned Op =
+ IntNo == Intrinsic::arm64_stlxp ? ARM64::STLXPX : ARM64::STXPX;
SDLoc DL(Node);
SDValue Chain = Node->getOperand(0);
SDValue ValLo = Node->getOperand(2);
Ops.push_back(MemAddr);
Ops.push_back(Chain);
- SDNode *St =
- CurDAG->getMachineNode(ARM64::STXPX, DL, MVT::i32, MVT::Other, Ops);
+ SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
// Transfer memoperands.
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
}
}
}
+ case ARM64ISD::LD2post: {
+ if (VT == MVT::v8i8)
+ return SelectPostLoad(Node, 2, ARM64::LD2Twov8b_POST, ARM64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectPostLoad(Node, 2, ARM64::LD2Twov16b_POST, ARM64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectPostLoad(Node, 2, ARM64::LD2Twov4h_POST, ARM64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectPostLoad(Node, 2, ARM64::LD2Twov8h_POST, ARM64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostLoad(Node, 2, ARM64::LD2Twov2s_POST, ARM64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostLoad(Node, 2, ARM64::LD2Twov4s_POST, ARM64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostLoad(Node, 2, ARM64::LD1Twov1d_POST, ARM64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostLoad(Node, 2, ARM64::LD2Twov2d_POST, ARM64::qsub0);
+ break;
+ }
+ case ARM64ISD::LD3post: {
+ if (VT == MVT::v8i8)
+ return SelectPostLoad(Node, 3, ARM64::LD3Threev8b_POST, ARM64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectPostLoad(Node, 3, ARM64::LD3Threev16b_POST, ARM64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectPostLoad(Node, 3, ARM64::LD3Threev4h_POST, ARM64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectPostLoad(Node, 3, ARM64::LD3Threev8h_POST, ARM64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostLoad(Node, 3, ARM64::LD3Threev2s_POST, ARM64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostLoad(Node, 3, ARM64::LD3Threev4s_POST, ARM64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostLoad(Node, 3, ARM64::LD1Threev1d_POST, ARM64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostLoad(Node, 3, ARM64::LD3Threev2d_POST, ARM64::qsub0);
+ break;
+ }
+ case ARM64ISD::LD4post: {
+ if (VT == MVT::v8i8)
+ return SelectPostLoad(Node, 4, ARM64::LD4Fourv8b_POST, ARM64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectPostLoad(Node, 4, ARM64::LD4Fourv16b_POST, ARM64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectPostLoad(Node, 4, ARM64::LD4Fourv4h_POST, ARM64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectPostLoad(Node, 4, ARM64::LD4Fourv8h_POST, ARM64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostLoad(Node, 4, ARM64::LD4Fourv2s_POST, ARM64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostLoad(Node, 4, ARM64::LD4Fourv4s_POST, ARM64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostLoad(Node, 4, ARM64::LD1Fourv1d_POST, ARM64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostLoad(Node, 4, ARM64::LD4Fourv2d_POST, ARM64::qsub0);
+ break;
+ }
+ case ARM64ISD::LD1x2post: {
+ if (VT == MVT::v8i8)
+ return SelectPostLoad(Node, 2, ARM64::LD1Twov8b_POST, ARM64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectPostLoad(Node, 2, ARM64::LD1Twov16b_POST, ARM64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectPostLoad(Node, 2, ARM64::LD1Twov4h_POST, ARM64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectPostLoad(Node, 2, ARM64::LD1Twov8h_POST, ARM64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostLoad(Node, 2, ARM64::LD1Twov2s_POST, ARM64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostLoad(Node, 2, ARM64::LD1Twov4s_POST, ARM64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostLoad(Node, 2, ARM64::LD1Twov1d_POST, ARM64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostLoad(Node, 2, ARM64::LD1Twov2d_POST, ARM64::qsub0);
+ break;
+ }
+ case ARM64ISD::LD1x3post: {
+ if (VT == MVT::v8i8)
+ return SelectPostLoad(Node, 3, ARM64::LD1Threev8b_POST, ARM64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectPostLoad(Node, 3, ARM64::LD1Threev16b_POST, ARM64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectPostLoad(Node, 3, ARM64::LD1Threev4h_POST, ARM64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectPostLoad(Node, 3, ARM64::LD1Threev8h_POST, ARM64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostLoad(Node, 3, ARM64::LD1Threev2s_POST, ARM64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostLoad(Node, 3, ARM64::LD1Threev4s_POST, ARM64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostLoad(Node, 3, ARM64::LD1Threev1d_POST, ARM64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostLoad(Node, 3, ARM64::LD1Threev2d_POST, ARM64::qsub0);
+ break;
+ }
+ case ARM64ISD::LD1x4post: {
+ if (VT == MVT::v8i8)
+ return SelectPostLoad(Node, 4, ARM64::LD1Fourv8b_POST, ARM64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectPostLoad(Node, 4, ARM64::LD1Fourv16b_POST, ARM64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectPostLoad(Node, 4, ARM64::LD1Fourv4h_POST, ARM64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectPostLoad(Node, 4, ARM64::LD1Fourv8h_POST, ARM64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostLoad(Node, 4, ARM64::LD1Fourv2s_POST, ARM64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostLoad(Node, 4, ARM64::LD1Fourv4s_POST, ARM64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostLoad(Node, 4, ARM64::LD1Fourv1d_POST, ARM64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostLoad(Node, 4, ARM64::LD1Fourv2d_POST, ARM64::qsub0);
+ break;
+ }
+ case ARM64ISD::LD1DUPpost: {
+ if (VT == MVT::v8i8)
+ return SelectPostLoad(Node, 1, ARM64::LD1Rv8b_POST, ARM64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectPostLoad(Node, 1, ARM64::LD1Rv16b_POST, ARM64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectPostLoad(Node, 1, ARM64::LD1Rv4h_POST, ARM64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectPostLoad(Node, 1, ARM64::LD1Rv8h_POST, ARM64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostLoad(Node, 1, ARM64::LD1Rv2s_POST, ARM64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostLoad(Node, 1, ARM64::LD1Rv4s_POST, ARM64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostLoad(Node, 1, ARM64::LD1Rv1d_POST, ARM64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostLoad(Node, 1, ARM64::LD1Rv2d_POST, ARM64::qsub0);
+ break;
+ }
+ case ARM64ISD::LD2DUPpost: {
+ if (VT == MVT::v8i8)
+ return SelectPostLoad(Node, 2, ARM64::LD2Rv8b_POST, ARM64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectPostLoad(Node, 2, ARM64::LD2Rv16b_POST, ARM64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectPostLoad(Node, 2, ARM64::LD2Rv4h_POST, ARM64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectPostLoad(Node, 2, ARM64::LD2Rv8h_POST, ARM64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostLoad(Node, 2, ARM64::LD2Rv2s_POST, ARM64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostLoad(Node, 2, ARM64::LD2Rv4s_POST, ARM64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostLoad(Node, 2, ARM64::LD2Rv1d_POST, ARM64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostLoad(Node, 2, ARM64::LD2Rv2d_POST, ARM64::qsub0);
+ break;
+ }
+ case ARM64ISD::LD3DUPpost: {
+ if (VT == MVT::v8i8)
+ return SelectPostLoad(Node, 3, ARM64::LD3Rv8b_POST, ARM64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectPostLoad(Node, 3, ARM64::LD3Rv16b_POST, ARM64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectPostLoad(Node, 3, ARM64::LD3Rv4h_POST, ARM64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectPostLoad(Node, 3, ARM64::LD3Rv8h_POST, ARM64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostLoad(Node, 3, ARM64::LD3Rv2s_POST, ARM64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostLoad(Node, 3, ARM64::LD3Rv4s_POST, ARM64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostLoad(Node, 3, ARM64::LD3Rv1d_POST, ARM64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostLoad(Node, 3, ARM64::LD3Rv2d_POST, ARM64::qsub0);
+ break;
+ }
+ case ARM64ISD::LD4DUPpost: {
+ if (VT == MVT::v8i8)
+ return SelectPostLoad(Node, 4, ARM64::LD4Rv8b_POST, ARM64::dsub0);
+ else if (VT == MVT::v16i8)
+ return SelectPostLoad(Node, 4, ARM64::LD4Rv16b_POST, ARM64::qsub0);
+ else if (VT == MVT::v4i16)
+ return SelectPostLoad(Node, 4, ARM64::LD4Rv4h_POST, ARM64::dsub0);
+ else if (VT == MVT::v8i16)
+ return SelectPostLoad(Node, 4, ARM64::LD4Rv8h_POST, ARM64::qsub0);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostLoad(Node, 4, ARM64::LD4Rv2s_POST, ARM64::dsub0);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostLoad(Node, 4, ARM64::LD4Rv4s_POST, ARM64::qsub0);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostLoad(Node, 4, ARM64::LD4Rv1d_POST, ARM64::dsub0);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostLoad(Node, 4, ARM64::LD4Rv2d_POST, ARM64::qsub0);
+ break;
+ }
+ case ARM64ISD::LD1LANEpost: {
+ if (VT == MVT::v16i8 || VT == MVT::v8i8)
+ return SelectPostLoadLane(Node, 1, ARM64::LD1i8_POST);
+ else if (VT == MVT::v8i16 || VT == MVT::v4i16)
+ return SelectPostLoadLane(Node, 1, ARM64::LD1i16_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32)
+ return SelectPostLoadLane(Node, 1, ARM64::LD1i32_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64)
+ return SelectPostLoadLane(Node, 1, ARM64::LD1i64_POST);
+ break;
+ }
+ case ARM64ISD::LD2LANEpost: {
+ if (VT == MVT::v16i8 || VT == MVT::v8i8)
+ return SelectPostLoadLane(Node, 2, ARM64::LD2i8_POST);
+ else if (VT == MVT::v8i16 || VT == MVT::v4i16)
+ return SelectPostLoadLane(Node, 2, ARM64::LD2i16_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32)
+ return SelectPostLoadLane(Node, 2, ARM64::LD2i32_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64)
+ return SelectPostLoadLane(Node, 2, ARM64::LD2i64_POST);
+ break;
+ }
+ case ARM64ISD::LD3LANEpost: {
+ if (VT == MVT::v16i8 || VT == MVT::v8i8)
+ return SelectPostLoadLane(Node, 3, ARM64::LD3i8_POST);
+ else if (VT == MVT::v8i16 || VT == MVT::v4i16)
+ return SelectPostLoadLane(Node, 3, ARM64::LD3i16_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32)
+ return SelectPostLoadLane(Node, 3, ARM64::LD3i32_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64)
+ return SelectPostLoadLane(Node, 3, ARM64::LD3i64_POST);
+ break;
+ }
+ case ARM64ISD::LD4LANEpost: {
+ if (VT == MVT::v16i8 || VT == MVT::v8i8)
+ return SelectPostLoadLane(Node, 4, ARM64::LD4i8_POST);
+ else if (VT == MVT::v8i16 || VT == MVT::v4i16)
+ return SelectPostLoadLane(Node, 4, ARM64::LD4i16_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32)
+ return SelectPostLoadLane(Node, 4, ARM64::LD4i32_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64)
+ return SelectPostLoadLane(Node, 4, ARM64::LD4i64_POST);
+ break;
+ }
+ case ARM64ISD::ST2post: {
+ VT = Node->getOperand(1).getValueType();
+ if (VT == MVT::v8i8)
+ return SelectPostStore(Node, 2, ARM64::ST2Twov8b_POST);
+ else if (VT == MVT::v16i8)
+ return SelectPostStore(Node, 2, ARM64::ST2Twov16b_POST);
+ else if (VT == MVT::v4i16)
+ return SelectPostStore(Node, 2, ARM64::ST2Twov4h_POST);
+ else if (VT == MVT::v8i16)
+ return SelectPostStore(Node, 2, ARM64::ST2Twov8h_POST);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostStore(Node, 2, ARM64::ST2Twov2s_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostStore(Node, 2, ARM64::ST2Twov4s_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostStore(Node, 2, ARM64::ST2Twov2d_POST);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostStore(Node, 2, ARM64::ST1Twov1d_POST);
+ break;
+ }
+ case ARM64ISD::ST3post: {
+ VT = Node->getOperand(1).getValueType();
+ if (VT == MVT::v8i8)
+ return SelectPostStore(Node, 3, ARM64::ST3Threev8b_POST);
+ else if (VT == MVT::v16i8)
+ return SelectPostStore(Node, 3, ARM64::ST3Threev16b_POST);
+ else if (VT == MVT::v4i16)
+ return SelectPostStore(Node, 3, ARM64::ST3Threev4h_POST);
+ else if (VT == MVT::v8i16)
+ return SelectPostStore(Node, 3, ARM64::ST3Threev8h_POST);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostStore(Node, 3, ARM64::ST3Threev2s_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostStore(Node, 3, ARM64::ST3Threev4s_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostStore(Node, 3, ARM64::ST3Threev2d_POST);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostStore(Node, 3, ARM64::ST1Threev1d_POST);
+ break;
+ }
+ case ARM64ISD::ST4post: {
+ VT = Node->getOperand(1).getValueType();
+ if (VT == MVT::v8i8)
+ return SelectPostStore(Node, 4, ARM64::ST4Fourv8b_POST);
+ else if (VT == MVT::v16i8)
+ return SelectPostStore(Node, 4, ARM64::ST4Fourv16b_POST);
+ else if (VT == MVT::v4i16)
+ return SelectPostStore(Node, 4, ARM64::ST4Fourv4h_POST);
+ else if (VT == MVT::v8i16)
+ return SelectPostStore(Node, 4, ARM64::ST4Fourv8h_POST);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostStore(Node, 4, ARM64::ST4Fourv2s_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostStore(Node, 4, ARM64::ST4Fourv4s_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostStore(Node, 4, ARM64::ST4Fourv2d_POST);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostStore(Node, 4, ARM64::ST1Fourv1d_POST);
+ break;
+ }
+ case ARM64ISD::ST1x2post: {
+ VT = Node->getOperand(1).getValueType();
+ if (VT == MVT::v8i8)
+ return SelectPostStore(Node, 2, ARM64::ST1Twov8b_POST);
+ else if (VT == MVT::v16i8)
+ return SelectPostStore(Node, 2, ARM64::ST1Twov16b_POST);
+ else if (VT == MVT::v4i16)
+ return SelectPostStore(Node, 2, ARM64::ST1Twov4h_POST);
+ else if (VT == MVT::v8i16)
+ return SelectPostStore(Node, 2, ARM64::ST1Twov8h_POST);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostStore(Node, 2, ARM64::ST1Twov2s_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostStore(Node, 2, ARM64::ST1Twov4s_POST);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostStore(Node, 2, ARM64::ST1Twov1d_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostStore(Node, 2, ARM64::ST1Twov2d_POST);
+ break;
+ }
+ case ARM64ISD::ST1x3post: {
+ VT = Node->getOperand(1).getValueType();
+ if (VT == MVT::v8i8)
+ return SelectPostStore(Node, 3, ARM64::ST1Threev8b_POST);
+ else if (VT == MVT::v16i8)
+ return SelectPostStore(Node, 3, ARM64::ST1Threev16b_POST);
+ else if (VT == MVT::v4i16)
+ return SelectPostStore(Node, 3, ARM64::ST1Threev4h_POST);
+ else if (VT == MVT::v8i16)
+ return SelectPostStore(Node, 3, ARM64::ST1Threev8h_POST);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostStore(Node, 3, ARM64::ST1Threev2s_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostStore(Node, 3, ARM64::ST1Threev4s_POST);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostStore(Node, 3, ARM64::ST1Threev1d_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostStore(Node, 3, ARM64::ST1Threev2d_POST);
+ break;
+ }
+ case ARM64ISD::ST1x4post: {
+ VT = Node->getOperand(1).getValueType();
+ if (VT == MVT::v8i8)
+ return SelectPostStore(Node, 4, ARM64::ST1Fourv8b_POST);
+ else if (VT == MVT::v16i8)
+ return SelectPostStore(Node, 4, ARM64::ST1Fourv16b_POST);
+ else if (VT == MVT::v4i16)
+ return SelectPostStore(Node, 4, ARM64::ST1Fourv4h_POST);
+ else if (VT == MVT::v8i16)
+ return SelectPostStore(Node, 4, ARM64::ST1Fourv8h_POST);
+ else if (VT == MVT::v2i32 || VT == MVT::v2f32)
+ return SelectPostStore(Node, 4, ARM64::ST1Fourv2s_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return SelectPostStore(Node, 4, ARM64::ST1Fourv4s_POST);
+ else if (VT == MVT::v1i64 || VT == MVT::v1f64)
+ return SelectPostStore(Node, 4, ARM64::ST1Fourv1d_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v2f64)
+ return SelectPostStore(Node, 4, ARM64::ST1Fourv2d_POST);
+ break;
+ }
+ case ARM64ISD::ST2LANEpost: {
+ VT = Node->getOperand(1).getValueType();
+ if (VT == MVT::v16i8 || VT == MVT::v8i8)
+ return SelectPostStoreLane(Node, 2, ARM64::ST2i8_POST);
+ else if (VT == MVT::v8i16 || VT == MVT::v4i16)
+ return SelectPostStoreLane(Node, 2, ARM64::ST2i16_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32)
+ return SelectPostStoreLane(Node, 2, ARM64::ST2i32_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64)
+ return SelectPostStoreLane(Node, 2, ARM64::ST2i64_POST);
+ break;
+ }
+ case ARM64ISD::ST3LANEpost: {
+ VT = Node->getOperand(1).getValueType();
+ if (VT == MVT::v16i8 || VT == MVT::v8i8)
+ return SelectPostStoreLane(Node, 3, ARM64::ST3i8_POST);
+ else if (VT == MVT::v8i16 || VT == MVT::v4i16)
+ return SelectPostStoreLane(Node, 3, ARM64::ST3i16_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32)
+ return SelectPostStoreLane(Node, 3, ARM64::ST3i32_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64)
+ return SelectPostStoreLane(Node, 3, ARM64::ST3i64_POST);
+ break;
+ }
+ case ARM64ISD::ST4LANEpost: {
+ VT = Node->getOperand(1).getValueType();
+ if (VT == MVT::v16i8 || VT == MVT::v8i8)
+ return SelectPostStoreLane(Node, 4, ARM64::ST4i8_POST);
+ else if (VT == MVT::v8i16 || VT == MVT::v4i16)
+ return SelectPostStoreLane(Node, 4, ARM64::ST4i16_POST);
+ else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
+ VT == MVT::v2f32)
+ return SelectPostStoreLane(Node, 4, ARM64::ST4i32_POST);
+ else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
+ VT == MVT::v1f64)
+ return SelectPostStoreLane(Node, 4, ARM64::ST4i64_POST);
+ break;
+ }
case ISD::FCEIL:
case ISD::FFLOOR:
ResNode = SelectCode(Node);
DEBUG(errs() << "=> ");
- if (ResNode == NULL || ResNode == Node)
+ if (ResNode == nullptr || ResNode == Node)
DEBUG(Node->dump(CurDAG));
else
DEBUG(ResNode->dump(CurDAG));