1 //===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines an instruction selector for the AArch64 target.
12 //===----------------------------------------------------------------------===//
14 #include "AArch64TargetMachine.h"
15 #include "MCTargetDesc/AArch64AddressingModes.h"
16 #include "llvm/ADT/APSInt.h"
17 #include "llvm/CodeGen/SelectionDAGISel.h"
18 #include "llvm/IR/Function.h" // To access function attributes.
19 #include "llvm/IR/GlobalValue.h"
20 #include "llvm/IR/Intrinsics.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Support/ErrorHandling.h"
23 #include "llvm/Support/MathExtras.h"
24 #include "llvm/Support/raw_ostream.h"
28 #define DEBUG_TYPE "aarch64-isel"
30 //===--------------------------------------------------------------------===//
31 /// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine
32 /// instructions for SelectionDAG operations.
36 class AArch64DAGToDAGISel : public SelectionDAGISel {
37 AArch64TargetMachine &TM;
39 /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
40 /// make the right decision when generating code for different targets.
41 const AArch64Subtarget *Subtarget;
46 explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
47 CodeGenOpt::Level OptLevel)
48 : SelectionDAGISel(tm, OptLevel), TM(tm), Subtarget(nullptr),
51 const char *getPassName() const override {
52 return "AArch64 Instruction Selection";
55 bool runOnMachineFunction(MachineFunction &MF) override {
57 MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
58 MF.getFunction()->hasFnAttribute(Attribute::MinSize);
59 Subtarget = &MF.getSubtarget<AArch64Subtarget>();
60 return SelectionDAGISel::runOnMachineFunction(MF);
63 SDNode *Select(SDNode *Node) override;
65 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
66 /// inline asm expressions.
67 bool SelectInlineAsmMemoryOperand(const SDValue &Op,
68 unsigned ConstraintID,
69 std::vector<SDValue> &OutOps) override;
71 SDNode *SelectMLAV64LaneV128(SDNode *N);
72 SDNode *SelectMULLV64LaneV128(unsigned IntNo, SDNode *N);
73 bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
74 bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
75 bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
76 bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
77 return SelectShiftedRegister(N, false, Reg, Shift);
79 bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
80 return SelectShiftedRegister(N, true, Reg, Shift);
82 bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
83 return SelectAddrModeIndexed(N, 1, Base, OffImm);
85 bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
86 return SelectAddrModeIndexed(N, 2, Base, OffImm);
88 bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
89 return SelectAddrModeIndexed(N, 4, Base, OffImm);
91 bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
92 return SelectAddrModeIndexed(N, 8, Base, OffImm);
94 bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
95 return SelectAddrModeIndexed(N, 16, Base, OffImm);
97 bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
98 return SelectAddrModeUnscaled(N, 1, Base, OffImm);
100 bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
101 return SelectAddrModeUnscaled(N, 2, Base, OffImm);
103 bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
104 return SelectAddrModeUnscaled(N, 4, Base, OffImm);
106 bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
107 return SelectAddrModeUnscaled(N, 8, Base, OffImm);
109 bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
110 return SelectAddrModeUnscaled(N, 16, Base, OffImm);
114 bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
115 SDValue &SignExtend, SDValue &DoShift) {
116 return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
120 bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
121 SDValue &SignExtend, SDValue &DoShift) {
122 return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
126 /// Form sequences of consecutive 64/128-bit registers for use in NEON
127 /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
128 /// between 1 and 4 elements. If it contains a single element, that is returned
129 /// unchanged; otherwise a REG_SEQUENCE value is returned.
130 SDValue createDTuple(ArrayRef<SDValue> Vecs);
131 SDValue createQTuple(ArrayRef<SDValue> Vecs);
133 /// Generic helper for the createDTuple/createQTuple
134 /// functions. Those should almost always be called instead.
135 SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
136 const unsigned SubRegs[]);
138 SDNode *SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);
140 SDNode *SelectIndexedLoad(SDNode *N, bool &Done);
142 SDNode *SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
144 SDNode *SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
146 SDNode *SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
147 SDNode *SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
149 SDNode *SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
150 SDNode *SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
151 SDNode *SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
152 SDNode *SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
154 SDNode *SelectBitfieldExtractOp(SDNode *N);
155 SDNode *SelectBitfieldInsertOp(SDNode *N);
157 SDNode *SelectLIBM(SDNode *N);
159 // Include the pieces autogenerated from the target description.
160 #include "AArch64GenDAGISel.inc"
163 bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
165 bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
167 bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
169 bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
170 SDValue &Offset, SDValue &SignExtend,
172 bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
173 SDValue &Offset, SDValue &SignExtend,
175 bool isWorthFolding(SDValue V) const;
176 bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
177 SDValue &Offset, SDValue &SignExtend);
179 template<unsigned RegWidth>
180 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
181 return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
184 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);
186 } // end anonymous namespace
188 /// isIntImmediate - This method tests to see if the node is a constant
189 /// operand. If so Imm will receive the 32-bit value.
190 static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
191 if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
192 Imm = C->getZExtValue();
198 // isIntImmediate - This method tests to see if the operand is a constant.
199 // If so Imm will receive the value.
200 static bool isIntImmediate(SDValue N, uint64_t &Imm) {
201 return isIntImmediate(N.getNode(), Imm);
204 // isOpcWithIntImmediate - This method tests to see if the node is a specific
205 // opcode and that it has an immediate integer right operand.
206 // If so Imm will receive the 32-bit value.
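// Illustrative usage (not from the original comments): isOpcWithIntImmediate(N,
// ISD::SRL, Imm) matches (srl X, C) for a constant C and leaves C in Imm.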
207 static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
209 return N->getOpcode() == Opc &&
210 isIntImmediate(N->getOperand(1).getNode(), Imm);
213 bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
214 const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
215 switch(ConstraintID) {
217 llvm_unreachable("Unexpected asm memory constraint");
218 case InlineAsm::Constraint_i:
219 case InlineAsm::Constraint_m:
220 case InlineAsm::Constraint_Q:
221 // Require the address to be in a register. That is safe for all AArch64
222 // variants and it is hard to do anything much smarter without knowing
223 // how the operand is used.
224 OutOps.push_back(Op);
230 /// SelectArithImmed - Select an immediate value that can be represented as
231 /// a 12-bit value shifted left by either 0 or 12. If so, return true with
232 /// Val set to the 12-bit value and Shift set to the shifter operand.
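/// Illustrative examples (not exhaustive): 0xabc is selected as Val=0xabc with
/// "LSL #0", 0x123000 as Val=0x123 with "LSL #12", while 0x123456 is rejected
/// because it does not fit a shifted 12-bit value.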
233 bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
235 // This function is called from the addsub_shifted_imm ComplexPattern,
236 // which lists [imm] as the list of opcodes it's interested in; however,
237 // we still need to check whether the operand is actually an immediate
238 // here because the ComplexPattern opcode list is only used in
239 // root-level opcode matching.
240 if (!isa<ConstantSDNode>(N.getNode()))
243 uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
246 if (Immed >> 12 == 0) {
248 } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
254 unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
256 Val = CurDAG->getTargetConstant(Immed, dl, MVT::i32);
257 Shift = CurDAG->getTargetConstant(ShVal, dl, MVT::i32);
261 /// SelectNegArithImmed - As above, but negates the value before trying to select it.
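/// For instance (illustrative), an add of -20 is negated to 20 here and then
/// matched by the SelectArithImmed logic, so it can be encoded as a subtract
/// of 20.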
263 bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
265 // This function is called from the addsub_shifted_imm ComplexPattern,
266 // which lists [imm] as the list of opcodes it's interested in; however,
267 // we still need to check whether the operand is actually an immediate
268 // here because the ComplexPattern opcode list is only used in
269 // root-level opcode matching.
270 if (!isa<ConstantSDNode>(N.getNode()))
273 // The immediate operand must be a 24-bit zero-extended immediate.
274 uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
276 // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
277 // have the opposite effect on the C flag, so this pattern mustn't match under
278 // those circumstances.
282 if (N.getValueType() == MVT::i32)
283 Immed = ~((uint32_t)Immed) + 1;
285 Immed = ~Immed + 1ULL;
286 if (Immed & 0xFFFFFFFFFF000000ULL)
289 Immed &= 0xFFFFFFULL;
290 return SelectArithImmed(CurDAG->getConstant(Immed, SDLoc(N), MVT::i32), Val,
294 /// getShiftTypeForNode - Translate a shift node to the corresponding
296 static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
297 switch (N.getOpcode()) {
299 return AArch64_AM::InvalidShiftExtend;
301 return AArch64_AM::LSL;
303 return AArch64_AM::LSR;
305 return AArch64_AM::ASR;
307 return AArch64_AM::ROR;
311 /// \brief Determine whether it is worth folding V into an extended register.
312 bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
313 // It hurts if the value is used at least twice, unless we are optimizing for code size.
315 if (ForCodeSize || V.hasOneUse())
320 /// SelectShiftedRegister - Select a "shifted register" operand. If the value
321 /// is not shifted, set the Shift operand to the default of "LSL 0". The logical
322 /// instructions allow the shifted register to be rotated, but the arithmetic
323 /// instructions do not. The AllowROR parameter specifies whether ROR is allowed.
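/// Illustrative example: for an operand (shl Y, 4) this returns Reg = Y and
/// Shift encoding "LSL #4", so the shift folds into the using instruction's
/// shifted-register form (assuming the fold is judged worthwhile).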
325 bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
326 SDValue &Reg, SDValue &Shift) {
327 AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
328 if (ShType == AArch64_AM::InvalidShiftExtend)
330 if (!AllowROR && ShType == AArch64_AM::ROR)
333 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
334 unsigned BitSize = N.getValueType().getSizeInBits();
335 unsigned Val = RHS->getZExtValue() & (BitSize - 1);
336 unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);
338 Reg = N.getOperand(0);
339 Shift = CurDAG->getTargetConstant(ShVal, SDLoc(N), MVT::i32);
340 return isWorthFolding(N);
346 /// getExtendTypeForNode - Translate an extend node to the corresponding
347 /// ExtendType value.
348 static AArch64_AM::ShiftExtendType
349 getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
350 if (N.getOpcode() == ISD::SIGN_EXTEND ||
351 N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
353 if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
354 SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
356 SrcVT = N.getOperand(0).getValueType();
358 if (!IsLoadStore && SrcVT == MVT::i8)
359 return AArch64_AM::SXTB;
360 else if (!IsLoadStore && SrcVT == MVT::i16)
361 return AArch64_AM::SXTH;
362 else if (SrcVT == MVT::i32)
363 return AArch64_AM::SXTW;
364 assert(SrcVT != MVT::i64 && "extend from 64-bits?");
366 return AArch64_AM::InvalidShiftExtend;
367 } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
368 N.getOpcode() == ISD::ANY_EXTEND) {
369 EVT SrcVT = N.getOperand(0).getValueType();
370 if (!IsLoadStore && SrcVT == MVT::i8)
371 return AArch64_AM::UXTB;
372 else if (!IsLoadStore && SrcVT == MVT::i16)
373 return AArch64_AM::UXTH;
374 else if (SrcVT == MVT::i32)
375 return AArch64_AM::UXTW;
376 assert(SrcVT != MVT::i64 && "extend from 64-bits?");
378 return AArch64_AM::InvalidShiftExtend;
379 } else if (N.getOpcode() == ISD::AND) {
380 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
382 return AArch64_AM::InvalidShiftExtend;
383 uint64_t AndMask = CSD->getZExtValue();
387 return AArch64_AM::InvalidShiftExtend;
389 return !IsLoadStore ? AArch64_AM::UXTB : AArch64_AM::InvalidShiftExtend;
391 return !IsLoadStore ? AArch64_AM::UXTH : AArch64_AM::InvalidShiftExtend;
393 return AArch64_AM::UXTW;
397 return AArch64_AM::InvalidShiftExtend;
400 // Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
401 static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
402 if (DL->getOpcode() != AArch64ISD::DUPLANE16 &&
403 DL->getOpcode() != AArch64ISD::DUPLANE32)
406 SDValue SV = DL->getOperand(0);
407 if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
410 SDValue EV = SV.getOperand(1);
411 if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
414 ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
415 ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
416 LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
417 LaneOp = EV.getOperand(0);
422 // Helper for SelectOpcV64LaneV128 - Recognize operations where one operand is a
423 // high lane extract.
424 static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
425 SDValue &LaneOp, int &LaneIdx) {
427 if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
429 if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx))
436 /// SelectMLAV64LaneV128 - AArch64 supports vector MLAs where one multiplicand
437 /// is a lane in the upper half of a 128-bit vector. Recognize and select this
438 /// so that we don't emit unnecessary lane extracts.
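/// Roughly (illustrative): (add A, (mul B, <lane L of the high half of a
/// 128-bit vector>)) is selected to a single MLA*_indexed that references
/// lane L directly, avoiding an explicit extract of the upper half.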
439 SDNode *AArch64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
441 SDValue Op0 = N->getOperand(0);
442 SDValue Op1 = N->getOperand(1);
443 SDValue MLAOp1; // Will hold ordinary multiplicand for MLA.
444 SDValue MLAOp2; // Will hold lane-accessed multiplicand for MLA.
445 int LaneIdx = -1; // Will hold the lane index.
447 if (Op1.getOpcode() != ISD::MUL ||
448 !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
451 if (Op1.getOpcode() != ISD::MUL ||
452 !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
457 SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);
459 SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };
461 unsigned MLAOpc = ~0U;
463 switch (N->getSimpleValueType(0).SimpleTy) {
465 llvm_unreachable("Unrecognized MLA.");
467 MLAOpc = AArch64::MLAv4i16_indexed;
470 MLAOpc = AArch64::MLAv8i16_indexed;
473 MLAOpc = AArch64::MLAv2i32_indexed;
476 MLAOpc = AArch64::MLAv4i32_indexed;
480 return CurDAG->getMachineNode(MLAOpc, dl, N->getValueType(0), Ops);
483 SDNode *AArch64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
489 if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
493 SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);
495 SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };
497 unsigned SMULLOpc = ~0U;
499 if (IntNo == Intrinsic::aarch64_neon_smull) {
500 switch (N->getSimpleValueType(0).SimpleTy) {
502 llvm_unreachable("Unrecognized SMULL.");
504 SMULLOpc = AArch64::SMULLv4i16_indexed;
507 SMULLOpc = AArch64::SMULLv2i32_indexed;
510 } else if (IntNo == Intrinsic::aarch64_neon_umull) {
511 switch (N->getSimpleValueType(0).SimpleTy) {
513 llvm_unreachable("Unrecognized SMULL.");
515 SMULLOpc = AArch64::UMULLv4i16_indexed;
518 SMULLOpc = AArch64::UMULLv2i32_indexed;
522 llvm_unreachable("Unrecognized intrinsic.");
524 return CurDAG->getMachineNode(SMULLOpc, dl, N->getValueType(0), Ops);
527 /// Instructions that accept extend modifiers like UXTW expect the register
528 /// being extended to be a GPR32, but the incoming DAG might be acting on a
529 /// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
530 /// this is the case.
531 static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
532 if (N.getValueType() == MVT::i32)
536 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
537 MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
538 dl, MVT::i32, N, SubReg);
539 return SDValue(Node, 0);
543 /// SelectArithExtendedRegister - Select an "extended register" operand. This
544 /// operand folds in an extend followed by an optional left shift.
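/// Illustrative example: (add X, (shl (and Y, 0xff), 2)) can be selected with
/// Reg = (the low 32 bits of) Y and Shift encoding "UXTB #2".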
545 bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
547 unsigned ShiftVal = 0;
548 AArch64_AM::ShiftExtendType Ext;
550 if (N.getOpcode() == ISD::SHL) {
551 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
554 ShiftVal = CSD->getZExtValue();
558 Ext = getExtendTypeForNode(N.getOperand(0));
559 if (Ext == AArch64_AM::InvalidShiftExtend)
562 Reg = N.getOperand(0).getOperand(0);
564 Ext = getExtendTypeForNode(N);
565 if (Ext == AArch64_AM::InvalidShiftExtend)
568 Reg = N.getOperand(0);
571 // AArch64 mandates that the RHS of the operation must use the smallest
572 // register class that could contain the size being extended from. Thus,
573 // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
574 // there might not be an actual 32-bit value in the program. We can
575 // (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
576 assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
577 Reg = narrowIfNeeded(CurDAG, Reg);
578 Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
580 return isWorthFolding(N);
583 /// If there's a use of this ADDlow that's not itself a load/store then we'll
584 /// need to create a real ADD instruction from it anyway and there's no point in
585 /// folding it into the mem op. Theoretically, it shouldn't matter, but there's
586 /// a single pseudo-instruction for an ADRP/ADD pair so over-aggressive folding
587 // leads to duplicated ADRP instructions.
588 static bool isWorthFoldingADDlow(SDValue N) {
589 for (auto Use : N->uses()) {
590 if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
591 Use->getOpcode() != ISD::ATOMIC_LOAD &&
592 Use->getOpcode() != ISD::ATOMIC_STORE)
595 // ldar and stlr have much more restrictive addressing modes (just a register).
597 if (cast<MemSDNode>(Use)->getOrdering() > Monotonic)
604 /// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
605 /// immediate" address. The "Size" argument is the size in bytes of the memory
606 /// reference, which determines the scale.
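/// For example (illustrative), with Size == 8 a base plus a constant offset of
/// 32 gives OffImm = 4 (the byte offset divided by the access size), matching
/// the "ldr Xt, [Xn, #32]" form.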
607 bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
608 SDValue &Base, SDValue &OffImm) {
610 const TargetLowering *TLI = getTargetLowering();
611 if (N.getOpcode() == ISD::FrameIndex) {
612 int FI = cast<FrameIndexSDNode>(N)->getIndex();
613 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
614 OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
618 if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
619 GlobalAddressSDNode *GAN =
620 dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
621 Base = N.getOperand(0);
622 OffImm = N.getOperand(1);
626 const GlobalValue *GV = GAN->getGlobal();
627 unsigned Alignment = GV->getAlignment();
628 const DataLayout *DL = TLI->getDataLayout();
629 Type *Ty = GV->getType()->getElementType();
630 if (Alignment == 0 && Ty->isSized())
631 Alignment = DL->getABITypeAlignment(Ty);
633 if (Alignment >= Size)
637 if (CurDAG->isBaseWithConstantOffset(N)) {
638 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
639 int64_t RHSC = (int64_t)RHS->getZExtValue();
640 unsigned Scale = Log2_32(Size);
641 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
642 Base = N.getOperand(0);
643 if (Base.getOpcode() == ISD::FrameIndex) {
644 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
645 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
647 OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
653 // Before falling back to our general case, check if the unscaled
654 // instructions can handle this. If so, that's preferable.
655 if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
658 // Base only. The address will be materialized into a register before
659 // the memory is accessed.
660 // add x0, Xbase, #offset
663 OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
667 /// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
668 /// immediate" address. This should only match when there is an offset that
669 /// is not valid for a scaled immediate addressing mode. The "Size" argument
670 /// is the size in bytes of the memory reference, which is needed here to know
671 /// what is valid for a scaled immediate.
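/// For example (illustrative), with Size == 4 an offset of -17 is not a valid
/// scaled immediate but fits the signed 9-bit range, so it is matched here and
/// ends up using an LDUR/STUR-style access.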
672 bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
675 if (!CurDAG->isBaseWithConstantOffset(N))
677 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
678 int64_t RHSC = RHS->getSExtValue();
679 // If the offset is valid as a scaled immediate, don't match here.
680 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
681 RHSC < (0x1000 << Log2_32(Size)))
683 if (RHSC >= -256 && RHSC < 256) {
684 Base = N.getOperand(0);
685 if (Base.getOpcode() == ISD::FrameIndex) {
686 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
687 const TargetLowering *TLI = getTargetLowering();
688 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
690 OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i64);
697 static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
699 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
700 SDValue ImpDef = SDValue(
701 CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::i64), 0);
702 MachineSDNode *Node = CurDAG->getMachineNode(
703 TargetOpcode::INSERT_SUBREG, dl, MVT::i64, ImpDef, N, SubReg);
704 return SDValue(Node, 0);
707 /// \brief Check if the given SHL node (\p N) can be used to form an
708 /// extended register for an addressing mode.
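/// Illustrative example: with Size == 4, (shl (sext_inreg Y, i32), 2) yields
/// Offset = (the low 32 bits of) Y with SignExtend set, i.e. the
/// "[Xn, Wm, SXTW #2]" register-offset form.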
709 bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
710 bool WantExtend, SDValue &Offset,
711 SDValue &SignExtend) {
712 assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
713 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
714 if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
719 AArch64_AM::ShiftExtendType Ext =
720 getExtendTypeForNode(N.getOperand(0), true);
721 if (Ext == AArch64_AM::InvalidShiftExtend)
724 Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
725 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
728 Offset = N.getOperand(0);
729 SignExtend = CurDAG->getTargetConstant(0, dl, MVT::i32);
732 unsigned LegalShiftVal = Log2_32(Size);
733 unsigned ShiftVal = CSD->getZExtValue();
735 if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
738 if (isWorthFolding(N))
744 bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
745 SDValue &Base, SDValue &Offset,
748 if (N.getOpcode() != ISD::ADD)
750 SDValue LHS = N.getOperand(0);
751 SDValue RHS = N.getOperand(1);
754 // We don't want to match immediate adds here, because they are better lowered
755 // to the register-immediate addressing modes.
756 if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
759 // Check if this particular node is reused in any non-memory related
760 // operation. If yes, do not try to fold this node into the address
761 // computation, since the computation will be kept.
762 const SDNode *Node = N.getNode();
763 for (SDNode *UI : Node->uses()) {
764 if (!isa<MemSDNode>(*UI))
768 // Remember if it is worth folding N when it produces an extended register.
769 bool IsExtendedRegisterWorthFolding = isWorthFolding(N);
771 // Try to match a shifted extend on the RHS.
772 if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
773 SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
775 DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
779 // Try to match a shifted extend on the LHS.
780 if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
781 SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
783 DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
787 // There was no shift, whatever else we find.
788 DoShift = CurDAG->getTargetConstant(false, dl, MVT::i32);
790 AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend;
791 // Try to match an unshifted extend on the LHS.
792 if (IsExtendedRegisterWorthFolding &&
793 (Ext = getExtendTypeForNode(LHS, true)) !=
794 AArch64_AM::InvalidShiftExtend) {
796 Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
797 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
799 if (isWorthFolding(LHS))
803 // Try to match an unshifted extend on the RHS.
804 if (IsExtendedRegisterWorthFolding &&
805 (Ext = getExtendTypeForNode(RHS, true)) !=
806 AArch64_AM::InvalidShiftExtend) {
808 Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
809 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
811 if (isWorthFolding(RHS))
818 // Check if the given immediate is preferred by ADD. If an immediate can be
819 // encoded in an ADD, or it can be encoded in an "ADD LSL #12" and cannot be
820 // encoded by one MOVZ, return true.
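// Illustrative values: 0xfff is a plain ADD immediate; 0x123000 needs
// "ADD ..., LSL #12" but two MOV instructions, so it is preferred; 0x1000 also
// fits "ADD ..., LSL #12" but a single MOVZ covers it, so it is not preferred.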
821 static bool isPreferredADD(int64_t ImmOff) {
822 // Constant in [0x0, 0xfff] can be encoded in ADD.
823 if ((ImmOff & 0xfffffffffffff000LL) == 0x0LL)
825 // Check if it can be encoded in an "ADD LSL #12".
826 if ((ImmOff & 0xffffffffff000fffLL) == 0x0LL)
827 // As a single MOVZ is faster than an "ADD ..., LSL #12", ignore such constants.
828 return (ImmOff & 0xffffffffff00ffffLL) != 0x0LL &&
829 (ImmOff & 0xffffffffffff0fffLL) != 0x0LL;
833 bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
834 SDValue &Base, SDValue &Offset,
837 if (N.getOpcode() != ISD::ADD)
839 SDValue LHS = N.getOperand(0);
840 SDValue RHS = N.getOperand(1);
843 // Check if this particular node is reused in any non-memory related
844 // operation. If yes, do not try to fold this node into the address
845 // computation, since the computation will be kept.
846 const SDNode *Node = N.getNode();
847 for (SDNode *UI : Node->uses()) {
848 if (!isa<MemSDNode>(*UI))
852 // Watch out if RHS is a wide immediate: it cannot be selected into the
853 // [BaseReg+Imm] addressing mode, and it may not be encodable in an ADD/SUB
854 // either. Instead it will use the [BaseReg + 0] address mode and generate
855 // instructions like:
856 // MOV X0, WideImmediate
857 // ADD X1, BaseReg, X0
859 // For such situations, using the [BaseReg, XReg] addressing mode can save one instruction:
861 // MOV X0, WideImmediate
862 // LDR X2, [BaseReg, X0]
863 if (isa<ConstantSDNode>(RHS)) {
864 int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
865 unsigned Scale = Log2_32(Size);
866 // Skip if the immediate can be selected by the load/store addressing mode.
867 // Also skip if the immediate can be encoded by a single ADD (SUB is also
868 // checked by using -ImmOff).
869 if ((ImmOff % Size == 0 && ImmOff >= 0 && ImmOff < (0x1000 << Scale)) ||
870 isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
873 SDValue Ops[] = { RHS };
875 CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
876 SDValue MOVIV = SDValue(MOVI, 0);
877 // This ADD of two X registers will be selected into [Reg+Reg] mode.
878 N = CurDAG->getNode(ISD::ADD, DL, MVT::i64, LHS, MOVIV);
882 // Remember if it is worth folding N when it produces an extended register.
882 bool IsExtendedRegisterWorthFolding = isWorthFolding(N);
884 // Try to match a shifted extend on the RHS.
885 if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
886 SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
888 DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
892 // Try to match a shifted extend on the LHS.
893 if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
894 SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
896 DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
900 // Match any non-shifted, non-extend, non-immediate add expression.
903 SignExtend = CurDAG->getTargetConstant(false, DL, MVT::i32);
904 DoShift = CurDAG->getTargetConstant(false, DL, MVT::i32);
905 // Reg1 + Reg2 is free: no check needed.
909 SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
910 static const unsigned RegClassIDs[] = {
911 AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
912 static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
913 AArch64::dsub2, AArch64::dsub3};
915 return createTuple(Regs, RegClassIDs, SubRegs);
918 SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
919 static const unsigned RegClassIDs[] = {
920 AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
921 static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
922 AArch64::qsub2, AArch64::qsub3};
924 return createTuple(Regs, RegClassIDs, SubRegs);
927 SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
928 const unsigned RegClassIDs[],
929 const unsigned SubRegs[]) {
930 // There's no special register-class for a vector-list of 1 element: it's just
932 if (Regs.size() == 1)
935 assert(Regs.size() >= 2 && Regs.size() <= 4);
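// As an illustration (not part of the original comments), two Q registers
// produce REG_SEQUENCE QQRegClassID, Vec0, qsub0, Vec1, qsub1, yielding one
// Untyped value that vector-list instructions can consume.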
939 SmallVector<SDValue, 4> Ops;
941 // First operand of REG_SEQUENCE is the desired RegClass.
943 CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], DL, MVT::i32));
945 // Then we get pairs of source & subregister-position for the components.
946 for (unsigned i = 0; i < Regs.size(); ++i) {
947 Ops.push_back(Regs[i]);
948 Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], DL, MVT::i32));
952 CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
953 return SDValue(N, 0);
956 SDNode *AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs,
957 unsigned Opc, bool isExt) {
959 EVT VT = N->getValueType(0);
961 unsigned ExtOff = isExt;
963 // Form a REG_SEQUENCE to force register allocation.
964 unsigned Vec0Off = ExtOff + 1;
965 SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
966 N->op_begin() + Vec0Off + NumVecs);
967 SDValue RegSeq = createQTuple(Regs);
969 SmallVector<SDValue, 6> Ops;
971 Ops.push_back(N->getOperand(1));
972 Ops.push_back(RegSeq);
973 Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
974 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
977 SDNode *AArch64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
978 LoadSDNode *LD = cast<LoadSDNode>(N);
979 if (LD->isUnindexed())
981 EVT VT = LD->getMemoryVT();
982 EVT DstVT = N->getValueType(0);
983 ISD::MemIndexedMode AM = LD->getAddressingMode();
984 bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
986 // We're not doing validity checking here. That was done when checking
987 // if we should mark the load as indexed or not. We're just selecting
988 // the right instruction.
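// For example (illustrative), a pre-indexed i64 load selects to LDRXpre
// ("ldr Xt, [Xn, #off]!") and a post-indexed one to LDRXpost
// ("ldr Xt, [Xn], #off"); the updated base is produced as the first result.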
991 ISD::LoadExtType ExtType = LD->getExtensionType();
992 bool InsertTo64 = false;
994 Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost;
995 else if (VT == MVT::i32) {
996 if (ExtType == ISD::NON_EXTLOAD)
997 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
998 else if (ExtType == ISD::SEXTLOAD)
999 Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost;
1001 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
1003 // The result of the load is only i32. It's the subreg_to_reg that makes
1007 } else if (VT == MVT::i16) {
1008 if (ExtType == ISD::SEXTLOAD) {
1009 if (DstVT == MVT::i64)
1010 Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost;
1012 Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost;
1014 Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost;
1015 InsertTo64 = DstVT == MVT::i64;
1016 // The result of the load is only i32. It's the subreg_to_reg that makes
1020 } else if (VT == MVT::i8) {
1021 if (ExtType == ISD::SEXTLOAD) {
1022 if (DstVT == MVT::i64)
1023 Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost;
1025 Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost;
1027 Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost;
1028 InsertTo64 = DstVT == MVT::i64;
1029 // The result of the load is only i32. It's the subreg_to_reg that makes
1033 } else if (VT == MVT::f32) {
1034 Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost;
1035 } else if (VT == MVT::f64 || VT.is64BitVector()) {
1036 Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost;
1037 } else if (VT.is128BitVector()) {
1038 Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
1041 SDValue Chain = LD->getChain();
1042 SDValue Base = LD->getBasePtr();
1043 ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
1044 int OffsetVal = (int)OffsetOp->getZExtValue();
1046 SDValue Offset = CurDAG->getTargetConstant(OffsetVal, dl, MVT::i64);
1047 SDValue Ops[] = { Base, Offset, Chain };
1048 SDNode *Res = CurDAG->getMachineNode(Opcode, dl, MVT::i64, DstVT,
1050 // Either way, we're replacing the node, so tell the caller that.
1052 SDValue LoadedVal = SDValue(Res, 1);
1054 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
1056 SDValue(CurDAG->getMachineNode(
1057 AArch64::SUBREG_TO_REG, dl, MVT::i64,
1058 CurDAG->getTargetConstant(0, dl, MVT::i64), LoadedVal,
1063 ReplaceUses(SDValue(N, 0), LoadedVal);
1064 ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
1065 ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
1070 SDNode *AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs,
1071 unsigned Opc, unsigned SubRegIdx) {
1073 EVT VT = N->getValueType(0);
1074 SDValue Chain = N->getOperand(0);
1076 SDValue Ops[] = {N->getOperand(2), // Mem operand;
1079 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
1081 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1082 SDValue SuperReg = SDValue(Ld, 0);
1083 for (unsigned i = 0; i < NumVecs; ++i)
1084 ReplaceUses(SDValue(N, i),
1085 CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
1087 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
1091 SDNode *AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
1092 unsigned Opc, unsigned SubRegIdx) {
1094 EVT VT = N->getValueType(0);
1095 SDValue Chain = N->getOperand(0);
1097 SDValue Ops[] = {N->getOperand(1), // Mem operand
1098 N->getOperand(2), // Incremental
1101 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1102 MVT::Untyped, MVT::Other};
1104 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1106 // Update uses of write back register
1107 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
1109 // Update uses of vector list
1110 SDValue SuperReg = SDValue(Ld, 1);
1112 ReplaceUses(SDValue(N, 0), SuperReg);
1114 for (unsigned i = 0; i < NumVecs; ++i)
1115 ReplaceUses(SDValue(N, i),
1116 CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
1119 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
1123 SDNode *AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
1126 EVT VT = N->getOperand(2)->getValueType(0);
1128 // Form a REG_SEQUENCE to force register allocation.
1129 bool Is128Bit = VT.getSizeInBits() == 128;
1130 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1131 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
1133 SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)};
1134 SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);
1139 SDNode *AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
1142 EVT VT = N->getOperand(2)->getValueType(0);
1143 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1144 MVT::Other}; // Type for the Chain
1146 // Form a REG_SEQUENCE to force register allocation.
1147 bool Is128Bit = VT.getSizeInBits() == 128;
1148 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1149 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
1151 SDValue Ops[] = {RegSeq,
1152 N->getOperand(NumVecs + 1), // base register
1153 N->getOperand(NumVecs + 2), // Incremental
1154 N->getOperand(0)}; // Chain
1155 SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1161 /// WidenVector - Given a value in the V64 register class, produce the
1162 /// equivalent value in the V128 register class.
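/// For example (illustrative), a v2i32 value is inserted into the dsub
/// subregister of an IMPLICIT_DEF v4i32, so the original lanes become the low
/// half and the high half is undefined.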
1167 WidenVector(SelectionDAG &DAG) : DAG(DAG) {}
1169 SDValue operator()(SDValue V64Reg) {
1170 EVT VT = V64Reg.getValueType();
1171 unsigned NarrowSize = VT.getVectorNumElements();
1172 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1173 MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
1177 SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
1178 return DAG.getTargetInsertSubreg(AArch64::dsub, DL, WideTy, Undef, V64Reg);
1183 /// NarrowVector - Given a value in the V128 register class, produce the
1184 /// equivalent value in the V64 register class.
1185 static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
1186 EVT VT = V128Reg.getValueType();
1187 unsigned WideSize = VT.getVectorNumElements();
1188 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1189 MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);
1191 return DAG.getTargetExtractSubreg(AArch64::dsub, SDLoc(V128Reg), NarrowTy,
1195 SDNode *AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
1198 EVT VT = N->getValueType(0);
1199 bool Narrow = VT.getSizeInBits() == 64;
1201 // Form a REG_SEQUENCE to force register allocation.
1202 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1205 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1206 WidenVector(*CurDAG));
1208 SDValue RegSeq = createQTuple(Regs);
1210 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
1213 cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
1215 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
1216 N->getOperand(NumVecs + 3), N->getOperand(0)};
1217 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1218 SDValue SuperReg = SDValue(Ld, 0);
1220 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
1221 static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2,
1223 for (unsigned i = 0; i < NumVecs; ++i) {
1224 SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
1226 NV = NarrowVector(NV, *CurDAG);
1227 ReplaceUses(SDValue(N, i), NV);
1230 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
1235 SDNode *AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
1238 EVT VT = N->getValueType(0);
1239 bool Narrow = VT.getSizeInBits() == 64;
1241 // Form a REG_SEQUENCE to force register allocation.
1242 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1245 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1246 WidenVector(*CurDAG));
1248 SDValue RegSeq = createQTuple(Regs);
1250 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1251 RegSeq->getValueType(0), MVT::Other};
1254 cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
1256 SDValue Ops[] = {RegSeq,
1257 CurDAG->getTargetConstant(LaneNo, dl,
1258 MVT::i64), // Lane Number
1259 N->getOperand(NumVecs + 2), // Base register
1260 N->getOperand(NumVecs + 3), // Incremental
1262 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1264 // Update uses of the write back register
1265 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
1267 // Update uses of the vector list
1268 SDValue SuperReg = SDValue(Ld, 1);
1270 ReplaceUses(SDValue(N, 0),
1271 Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg);
1273 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
1274 static unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1, AArch64::qsub2,
1276 for (unsigned i = 0; i < NumVecs; ++i) {
1277 SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
1280 NV = NarrowVector(NV, *CurDAG);
1281 ReplaceUses(SDValue(N, i), NV);
1286 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
1291 SDNode *AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
1294 EVT VT = N->getOperand(2)->getValueType(0);
1295 bool Narrow = VT.getSizeInBits() == 64;
1297 // Form a REG_SEQUENCE to force register allocation.
1298 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1301 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1302 WidenVector(*CurDAG));
1304 SDValue RegSeq = createQTuple(Regs);
1307 cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
1309 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
1310 N->getOperand(NumVecs + 3), N->getOperand(0)};
1311 SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
1313 // Transfer memoperands.
1314 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1315 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1316 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
1321 SDNode *AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
1324 EVT VT = N->getOperand(2)->getValueType(0);
1325 bool Narrow = VT.getSizeInBits() == 64;
1327 // Form a REG_SEQUENCE to force register allocation.
1328 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1331 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1332 WidenVector(*CurDAG));
1334 SDValue RegSeq = createQTuple(Regs);
1336 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1340 cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
1342 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
1343 N->getOperand(NumVecs + 2), // Base Register
1344 N->getOperand(NumVecs + 3), // Incremental
1346 SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1348 // Transfer memoperands.
1349 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1350 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1351 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
1356 static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
1357 unsigned &Opc, SDValue &Opd0,
1358 unsigned &LSB, unsigned &MSB,
1359 unsigned NumberOfIgnoredLowBits,
1360 bool BiggerPattern) {
1361 assert(N->getOpcode() == ISD::AND &&
1362 "N must be a AND operation to call this function");
1364 EVT VT = N->getValueType(0);
1366 // We could test the type of VT and return false when it does not match, but
1367 // since that check is done prior to this call in the current context, we
1368 // turned it into an assert to avoid redundant code.
1369 assert((VT == MVT::i32 || VT == MVT::i64) &&
1370 "Type checking must have been done before calling this function");
1372 // FIXME: simplify-demanded-bits in DAGCombine will probably have
1373 // changed the AND node to a 32-bit mask operation. We'll have to
1374 // undo that as part of the transform here if we want to catch all
1375 // the opportunities.
1376 // Currently the NumberOfIgnoredLowBits argument helps to recover
1377 // from these situations when matching the bigger pattern (bitfield insert).
1379 // For unsigned extracts, check for a shift right and mask
1380 uint64_t And_imm = 0;
1381 if (!isOpcWithIntImmediate(N, ISD::AND, And_imm))
1384 const SDNode *Op0 = N->getOperand(0).getNode();
1386 // Because of simplify-demanded-bits in DAGCombine, the mask may have been
1387 // simplified. Try to undo that
1388 And_imm |= (1 << NumberOfIgnoredLowBits) - 1;
1390 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
1391 if (And_imm & (And_imm + 1))
1394 bool ClampMSB = false;
1395 uint64_t Srl_imm = 0;
1396 // Handle the SRL + ANY_EXTEND case.
1397 if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
1398 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, Srl_imm)) {
1399 // Extend the incoming operand of the SRL to 64-bit.
1400 Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
1401 // Make sure to clamp the MSB so that we preserve the semantics of the
1402 // original operations.
1404 } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
1405 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
1407 // If the shift result was truncated, we can still combine them.
1408 Opd0 = Op0->getOperand(0).getOperand(0);
1410 // Use the type of SRL node.
1411 VT = Opd0->getValueType(0);
1412 } else if (isOpcWithIntImmediate(Op0, ISD::SRL, Srl_imm)) {
1413 Opd0 = Op0->getOperand(0);
1414 } else if (BiggerPattern) {
1415 // Let's pretend a 0 shift right has been performed.
1416 // The resulting code will be at least as good as the original one
1417 // plus it may expose more opportunities for bitfield insert pattern.
1418 // FIXME: Currently we limit this to the bigger pattern, because
1419 // some optimizations expect AND and not UBFM
1420 Opd0 = N->getOperand(0);
1424 // Bail out on large immediates. This happens when no proper
1425 // combining/constant folding was performed.
1426 if (!BiggerPattern && (Srl_imm <= 0 || Srl_imm >= VT.getSizeInBits())) {
1428 << ": Found large shift immediate, this should not happen\n"));
1433 MSB = Srl_imm + (VT == MVT::i32 ? countTrailingOnes<uint32_t>(And_imm)
1434 : countTrailingOnes<uint64_t>(And_imm)) -
1437 // Since we're moving the extend before the right shift operation, we need
1438 // to clamp the MSB to make sure we don't shift in undefined bits instead of
1439 // the zeros which would get shifted in with the original right shift
1441 MSB = MSB > 31 ? 31 : MSB;
1443 Opc = VT == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
1447 static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
1448 SDValue &Opd0, unsigned &LSB,
1450 // We are looking for the following pattern, which basically extracts several
1451 // contiguous bits from the source value and places them at the LSB of the
1452 // destination value; all other bits of the destination value are set to zero:
1454 // Value2 = AND Value, MaskImm
1455 // SRL Value2, ShiftImm
1457 // where MaskImm >> ShiftImm determines the width of the extracted bitfield.
1459 // This gets selected into a single UBFM:
1461 // UBFM Value, ShiftImm, BitWide + Srl_imm -1
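// For instance (illustrative): (srl (and Value, 0xff0), 4) extracts bits
// [11:4] of Value and is selected as UBFM Value, #4, #11.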
1464 if (N->getOpcode() != ISD::SRL)
1467 uint64_t And_mask = 0;
1468 if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, And_mask))
1471 Opd0 = N->getOperand(0).getOperand(0);
1473 uint64_t Srl_imm = 0;
1474 if (!isIntImmediate(N->getOperand(1), Srl_imm))
1477 // Check whether we really extract several bits here.
1478 unsigned BitWide = 64 - countLeadingOnes(~(And_mask >> Srl_imm));
1479 if (BitWide && isMask_64(And_mask >> Srl_imm)) {
1480 if (N->getValueType(0) == MVT::i32)
1481 Opc = AArch64::UBFMWri;
1483 Opc = AArch64::UBFMXri;
1486 MSB = BitWide + Srl_imm - 1;
1493 static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
1494 unsigned &LSB, unsigned &MSB,
1495 bool BiggerPattern) {
1496 assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
1497 "N must be a SHR/SRA operation to call this function");
1499 EVT VT = N->getValueType(0);
1501 // We could test the type of VT and return false when it does not match, but
1502 // since that check is done prior to this call in the current context, we
1503 // turned it into an assert to avoid redundant code.
1504 assert((VT == MVT::i32 || VT == MVT::i64) &&
1505 "Type checking must have been done before calling this function");
1507 // Check for AND + SRL doing several bits extract.
1508 if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, LSB, MSB))
1511 // We're looking for a shift of a shift.
1512 uint64_t Shl_imm = 0;
1513 uint64_t Trunc_bits = 0;
1514 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
1515 Opd0 = N->getOperand(0).getOperand(0);
1516 } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
1517 N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
1518 // We are looking for a shift of a truncate. A truncate from i64 to i32 could
1519 // be considered as setting the high 32 bits to zero. Our strategy here is to
1520 // always generate a 64-bit UBFM. This consistency will help the CSE pass
1521 // later find more redundancy.
1522 Opd0 = N->getOperand(0).getOperand(0);
1523 Trunc_bits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
1524 VT = Opd0->getValueType(0);
1525 assert(VT == MVT::i64 && "the promoted type should be i64");
1526 } else if (BiggerPattern) {
1527 // Let's pretend a 0 shift left has been performed.
1528 // FIXME: Currently we limit this to the bigger pattern case,
1529 // because some optimizations expect AND and not UBFM
1530 Opd0 = N->getOperand(0);
1534 // Missing combines/constant folding may have left us with strange constants.
1536 if (Shl_imm >= VT.getSizeInBits()) {
1538 << ": Found large shift immediate, this should not happen\n"));
1542 uint64_t Srl_imm = 0;
1543 if (!isIntImmediate(N->getOperand(1), Srl_imm))
1546 assert(Srl_imm > 0 && Srl_imm < VT.getSizeInBits() &&
1547 "bad amount in shift node!");
1548 // Note: The width operand is encoded as width-1.
1549 unsigned Width = VT.getSizeInBits() - Trunc_bits - Srl_imm - 1;
1550 int sLSB = Srl_imm - Shl_imm;
1555 // SRA requires a signed extraction
1557 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri;
1559 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri;
1563 static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
1564 SDValue &Opd0, unsigned &LSB, unsigned &MSB,
1565 unsigned NumberOfIgnoredLowBits = 0,
1566 bool BiggerPattern = false) {
1567 if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
1570 switch (N->getOpcode()) {
1572 if (!N->isMachineOpcode())
1576 return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, LSB, MSB,
1577 NumberOfIgnoredLowBits, BiggerPattern);
1580 return isBitfieldExtractOpFromShr(N, Opc, Opd0, LSB, MSB, BiggerPattern);
1583 unsigned NOpc = N->getMachineOpcode();
1587 case AArch64::SBFMWri:
1588 case AArch64::UBFMWri:
1589 case AArch64::SBFMXri:
1590 case AArch64::UBFMXri:
1592 Opd0 = N->getOperand(0);
1593 LSB = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
1594 MSB = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
1601 SDNode *AArch64DAGToDAGISel::SelectBitfieldExtractOp(SDNode *N) {
1602 unsigned Opc, LSB, MSB;
1604 if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, LSB, MSB))
1607 EVT VT = N->getValueType(0);
1610 // If the bit extract operation is 64-bit but the original type is 32-bit, we
1611 // need to add one EXTRACT_SUBREG.
1612 if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
1613 SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(LSB, dl, MVT::i64),
1614 CurDAG->getTargetConstant(MSB, dl, MVT::i64)};
1616 SDNode *BFM = CurDAG->getMachineNode(Opc, dl, MVT::i64, Ops64);
1617 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
1618 MachineSDNode *Node =
1619 CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, MVT::i32,
1620 SDValue(BFM, 0), SubReg);
1624 SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(LSB, dl, VT),
1625 CurDAG->getTargetConstant(MSB, dl, VT)};
1626 return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
1629 /// Does DstMask form a complementary pair with the mask provided by
1630 /// BitsToBeInserted, suitable for use in a BFI instruction. Roughly speaking,
1631 /// this asks whether DstMask zeroes precisely those bits that will be set by the inserted bits.
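/// For a 32-bit insert (illustrative), DstMask = 0xffff00ff pairs with
/// inserted bits covering 0x0000ff00: the two are disjoint and together cover
/// every bit.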
1633 static bool isBitfieldDstMask(uint64_t DstMask, APInt BitsToBeInserted,
1634 unsigned NumberOfIgnoredHighBits, EVT VT) {
1635 assert((VT == MVT::i32 || VT == MVT::i64) &&
1636 "i32 or i64 mask type expected!");
1637 unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;
1639 APInt SignificantDstMask = APInt(BitWidth, DstMask);
1640 APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);
1642 return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
1643 (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue();
1646 // Look for bits that will be useful for later uses.
1647 // A bit is considered useless as soon as it is dropped and never used
1648 // before it has been dropped.
1649 // E.g., looking for the useful bits of x in:
1650 // 1. y = x & 0x7
1651 // 2. z = y >> 2
1652 // After #1, the useful bits of x are 0x7, and they live through y.
1654 // After #2, the useful bits of x are 0x4.
1655 // However, if x is used in an unpredictable instruction, then all its bits
1656 // are useful.
1661 static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);
1663 static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
1666 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
1667 Imm = AArch64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
1668 UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
1669 getUsefulBits(Op, UsefulBits, Depth + 1);
1672 static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
1673 uint64_t Imm, uint64_t MSB,
1675 // inherit the bitwidth value
1676 APInt OpUsefulBits(UsefulBits);
1680 OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
1682 // The interesting part will be in the lower part of the result
1683 getUsefulBits(Op, OpUsefulBits, Depth + 1);
1684 // The interesting part was starting at Imm in the argument
1685 OpUsefulBits = OpUsefulBits.shl(Imm);
1687 OpUsefulBits = OpUsefulBits.shl(MSB + 1);
1689 // The interesting part will be shifted in the result
1690 OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
1691 getUsefulBits(Op, OpUsefulBits, Depth + 1);
1692 // The interesting part was at zero in the argument
1693 OpUsefulBits = OpUsefulBits.lshr(OpUsefulBits.getBitWidth() - Imm);
1696 UsefulBits &= OpUsefulBits;
1699 static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
1702 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
1704 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1706 getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
1709 static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
1711 uint64_t ShiftTypeAndValue =
1712 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1713 APInt Mask(UsefulBits);
1714 Mask.clearAllBits();
1717 if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
1719 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
1720 Mask = Mask.shl(ShiftAmt);
1721 getUsefulBits(Op, Mask, Depth + 1);
1722 Mask = Mask.lshr(ShiftAmt);
1723 } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
1725 // We do not handle AArch64_AM::ASR, because the sign will change the
1726 // number of useful bits
1727 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
1728 Mask = Mask.lshr(ShiftAmt);
1729 getUsefulBits(Op, Mask, Depth + 1);
1730 Mask = Mask.shl(ShiftAmt);
1737 static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
1740 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1742 cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();
1744 if (Op.getOperand(1) == Orig)
1745 return getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
1747 APInt OpUsefulBits(UsefulBits);
1751 OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
1753 UsefulBits &= ~OpUsefulBits;
1754 getUsefulBits(Op, UsefulBits, Depth + 1);
1756 OpUsefulBits = OpUsefulBits.shl(MSB + 1);
1758 UsefulBits = ~(OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm));
1759 getUsefulBits(Op, UsefulBits, Depth + 1);
1763 static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
1764 SDValue Orig, unsigned Depth) {
1766 // Users of this node should have already been instruction selected
1767 // FIXME: Can we turn that into an assert?
1768 if (!UserNode->isMachineOpcode())
1771 switch (UserNode->getMachineOpcode()) {
1774 case AArch64::ANDSWri:
1775 case AArch64::ANDSXri:
1776 case AArch64::ANDWri:
1777 case AArch64::ANDXri:
1778 // We increment Depth only when we call getUsefulBits.
1779 return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
1780 Depth);
1781 case AArch64::UBFMWri:
1782 case AArch64::UBFMXri:
1783 return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);
1785 case AArch64::ORRWrs:
1786 case AArch64::ORRXrs:
1787 if (UserNode->getOperand(1) != Orig)
1788 return;
1789 return getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
1790 Depth);
1791 case AArch64::BFMWri:
1792 case AArch64::BFMXri:
1793 return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);
1797 static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
1800 // Initialize UsefulBits
1802 unsigned Bitwidth = Op.getValueType().getScalarType().getSizeInBits();
1803 // At the beginning, assume every produced bit is useful.
1804 UsefulBits = APInt(Bitwidth, 0);
1805 UsefulBits.flipAllBits();
1807 APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);
1809 for (SDNode *Node : Op.getNode()->uses()) {
1810 // A use cannot produce useful bits
1811 APInt UsefulBitsForUse = APInt(UsefulBits);
1812 getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
1813 UsersUsefulBits |= UsefulBitsForUse;
1815 // UsefulBits contains the produced bits that are meaningful for the
1816 // current definition, thus a user cannot make a bit meaningful at
1817 // this point.
1818 UsefulBits &= UsersUsefulBits;
1821 /// Create a machine node performing a notional SHL of Op by ShlAmount. If
1822 /// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
1823 /// 0, return Op unchanged.
1824 static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
1828 EVT VT = Op.getValueType();
1830 unsigned BitWidth = VT.getSizeInBits();
1831 unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;
1834 if (ShlAmount > 0) {
1835 // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
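// For example, with a 32-bit width and Amt = 3:
//   LSL wD, wN, #3 == UBFM wD, wN, #29, #28   (ImmR = 32-3, ImmS = 31-3)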
1836 ShiftNode = CurDAG->getMachineNode(
1837 UBFMOpc, dl, VT, Op,
1838 CurDAG->getTargetConstant(BitWidth - ShlAmount, dl, VT),
1839 CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, dl, VT));
1841 // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
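// For example, LSR wD, wN, #3 == UBFM wD, wN, #3, #31.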
1842 assert(ShlAmount < 0 && "expected right shift");
1843 int ShrAmount = -ShlAmount;
1844 ShiftNode = CurDAG->getMachineNode(
1845 UBFMOpc, dl, VT, Op, CurDAG->getTargetConstant(ShrAmount, dl, VT),
1846 CurDAG->getTargetConstant(BitWidth - 1, dl, VT));
1849 return SDValue(ShiftNode, 0);
1852 /// Does this tree qualify as an attempt to move a bitfield into position,
1853 /// essentially "(and (shl VAL, N), Mask)".
1854 static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
1855 SDValue &Src, int &ShiftAmount,
1857 EVT VT = Op.getValueType();
1858 unsigned BitWidth = VT.getSizeInBits();
1860 assert(BitWidth == 32 || BitWidth == 64);
1862 APInt KnownZero, KnownOne;
1863 CurDAG->computeKnownBits(Op, KnownZero, KnownOne);
1865 // Non-zero in the sense that they're not provably zero, which is the key
1866 // point if we want to use this value
1867 uint64_t NonZeroBits = (~KnownZero).getZExtValue();
1869 // Discard a constant AND mask if present. It's safe because the node will
1870 // already have been factored into the computeKnownBits calculation above.
1872 if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
1873 assert((~APInt(BitWidth, AndImm) & ~KnownZero) == 0);
1874 Op = Op.getOperand(0);
1878 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm))
1880 Op = Op.getOperand(0);
1882 if (!isShiftedMask_64(NonZeroBits))
1885 ShiftAmount = countTrailingZeros(NonZeroBits);
1886 MaskWidth = countTrailingOnes(NonZeroBits >> ShiftAmount);
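// NonZeroBits is a contiguous run of ones at this point: its trailing-zero
// count is the field's destination LSB and the run length is the field
// width. E.g., NonZeroBits == 0x0ff0 gives ShiftAmount = 4, MaskWidth = 8.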
1888 // BFI encompasses sufficiently many nodes that it's worth inserting an extra
1889 // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
1890 // amount.
1891 Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount);
1896 // Given an OR operation, check if we have the following pattern:
1897 // ubfm c, b, imm, imm2 (or something that does the same job, see
1898 // isBitfieldExtractOp)
1899 // d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
1900 //                 countTrailingZeros(mask2) == imm2 - imm + 1
1901 // f = d | c
1902 // If so, the given reference arguments will be updated so that one can replace
1903 // the OR instruction with:
1904 // f = Opc Opd0, Opd1, LSB, MSB ; where Opc is a BFM, LSB = imm, and MSB = imm2
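// Concretely (i32, illustrative values):
//   c = (b >> 4) & 0xff        ; ubfm c, b, #4, #11
//   d = e & 0xffffff00         ; mask2 = 0xffffff00, countTrailingZeros == 8
//   f = d | c
// can be replaced by a single BFXIL-style BFM that inserts bits [11:4] of b
// into the low 8 bits of e (imm = 4, imm2 = 11, so imm2 - imm + 1 == 8).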
1905 static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Dst,
1906 SDValue &Src, unsigned &ImmR,
1907 unsigned &ImmS, SelectionDAG *CurDAG) {
1908 assert(N->getOpcode() == ISD::OR && "Expect an OR operation");
1911 EVT VT = N->getValueType(0);
1912 if (VT == MVT::i32)
1913 Opc = AArch64::BFMWri;
1914 else if (VT == MVT::i64)
1915 Opc = AArch64::BFMXri;
1916 else
1917 return false;
1919 // Because of simplify-demanded-bits in DAGCombine, involved masks may not
1920 // have the expected shape. Try to undo that.
1922 getUsefulBits(SDValue(N, 0), UsefulBits);
1924 unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
1925 unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();
1927 // OR is commutative, check both possibilities (does llvm provide a
1928 // way to do that directly, e.g., via code matcher?)
1929 SDValue OrOpd1Val = N->getOperand(1);
1930 SDNode *OrOpd0 = N->getOperand(0).getNode();
1931 SDNode *OrOpd1 = N->getOperand(1).getNode();
1932 for (int i = 0; i < 2;
1933 ++i, std::swap(OrOpd0, OrOpd1), OrOpd1Val = N->getOperand(0)) {
1936 if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
1937 NumberOfIgnoredLowBits, true)) {
1938 // Check that the returned opcode is compatible with the pattern,
1939 // i.e., same type and zero extended (U and not S)
1940 if ((BFXOpc != AArch64::UBFMXri && VT == MVT::i64) ||
1941 (BFXOpc != AArch64::UBFMWri && VT == MVT::i32))
1944 // Compute the width of the bitfield insertion
1946 Width = ImmS - ImmR + 1;
1947 // FIXME: This constraint is to catch bitfield insertion; we may
1948 // want to widen the pattern if we want to grab general bitfield
1949 // insertion.
1953 // If the mask on the insertee is correct, we have a BFXIL operation. We
1954 // can share the ImmR and ImmS values from the already-computed UBFM.
1955 } else if (isBitfieldPositioningOp(CurDAG, SDValue(OrOpd0, 0), Src,
1957 ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
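// BFI/BFXIL are aliases of BFM, which right-rotates its source by ImmR, so
// inserting at DstLSB requires ImmR = (RegSize - DstLSB) % RegSize, i.e.
// -DstLSB modulo the register width.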
1962 // Check the second part of the pattern
1963 EVT VT = OrOpd1->getValueType(0);
1964 assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");
1966 // Compute the Known Zero for the candidate of the first operand.
1967 // This allows us to catch a more general case than just looking for an
1968 // AND with imm. Indeed, simplify-demanded-bits may have removed
1969 // the AND instruction because it proves it was useless.
1970 APInt KnownZero, KnownOne;
1971 CurDAG->computeKnownBits(OrOpd1Val, KnownZero, KnownOne);
1973 // Check if there is enough room for the second operand to appear
1974 // in the first one.
1975 APInt BitsToBeInserted =
1976 APInt::getBitsSet(KnownZero.getBitWidth(), DstLSB, DstLSB + Width);
1978 if ((BitsToBeInserted & ~KnownZero) != 0)
1981 // Set the first operand
1983 if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
1984 isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
1985 // In that case, we can eliminate the AND
1986 Dst = OrOpd1->getOperand(0);
1988 // Maybe the AND has been removed by simplify-demanded-bits
1989 // or is useful because it discards more bits
1999 SDNode *AArch64DAGToDAGISel::SelectBitfieldInsertOp(SDNode *N) {
2000 if (N->getOpcode() != ISD::OR)
2007 if (!isBitfieldInsertOpFromOr(N, Opc, Opd0, Opd1, LSB, MSB, CurDAG))
2010 EVT VT = N->getValueType(0);
2012 SDValue Ops[] = { Opd0,
2014 CurDAG->getTargetConstant(LSB, dl, VT),
2015 CurDAG->getTargetConstant(MSB, dl, VT) };
2016 return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2019 SDNode *AArch64DAGToDAGISel::SelectLIBM(SDNode *N) {
2020 EVT VT = N->getValueType(0);
2021 unsigned Variant;
2022 unsigned Opc;
2023 unsigned FRINTXOpcs[] = { AArch64::FRINTXSr, AArch64::FRINTXDr };
2025 if (VT == MVT::f32) {
2026 Variant = 0;
2027 } else if (VT == MVT::f64) {
2028 Variant = 1;
2029 } else
2030 return nullptr; // Unrecognized argument type. Fall back on default codegen.
2032 // Pick the FRINTX variant needed to set the flags.
2033 unsigned FRINTXOpc = FRINTXOpcs[Variant];
2035 switch (N->getOpcode()) {
2037 return nullptr; // Unrecognized libm ISD node. Fall back on default codegen.
2039 unsigned FRINTPOpcs[] = { AArch64::FRINTPSr, AArch64::FRINTPDr };
2040 Opc = FRINTPOpcs[Variant];
2044 unsigned FRINTMOpcs[] = { AArch64::FRINTMSr, AArch64::FRINTMDr };
2045 Opc = FRINTMOpcs[Variant];
2049 unsigned FRINTZOpcs[] = { AArch64::FRINTZSr, AArch64::FRINTZDr };
2050 Opc = FRINTZOpcs[Variant];
2054 unsigned FRINTAOpcs[] = { AArch64::FRINTASr, AArch64::FRINTADr };
2055 Opc = FRINTAOpcs[Variant];
2061 SDValue In = N->getOperand(0);
2062 SmallVector<SDValue, 2> Ops;
2063 Ops.push_back(In);
2065 if (!TM.Options.UnsafeFPMath) {
2066 SDNode *FRINTX = CurDAG->getMachineNode(FRINTXOpc, dl, VT, MVT::Glue, In);
2067 Ops.push_back(SDValue(FRINTX, 1));
2070 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
2074 AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
2075 unsigned RegWidth) {
2076 APFloat FVal(0.0);
2077 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
2078 FVal = CN->getValueAPF();
2079 else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
2080 // Some otherwise illegal constants are allowed in this case.
2081 if (LN->getOperand(1).getOpcode() != AArch64ISD::ADDlow ||
2082 !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
2085 ConstantPoolSDNode *CN =
2086 dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
2087 FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
2091 // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
2092 // is between 1 and 32 for a destination w-register, or 1 and 64 for an
2093 // x-register.
2095 // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
2096 // want THIS_NODE to be 2^fbits. This is much easier to deal with using
2097 // APSInts.
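// For instance, if the source pattern is (fp_to_sint (fmul x, 16.0)), the
// multiplier is 2^4, so FBits below comes out as 4 and the caller can emit a
// fixed-point convert such as FCVTZS ..., #4 (x * 16 rounded toward zero).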
2100 // fbits is between 1 and 64 in the worst-case, which means the fmul
2101 // could have 2^64 as an actual operand. Need 65 bits of precision.
2102 APSInt IntVal(65, true);
2103 FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);
2105 // N.b. isPowerOf2 also checks for > 0.
2106 if (!IsExact || !IntVal.isPowerOf2()) return false;
2107 unsigned FBits = IntVal.logBase2();
2109 // Checks above should have guaranteed that we haven't lost information in
2110 // finding FBits, but it must still be in range.
2111 if (FBits == 0 || FBits > RegWidth) return false;
2113 FixedPos = CurDAG->getTargetConstant(FBits, SDLoc(N), MVT::i32);
2117 SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
2118 // Dump information about the Node being selected
2119 DEBUG(errs() << "Selecting: ");
2120 DEBUG(Node->dump(CurDAG));
2121 DEBUG(errs() << "\n");
2123 // If we have a custom node, we already have selected!
2124 if (Node->isMachineOpcode()) {
2125 DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
2126 Node->setNodeId(-1);
2130 // A few cases need custom selection.
2131 SDNode *ResNode = nullptr;
2132 EVT VT = Node->getValueType(0);
2134 switch (Node->getOpcode()) {
2139 if (SDNode *I = SelectMLAV64LaneV128(Node))
2144 // Try to select as an indexed load. Fall through to normal processing
2145 // if we can't.
2146 bool Done;
2147 SDNode *I = SelectIndexedLoad(Node, Done);
2156 if (SDNode *I = SelectBitfieldExtractOp(Node))
2161 if (SDNode *I = SelectBitfieldInsertOp(Node))
2165 case ISD::EXTRACT_VECTOR_ELT: {
2166 // Extracting lane zero is a special case where we can just use a plain
2167 // EXTRACT_SUBREG instruction, which will become FMOV. This is easier for
2168 // the rest of the compiler, especially the register allocator and copy
2169 // propagation, to reason about, so is preferred when it's possible to
2170 // do so.
2171 ConstantSDNode *LaneNode = cast<ConstantSDNode>(Node->getOperand(1));
2172 // Bail and use the default Select() for non-zero lanes.
2173 if (LaneNode->getZExtValue() != 0)
2175 // If the element type is not the same as the result type, likewise
2176 // bail and use the default Select(), as there's more to do than just
2177 // a cross-class COPY. This catches extracts of i8 and i16 elements
2178 // since they will need an explicit zext.
2179 if (VT != Node->getOperand(0).getValueType().getVectorElementType())
2182 switch (Node->getOperand(0)
2184 .getVectorElementType()
2187 llvm_unreachable("Unexpected vector element type!");
2189 SubReg = AArch64::dsub;
2192 SubReg = AArch64::ssub;
2195 SubReg = AArch64::hsub;
2198 llvm_unreachable("unexpected zext-requiring extract element!");
2200 SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg, SDLoc(Node), VT,
2201 Node->getOperand(0));
2202 DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
2203 DEBUG(Extract->dumpr(CurDAG));
2204 DEBUG(dbgs() << "\n");
2205 return Extract.getNode();
2207 case ISD::Constant: {
2208 // Materialize zero constants as copies from WZR/XZR. This allows
2209 // the coalescer to propagate these into other instructions.
2210 ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
2211 if (ConstNode->isNullValue()) {
2212 if (VT == MVT::i32)
2213 return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
2214 AArch64::WZR, MVT::i32).getNode();
2215 else if (VT == MVT::i64)
2216 return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
2217 AArch64::XZR, MVT::i64).getNode();
2222 case ISD::FrameIndex: {
2223 // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
2224 int FI = cast<FrameIndexSDNode>(Node)->getIndex();
2225 unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
2226 const TargetLowering *TLI = getTargetLowering();
2227 SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
2229 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, DL, MVT::i32),
2230 CurDAG->getTargetConstant(Shifter, DL, MVT::i32) };
2231 return CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
2233 case ISD::INTRINSIC_W_CHAIN: {
2234 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
2238 case Intrinsic::aarch64_ldaxp:
2239 case Intrinsic::aarch64_ldxp: {
2241 IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX;
2242 SDValue MemAddr = Node->getOperand(2);
2244 SDValue Chain = Node->getOperand(0);
2246 SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
2247 MVT::Other, MemAddr, Chain);
2249 // Transfer memoperands.
2250 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2251 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
2252 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
2255 case Intrinsic::aarch64_stlxp:
2256 case Intrinsic::aarch64_stxp: {
2258 IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX;
2260 SDValue Chain = Node->getOperand(0);
2261 SDValue ValLo = Node->getOperand(2);
2262 SDValue ValHi = Node->getOperand(3);
2263 SDValue MemAddr = Node->getOperand(4);
2265 // Place arguments in the right order.
2266 SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain};
2268 SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
2269 // Transfer memoperands.
2270 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2271 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
2272 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
2276 case Intrinsic::aarch64_neon_ld1x2:
2277 if (VT == MVT::v8i8)
2278 return SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
2279 else if (VT == MVT::v16i8)
2280 return SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
2281 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2282 return SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
2283 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2284 return SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
2285 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2286 return SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
2287 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2288 return SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
2289 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2290 return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
2291 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2292 return SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
2294 case Intrinsic::aarch64_neon_ld1x3:
2295 if (VT == MVT::v8i8)
2296 return SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
2297 else if (VT == MVT::v16i8)
2298 return SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
2299 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2300 return SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
2301 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2302 return SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
2303 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2304 return SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
2305 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2306 return SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
2307 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2308 return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
2309 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2310 return SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
2312 case Intrinsic::aarch64_neon_ld1x4:
2313 if (VT == MVT::v8i8)
2314 return SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
2315 else if (VT == MVT::v16i8)
2316 return SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
2317 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2318 return SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
2319 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2320 return SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
2321 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2322 return SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
2323 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2324 return SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
2325 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2326 return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
2327 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2328 return SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
2330 case Intrinsic::aarch64_neon_ld2:
2331 if (VT == MVT::v8i8)
2332 return SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
2333 else if (VT == MVT::v16i8)
2334 return SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
2335 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2336 return SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
2337 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2338 return SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
2339 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2340 return SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
2341 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2342 return SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0);
2343 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2344 return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
2345 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2346 return SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0);
2348 case Intrinsic::aarch64_neon_ld3:
2349 if (VT == MVT::v8i8)
2350 return SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
2351 else if (VT == MVT::v16i8)
2352 return SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
2353 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2354 return SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
2355 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2356 return SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
2357 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2358 return SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
2359 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2360 return SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0);
2361 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2362 return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
2363 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2364 return SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0);
2366 case Intrinsic::aarch64_neon_ld4:
2367 if (VT == MVT::v8i8)
2368 return SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
2369 else if (VT == MVT::v16i8)
2370 return SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
2371 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2372 return SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
2373 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2374 return SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
2375 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2376 return SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
2377 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2378 return SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0);
2379 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2380 return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
2381 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2382 return SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0);
2384 case Intrinsic::aarch64_neon_ld2r:
2385 if (VT == MVT::v8i8)
2386 return SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
2387 else if (VT == MVT::v16i8)
2388 return SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
2389 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2390 return SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
2391 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2392 return SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
2393 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2394 return SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
2395 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2396 return SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0);
2397 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2398 return SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0);
2399 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2400 return SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0);
2402 case Intrinsic::aarch64_neon_ld3r:
2403 if (VT == MVT::v8i8)
2404 return SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
2405 else if (VT == MVT::v16i8)
2406 return SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
2407 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2408 return SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
2409 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2410 return SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
2411 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2412 return SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
2413 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2414 return SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
2415 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2416 return SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
2417 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2418 return SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
2420 case Intrinsic::aarch64_neon_ld4r:
2421 if (VT == MVT::v8i8)
2422 return SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
2423 else if (VT == MVT::v16i8)
2424 return SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
2425 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2426 return SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
2427 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2428 return SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
2429 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2430 return SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
2431 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2432 return SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
2433 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2434 return SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
2435 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2436 return SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
2438 case Intrinsic::aarch64_neon_ld2lane:
2439 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2440 return SelectLoadLane(Node, 2, AArch64::LD2i8);
2441 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2443 return SelectLoadLane(Node, 2, AArch64::LD2i16);
2444 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2446 return SelectLoadLane(Node, 2, AArch64::LD2i32);
2447 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2449 return SelectLoadLane(Node, 2, AArch64::LD2i64);
2451 case Intrinsic::aarch64_neon_ld3lane:
2452 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2453 return SelectLoadLane(Node, 3, AArch64::LD3i8);
2454 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2456 return SelectLoadLane(Node, 3, AArch64::LD3i16);
2457 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2459 return SelectLoadLane(Node, 3, AArch64::LD3i32);
2460 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2462 return SelectLoadLane(Node, 3, AArch64::LD3i64);
2464 case Intrinsic::aarch64_neon_ld4lane:
2465 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2466 return SelectLoadLane(Node, 4, AArch64::LD4i8);
2467 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2469 return SelectLoadLane(Node, 4, AArch64::LD4i16);
2470 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2472 return SelectLoadLane(Node, 4, AArch64::LD4i32);
2473 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2475 return SelectLoadLane(Node, 4, AArch64::LD4i64);
2479 case ISD::INTRINSIC_WO_CHAIN: {
2480 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
2484 case Intrinsic::aarch64_neon_tbl2:
2485 return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBLv8i8Two
2486 : AArch64::TBLv16i8Two,
2488 case Intrinsic::aarch64_neon_tbl3:
2489 return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
2490 : AArch64::TBLv16i8Three,
2492 case Intrinsic::aarch64_neon_tbl4:
2493 return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
2494 : AArch64::TBLv16i8Four,
2496 case Intrinsic::aarch64_neon_tbx2:
2497 return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBXv8i8Two
2498 : AArch64::TBXv16i8Two,
2500 case Intrinsic::aarch64_neon_tbx3:
2501 return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
2502 : AArch64::TBXv16i8Three,
2504 case Intrinsic::aarch64_neon_tbx4:
2505 return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
2506 : AArch64::TBXv16i8Four,
2508 case Intrinsic::aarch64_neon_smull:
2509 case Intrinsic::aarch64_neon_umull:
2510 if (SDNode *N = SelectMULLV64LaneV128(IntNo, Node))
2516 case ISD::INTRINSIC_VOID: {
2517 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
2518 if (Node->getNumOperands() >= 3)
2519 VT = Node->getOperand(2)->getValueType(0);
2523 case Intrinsic::aarch64_neon_st1x2: {
2524 if (VT == MVT::v8i8)
2525 return SelectStore(Node, 2, AArch64::ST1Twov8b);
2526 else if (VT == MVT::v16i8)
2527 return SelectStore(Node, 2, AArch64::ST1Twov16b);
2528 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2529 return SelectStore(Node, 2, AArch64::ST1Twov4h);
2530 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2531 return SelectStore(Node, 2, AArch64::ST1Twov8h);
2532 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2533 return SelectStore(Node, 2, AArch64::ST1Twov2s);
2534 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2535 return SelectStore(Node, 2, AArch64::ST1Twov4s);
2536 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2537 return SelectStore(Node, 2, AArch64::ST1Twov2d);
2538 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2539 return SelectStore(Node, 2, AArch64::ST1Twov1d);
2542 case Intrinsic::aarch64_neon_st1x3: {
2543 if (VT == MVT::v8i8)
2544 return SelectStore(Node, 3, AArch64::ST1Threev8b);
2545 else if (VT == MVT::v16i8)
2546 return SelectStore(Node, 3, AArch64::ST1Threev16b);
2547 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2548 return SelectStore(Node, 3, AArch64::ST1Threev4h);
2549 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2550 return SelectStore(Node, 3, AArch64::ST1Threev8h);
2551 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2552 return SelectStore(Node, 3, AArch64::ST1Threev2s);
2553 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2554 return SelectStore(Node, 3, AArch64::ST1Threev4s);
2555 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2556 return SelectStore(Node, 3, AArch64::ST1Threev2d);
2557 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2558 return SelectStore(Node, 3, AArch64::ST1Threev1d);
2561 case Intrinsic::aarch64_neon_st1x4: {
2562 if (VT == MVT::v8i8)
2563 return SelectStore(Node, 4, AArch64::ST1Fourv8b);
2564 else if (VT == MVT::v16i8)
2565 return SelectStore(Node, 4, AArch64::ST1Fourv16b);
2566 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2567 return SelectStore(Node, 4, AArch64::ST1Fourv4h);
2568 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2569 return SelectStore(Node, 4, AArch64::ST1Fourv8h);
2570 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2571 return SelectStore(Node, 4, AArch64::ST1Fourv2s);
2572 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2573 return SelectStore(Node, 4, AArch64::ST1Fourv4s);
2574 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2575 return SelectStore(Node, 4, AArch64::ST1Fourv2d);
2576 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2577 return SelectStore(Node, 4, AArch64::ST1Fourv1d);
2580 case Intrinsic::aarch64_neon_st2: {
2581 if (VT == MVT::v8i8)
2582 return SelectStore(Node, 2, AArch64::ST2Twov8b);
2583 else if (VT == MVT::v16i8)
2584 return SelectStore(Node, 2, AArch64::ST2Twov16b);
2585 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2586 return SelectStore(Node, 2, AArch64::ST2Twov4h);
2587 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2588 return SelectStore(Node, 2, AArch64::ST2Twov8h);
2589 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2590 return SelectStore(Node, 2, AArch64::ST2Twov2s);
2591 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2592 return SelectStore(Node, 2, AArch64::ST2Twov4s);
2593 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2594 return SelectStore(Node, 2, AArch64::ST2Twov2d);
2595 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2596 return SelectStore(Node, 2, AArch64::ST1Twov1d);
2599 case Intrinsic::aarch64_neon_st3: {
2600 if (VT == MVT::v8i8)
2601 return SelectStore(Node, 3, AArch64::ST3Threev8b);
2602 else if (VT == MVT::v16i8)
2603 return SelectStore(Node, 3, AArch64::ST3Threev16b);
2604 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2605 return SelectStore(Node, 3, AArch64::ST3Threev4h);
2606 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2607 return SelectStore(Node, 3, AArch64::ST3Threev8h);
2608 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2609 return SelectStore(Node, 3, AArch64::ST3Threev2s);
2610 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2611 return SelectStore(Node, 3, AArch64::ST3Threev4s);
2612 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2613 return SelectStore(Node, 3, AArch64::ST3Threev2d);
2614 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2615 return SelectStore(Node, 3, AArch64::ST1Threev1d);
2618 case Intrinsic::aarch64_neon_st4: {
2619 if (VT == MVT::v8i8)
2620 return SelectStore(Node, 4, AArch64::ST4Fourv8b);
2621 else if (VT == MVT::v16i8)
2622 return SelectStore(Node, 4, AArch64::ST4Fourv16b);
2623 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2624 return SelectStore(Node, 4, AArch64::ST4Fourv4h);
2625 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2626 return SelectStore(Node, 4, AArch64::ST4Fourv8h);
2627 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2628 return SelectStore(Node, 4, AArch64::ST4Fourv2s);
2629 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2630 return SelectStore(Node, 4, AArch64::ST4Fourv4s);
2631 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2632 return SelectStore(Node, 4, AArch64::ST4Fourv2d);
2633 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2634 return SelectStore(Node, 4, AArch64::ST1Fourv1d);
2637 case Intrinsic::aarch64_neon_st2lane: {
2638 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2639 return SelectStoreLane(Node, 2, AArch64::ST2i8);
2640 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2642 return SelectStoreLane(Node, 2, AArch64::ST2i16);
2643 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2645 return SelectStoreLane(Node, 2, AArch64::ST2i32);
2646 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2648 return SelectStoreLane(Node, 2, AArch64::ST2i64);
2651 case Intrinsic::aarch64_neon_st3lane: {
2652 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2653 return SelectStoreLane(Node, 3, AArch64::ST3i8);
2654 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2656 return SelectStoreLane(Node, 3, AArch64::ST3i16);
2657 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2659 return SelectStoreLane(Node, 3, AArch64::ST3i32);
2660 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2662 return SelectStoreLane(Node, 3, AArch64::ST3i64);
2665 case Intrinsic::aarch64_neon_st4lane: {
2666 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2667 return SelectStoreLane(Node, 4, AArch64::ST4i8);
2668 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2670 return SelectStoreLane(Node, 4, AArch64::ST4i16);
2671 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2673 return SelectStoreLane(Node, 4, AArch64::ST4i32);
2674 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2676 return SelectStoreLane(Node, 4, AArch64::ST4i64);
2681 case AArch64ISD::LD2post: {
2682 if (VT == MVT::v8i8)
2683 return SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
2684 else if (VT == MVT::v16i8)
2685 return SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
2686 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2687 return SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
2688 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2689 return SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
2690 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2691 return SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
2692 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2693 return SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0);
2694 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2695 return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
2696 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2697 return SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0);
2700 case AArch64ISD::LD3post: {
2701 if (VT == MVT::v8i8)
2702 return SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
2703 else if (VT == MVT::v16i8)
2704 return SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
2705 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2706 return SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
2707 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2708 return SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
2709 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2710 return SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
2711 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2712 return SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0);
2713 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2714 return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
2715 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2716 return SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0);
2719 case AArch64ISD::LD4post: {
2720 if (VT == MVT::v8i8)
2721 return SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
2722 else if (VT == MVT::v16i8)
2723 return SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
2724 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2725 return SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
2726 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2727 return SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
2728 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2729 return SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
2730 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2731 return SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0);
2732 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2733 return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
2734 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2735 return SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0);
2738 case AArch64ISD::LD1x2post: {
2739 if (VT == MVT::v8i8)
2740 return SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
2741 else if (VT == MVT::v16i8)
2742 return SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
2743 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2744 return SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
2745 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2746 return SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
2747 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2748 return SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
2749 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2750 return SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0);
2751 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2752 return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
2753 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2754 return SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0);
2757 case AArch64ISD::LD1x3post: {
2758 if (VT == MVT::v8i8)
2759 return SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
2760 else if (VT == MVT::v16i8)
2761 return SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
2762 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2763 return SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
2764 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2765 return SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
2766 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2767 return SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
2768 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2769 return SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0);
2770 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2771 return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
2772 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2773 return SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0);
2776 case AArch64ISD::LD1x4post: {
2777 if (VT == MVT::v8i8)
2778 return SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
2779 else if (VT == MVT::v16i8)
2780 return SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
2781 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2782 return SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
2783 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2784 return SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
2785 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2786 return SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
2787 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2788 return SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0);
2789 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2790 return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
2791 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2792 return SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0);
2795 case AArch64ISD::LD1DUPpost: {
2796 if (VT == MVT::v8i8)
2797 return SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
2798 else if (VT == MVT::v16i8)
2799 return SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
2800 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2801 return SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
2802 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2803 return SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
2804 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2805 return SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
2806 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2807 return SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0);
2808 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2809 return SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0);
2810 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2811 return SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0);
2814 case AArch64ISD::LD2DUPpost: {
2815 if (VT == MVT::v8i8)
2816 return SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
2817 else if (VT == MVT::v16i8)
2818 return SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
2819 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2820 return SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
2821 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2822 return SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
2823 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2824 return SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
2825 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2826 return SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0);
2827 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2828 return SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0);
2829 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2830 return SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0);
2833 case AArch64ISD::LD3DUPpost: {
2834 if (VT == MVT::v8i8)
2835 return SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
2836 else if (VT == MVT::v16i8)
2837 return SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
2838 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2839 return SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
2840 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2841 return SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
2842 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2843 return SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
2844 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2845 return SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0);
2846 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2847 return SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0);
2848 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2849 return SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0);
2852 case AArch64ISD::LD4DUPpost: {
2853 if (VT == MVT::v8i8)
2854 return SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
2855 else if (VT == MVT::v16i8)
2856 return SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
2857 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2858 return SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
2859 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2860 return SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
2861 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2862 return SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
2863 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2864 return SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0);
2865 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2866 return SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0);
2867 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2868 return SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0);
2871 case AArch64ISD::LD1LANEpost: {
2872 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2873 return SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
2874 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2876 return SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
2877 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2879 return SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
2880 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2882 return SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
2885 case AArch64ISD::LD2LANEpost: {
2886 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2887 return SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
2888 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2890 return SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
2891 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2893 return SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
2894 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2896 return SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
2899 case AArch64ISD::LD3LANEpost: {
2900 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2901 return SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
2902 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2904 return SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
2905 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2907 return SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
2908 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2910 return SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
2913 case AArch64ISD::LD4LANEpost: {
2914 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2915 return SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
2916 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2918 return SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
2919 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2921 return SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
2922 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2924 return SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
2927 case AArch64ISD::ST2post: {
2928 VT = Node->getOperand(1).getValueType();
2929 if (VT == MVT::v8i8)
2930 return SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
2931 else if (VT == MVT::v16i8)
2932 return SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
2933 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2934 return SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
2935 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2936 return SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
2937 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2938 return SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
2939 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2940 return SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
2941 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2942 return SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
2943 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2944 return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
2947 case AArch64ISD::ST3post: {
2948 VT = Node->getOperand(1).getValueType();
2949 if (VT == MVT::v8i8)
2950 return SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
2951 else if (VT == MVT::v16i8)
2952 return SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
2953 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2954 return SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
2955 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2956 return SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
2957 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2958 return SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
2959 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2960 return SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
2961 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2962 return SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
2963 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2964 return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
2967 case AArch64ISD::ST4post: {
2968 VT = Node->getOperand(1).getValueType();
2969 if (VT == MVT::v8i8)
2970 return SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
2971 else if (VT == MVT::v16i8)
2972 return SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
2973 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2974 return SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
2975 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2976 return SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
2977 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2978 return SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
2979 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2980 return SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
2981 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2982 return SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
2983 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2984 return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
2987 case AArch64ISD::ST1x2post: {
2988 VT = Node->getOperand(1).getValueType();
2989 if (VT == MVT::v8i8)
2990 return SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
2991 else if (VT == MVT::v16i8)
2992 return SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
2993 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2994 return SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
2995 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2996 return SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
2997 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2998 return SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
2999 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3000 return SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
3001 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3002 return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
3003 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3004 return SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
3007 case AArch64ISD::ST1x3post: {
3008 VT = Node->getOperand(1).getValueType();
3009 if (VT == MVT::v8i8)
3010 return SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
3011 else if (VT == MVT::v16i8)
3012 return SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
3013 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3014 return SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
3015 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3016 return SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
3017 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3018 return SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
3019 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3020 return SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
3021 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3022 return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
3023 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3024 return SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
3027 case AArch64ISD::ST1x4post: {
3028 VT = Node->getOperand(1).getValueType();
3029 if (VT == MVT::v8i8)
3030 return SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
3031 else if (VT == MVT::v16i8)
3032 return SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
3033 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3034 return SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
3035 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3036 return SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
3037 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3038 return SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
3039 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3040 return SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
3041 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3042 return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
3043 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3044 return SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
3047 case AArch64ISD::ST2LANEpost: {
3048 VT = Node->getOperand(1).getValueType();
3049 if (VT == MVT::v16i8 || VT == MVT::v8i8)
3050 return SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
3051 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3053 return SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
3054 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3056 return SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
3057 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3059 return SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
3062 case AArch64ISD::ST3LANEpost: {
3063 VT = Node->getOperand(1).getValueType();
3064 if (VT == MVT::v16i8 || VT == MVT::v8i8)
3065 return SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
3066 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3068 return SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
3069 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3071 return SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
3072 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3074 return SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
3077 case AArch64ISD::ST4LANEpost: {
3078 VT = Node->getOperand(1).getValueType();
3079 if (VT == MVT::v16i8 || VT == MVT::v8i8)
3080 return SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
3081 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3083 return SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
3084 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3086 return SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
3087 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3089 return SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
3097 if (SDNode *I = SelectLIBM(Node))
3102 // Select the default instruction
3103 ResNode = SelectCode(Node);
3105 DEBUG(errs() << "=> ");
3106 if (ResNode == nullptr || ResNode == Node)
3107 DEBUG(Node->dump(CurDAG));
3108 else
3109 DEBUG(ResNode->dump(CurDAG));
3110 DEBUG(errs() << "\n");
3115 /// createAArch64ISelDag - This pass converts a legalized DAG into an
3116 /// AArch64-specific DAG, ready for instruction scheduling.
3117 FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
3118 CodeGenOpt::Level OptLevel) {
3119 return new AArch64DAGToDAGISel(TM, OptLevel);