//===-- ARM64ISelDAGToDAG.cpp - A dag to dag inst selector for ARM64 ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the ARM64 target.
//
//===----------------------------------------------------------------------===//

#include "ARM64TargetMachine.h"
#include "MCTargetDesc/ARM64AddressingModes.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h" // To access function attributes.
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "arm64-isel"

//===--------------------------------------------------------------------===//
/// ARM64DAGToDAGISel - ARM64 specific code to select ARM64 machine
/// instructions for SelectionDAG operations.
///
namespace {

class ARM64DAGToDAGISel : public SelectionDAGISel {
  ARM64TargetMachine &TM;

  /// Subtarget - Keep a pointer to the ARM64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARM64Subtarget *Subtarget;

  bool ForCodeSize;

public:
  explicit ARM64DAGToDAGISel(ARM64TargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel), TM(tm),
        Subtarget(&TM.getSubtarget<ARM64Subtarget>()), ForCodeSize(false) {}

  const char *getPassName() const override {
    return "ARM64 Instruction Selection";
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    AttributeSet FnAttrs = MF.getFunction()->getAttributes();
    ForCodeSize =
        FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
                             Attribute::OptimizeForSize) ||
        FnAttrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
    return SelectionDAGISel::runOnMachineFunction(MF);
  }

  SDNode *Select(SDNode *Node) override;

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  bool SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                                    std::vector<SDValue> &OutOps) override;

  SDNode *SelectMLAV64LaneV128(SDNode *N);
  SDNode *SelectMULLV64LaneV128(unsigned IntNo, SDNode *N);
  bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
  bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, false, Reg, Shift);
  }
  bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, true, Reg, Shift);
  }
  bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 16, Base, OffImm);
  }
  bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 1, Base, OffImm);
  }
  bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 2, Base, OffImm);
  }
  bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 4, Base, OffImm);
  }
  bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 8, Base, OffImm);
  }
  bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 16, Base, OffImm);
  }

  bool SelectAddrModeRO8(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &Imm) {
    return SelectAddrModeRO(N, 1, Base, Offset, Imm);
  }
  bool SelectAddrModeRO16(SDValue N, SDValue &Base, SDValue &Offset,
                          SDValue &Imm) {
    return SelectAddrModeRO(N, 2, Base, Offset, Imm);
  }
  bool SelectAddrModeRO32(SDValue N, SDValue &Base, SDValue &Offset,
                          SDValue &Imm) {
    return SelectAddrModeRO(N, 4, Base, Offset, Imm);
  }
  bool SelectAddrModeRO64(SDValue N, SDValue &Base, SDValue &Offset,
                          SDValue &Imm) {
    return SelectAddrModeRO(N, 8, Base, Offset, Imm);
  }
  bool SelectAddrModeRO128(SDValue N, SDValue &Base, SDValue &Offset,
                           SDValue &Imm) {
    return SelectAddrModeRO(N, 16, Base, Offset, Imm);
  }
  bool SelectAddrModeNoIndex(SDValue N, SDValue &Val);

  /// Form sequences of consecutive 64/128-bit registers for use in NEON
  /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
  /// between 1 and 4 elements. If it contains a single element, that element
  /// is returned unchanged; otherwise a REG_SEQUENCE value is returned.
  SDValue createDTuple(ArrayRef<SDValue> Vecs);
  SDValue createQTuple(ArrayRef<SDValue> Vecs);

  /// Generic helper for the createDTuple/createQTuple
  /// functions. Those should almost always be called instead.
  SDValue createTuple(ArrayRef<SDValue> Vecs, unsigned RegClassIDs[],
                      unsigned SubRegs[]);

  SDNode *SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);

  SDNode *SelectIndexedLoad(SDNode *N, bool &Done);

  SDNode *SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                     unsigned SubRegIdx);
  SDNode *SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                         unsigned SubRegIdx);
  SDNode *SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  SDNode *SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  SDNode *SelectSIMDAddSubNarrowing(unsigned IntNo, SDNode *Node);
  SDNode *SelectSIMDXtnNarrowing(unsigned IntNo, SDNode *Node);

  SDNode *SelectBitfieldExtractOp(SDNode *N);
  SDNode *SelectBitfieldInsertOp(SDNode *N);

  SDNode *SelectLIBM(SDNode *N);

// Include the pieces autogenerated from the target description.
#include "ARM64GenDAGISel.inc"

private:
  bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
                             SDValue &Shift);
  bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
                             SDValue &OffImm);
  bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
                              SDValue &OffImm);
  bool SelectAddrModeRO(SDValue N, unsigned Size, SDValue &Base,
                        SDValue &Offset, SDValue &Imm);
  bool isWorthFolding(SDValue V) const;
  bool SelectExtendedSHL(SDValue N, unsigned Size, SDValue &Offset,
                         SDValue &Imm);

  template<unsigned RegWidth>
  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
    return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
  }

  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);
};
} // end anonymous namespace

/// isIntImmediate - This method tests to see if the node is a constant
/// operand. If so Imm will receive the 64-bit value.
static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
  if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
    Imm = C->getZExtValue();
    return true;
  }
  return false;
}

// isIntImmediate - This method tests to see if the value is a constant
// operand. If so Imm will receive the value.
static bool isIntImmediate(SDValue N, uint64_t &Imm) {
  return isIntImmediate(N.getNode(), Imm);
}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so Imm will receive the value.
static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
                                  uint64_t &Imm) {
  return N->getOpcode() == Opc &&
         isIntImmediate(N->getOperand(1).getNode(), Imm);
}

bool ARM64DAGToDAGISel::SelectAddrModeNoIndex(SDValue N, SDValue &Val) {
  EVT ValTy = N.getValueType();
  if (ValTy != MVT::i64)
    return false;
  Val = N;
  return true;
}

bool ARM64DAGToDAGISel::SelectInlineAsmMemoryOperand(
    const SDValue &Op, char ConstraintCode, std::vector<SDValue> &OutOps) {
  assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
  // Require the address to be in a register. That is safe for all ARM64
  // variants and it is hard to do anything much smarter without knowing
  // how the operand is used.
  OutOps.push_back(Op);
  return false;
}

/// SelectArithImmed - Select an immediate value that can be represented as
/// a 12-bit value shifted left by either 0 or 12. If so, return true with
/// Val set to the 12-bit value and Shift set to the shifter operand.
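/// For illustration only (constants assumed here, not taken from any test):
/// 0x123 is selected as Val = 0x123 with "LSL #0"; 0x123000 is selected as
/// Val = 0x123 with "LSL #12"; 0x123456 fits neither form, so this returns
/// false and the constant is materialized separately.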
bool ARM64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
                                         SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
  unsigned ShiftAmt;

  if (Immed >> 12 == 0) {
    ShiftAmt = 0;
  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
    ShiftAmt = 12;
    Immed = Immed >> 12;
  } else
    return false;

  unsigned ShVal = ARM64_AM::getShifterImm(ARM64_AM::LSL, ShiftAmt);
  Val = CurDAG->getTargetConstant(Immed, MVT::i32);
  Shift = CurDAG->getTargetConstant(ShVal, MVT::i32);
  return true;
}

/// SelectNegArithImmed - As above, but negates the value before trying to
/// select it.
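/// For illustration only (an assumed i32 case): the constant -16 negates to
/// 16, which SelectArithImmed accepts, so a comparison against -16 can be
/// selected as "cmn wN, #16" rather than materializing the negative value.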
bool ARM64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
                                            SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  // The immediate operand must be a 24-bit zero-extended immediate.
  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();

  // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
  // have the opposite effect on the C flag, so this pattern mustn't match under
  // those circumstances.
  if (Immed == 0)
    return false;

  if (N.getValueType() == MVT::i32)
    Immed = ~((uint32_t)Immed) + 1;
  else
    Immed = ~Immed + 1ULL;
  if (Immed & 0xFFFFFFFFFF000000ULL)
    return false;

  Immed &= 0xFFFFFFULL;
  return SelectArithImmed(CurDAG->getConstant(Immed, MVT::i32), Val, Shift);
}

/// getShiftTypeForNode - Translate a shift node to the corresponding
/// ShiftType value.
static ARM64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
  switch (N.getOpcode()) {
  default:
    return ARM64_AM::InvalidShiftExtend;
  case ISD::SHL:
    return ARM64_AM::LSL;
  case ISD::SRL:
    return ARM64_AM::LSR;
  case ISD::SRA:
    return ARM64_AM::ASR;
  case ISD::ROTR:
    return ARM64_AM::ROR;
  }
}

/// \brief Determine whether it is worth folding V into an extended register.
bool ARM64DAGToDAGISel::isWorthFolding(SDValue V) const {
  // It hurts if the value is used at least twice, unless we are optimizing
  // for code size.
  if (ForCodeSize || V.hasOneUse())
    return true;
  return false;
}

/// SelectShiftedRegister - Select a "shifted register" operand. If the value
/// is not shifted, set the Shift operand to the default of "LSL 0". The
/// logical instructions allow the shifted register to be rotated, but the
/// arithmetic instructions do not. The AllowROR parameter specifies whether
/// ROR is allowed.
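/// For illustration only (an assumed pattern): matching this on the value
/// (shl x1, 3) yields Reg = x1 and a Shift encoding "LSL #3", letting an add
/// of that value select as "add x0, x0, x1, lsl #3" with no separate shift.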
bool ARM64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
                                              SDValue &Reg, SDValue &Shift) {
  ARM64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
  if (ShType == ARM64_AM::InvalidShiftExtend)
    return false;
  if (!AllowROR && ShType == ARM64_AM::ROR)
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    unsigned BitSize = N.getValueType().getSizeInBits();
    unsigned Val = RHS->getZExtValue() & (BitSize - 1);
    unsigned ShVal = ARM64_AM::getShifterImm(ShType, Val);

    Reg = N.getOperand(0);
    Shift = CurDAG->getTargetConstant(ShVal, MVT::i32);
    return isWorthFolding(N);
  }

  return false;
}

/// getExtendTypeForNode - Translate an extend node to the corresponding
/// ExtendType value.
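/// For illustration only (assumed inputs): (sext_inreg x, i8) maps to SXTB,
/// (and x, 0xffff) maps to UXTH, and a zero_extend from i32 maps to UXTW;
/// any node that is not an extend maps to InvalidShiftExtend.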
static ARM64_AM::ShiftExtendType
getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
  if (N.getOpcode() == ISD::SIGN_EXTEND ||
      N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
    EVT SrcVT;
    if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
      SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
    else
      SrcVT = N.getOperand(0).getValueType();

    if (!IsLoadStore && SrcVT == MVT::i8)
      return ARM64_AM::SXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return ARM64_AM::SXTH;
    else if (SrcVT == MVT::i32)
      return ARM64_AM::SXTW;
    else if (SrcVT == MVT::i64)
      return ARM64_AM::SXTX;

    return ARM64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
             N.getOpcode() == ISD::ANY_EXTEND) {
    EVT SrcVT = N.getOperand(0).getValueType();
    if (!IsLoadStore && SrcVT == MVT::i8)
      return ARM64_AM::UXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return ARM64_AM::UXTH;
    else if (SrcVT == MVT::i32)
      return ARM64_AM::UXTW;
    else if (SrcVT == MVT::i64)
      return ARM64_AM::UXTX;

    return ARM64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::AND) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return ARM64_AM::InvalidShiftExtend;
    uint64_t AndMask = CSD->getZExtValue();

    switch (AndMask) {
    default:
      return ARM64_AM::InvalidShiftExtend;
    case 0xFF:
      return !IsLoadStore ? ARM64_AM::UXTB : ARM64_AM::InvalidShiftExtend;
    case 0xFFFF:
      return !IsLoadStore ? ARM64_AM::UXTH : ARM64_AM::InvalidShiftExtend;
    case 0xFFFFFFFF:
      return ARM64_AM::UXTW;
    }
  }

  return ARM64_AM::InvalidShiftExtend;
}

// Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
  if (DL->getOpcode() != ARM64ISD::DUPLANE16 &&
      DL->getOpcode() != ARM64ISD::DUPLANE32)
    return false;

  SDValue SV = DL->getOperand(0);
  if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
    return false;

  SDValue EV = SV.getOperand(1);
  if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
    return false;

  ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
  ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
  LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
  LaneOp = EV.getOperand(0);

  return true;
}

// Helper for SelectOpcV64LaneV128 - Recognize operations where one operand is
// a high lane extract.
static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
                             SDValue &LaneOp, int &LaneIdx) {

  if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
    std::swap(Op0, Op1);
    if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx))
      return false;
  }
  StdOp = Op1;
  return true;
}

/// SelectMLAV64LaneV128 - ARM64 supports vector MLAs where one multiplicand is
/// a lane in the upper half of a 128-bit vector. Recognize and select this so
/// that we don't emit unnecessary lane extracts.
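/// For illustration only (an assumed case): in "mla v0.2s, v1.2s, v2.s[3]",
/// lane 3 lives in the high half of a 128-bit register; matching here picks
/// the indexed MLA directly instead of first extracting the high half.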
SDNode *ARM64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDValue MLAOp1;   // Will hold ordinary multiplicand for MLA.
  SDValue MLAOp2;   // Will hold lane-accessed multiplicand for MLA.
  int LaneIdx = -1; // Will hold the lane index.

  if (Op1.getOpcode() != ISD::MUL ||
      !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                        LaneIdx)) {
    std::swap(Op0, Op1);
    if (Op1.getOpcode() != ISD::MUL ||
        !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                          LaneIdx))
      return nullptr;
  }

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);

  SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };

  unsigned MLAOpc = ~0U;

  switch (N->getSimpleValueType(0).SimpleTy) {
  default:
    llvm_unreachable("Unrecognized MLA.");
  case MVT::v4i16:
    MLAOpc = ARM64::MLAv4i16_indexed;
    break;
  case MVT::v8i16:
    MLAOpc = ARM64::MLAv8i16_indexed;
    break;
  case MVT::v2i32:
    MLAOpc = ARM64::MLAv2i32_indexed;
    break;
  case MVT::v4i32:
    MLAOpc = ARM64::MLAv4i32_indexed;
    break;
  }

  return CurDAG->getMachineNode(MLAOpc, SDLoc(N), N->getValueType(0), Ops);
}

SDNode *ARM64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
  SDValue SMULLOp0;
  SDValue SMULLOp1;
  int LaneIdx;

  if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
                        LaneIdx))
    return nullptr;

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);

  SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };

  unsigned SMULLOpc = ~0U;

  if (IntNo == Intrinsic::arm64_neon_smull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized SMULL.");
    case MVT::v4i32:
      SMULLOpc = ARM64::SMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = ARM64::SMULLv2i32_indexed;
      break;
    }
  } else if (IntNo == Intrinsic::arm64_neon_umull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized UMULL.");
    case MVT::v4i32:
      SMULLOpc = ARM64::UMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = ARM64::UMULLv2i32_indexed;
      break;
    }
  } else
    llvm_unreachable("Unrecognized intrinsic.");

  return CurDAG->getMachineNode(SMULLOpc, SDLoc(N), N->getValueType(0), Ops);
}

/// SelectArithExtendedRegister - Select an "extended register" operand. This
/// operand folds in an extend followed by an optional left shift.
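/// For illustration only (an assumed pattern): (add x0, (shl (sext_inreg x1,
/// i8), 2)) can fold its RHS here as "sxtb #2", selecting to
/// "add x0, x0, w1, sxtb #2" instead of a separate extend and shift.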
bool ARM64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
                                                    SDValue &Shift) {
  unsigned ShiftVal = 0;
  ARM64_AM::ShiftExtendType Ext;

  if (N.getOpcode() == ISD::SHL) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return false;
    ShiftVal = CSD->getZExtValue();
    if (ShiftVal > 4)
      return false;

    Ext = getExtendTypeForNode(N.getOperand(0));
    if (Ext == ARM64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0).getOperand(0);
  } else {
    Ext = getExtendTypeForNode(N);
    if (Ext == ARM64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0);
  }

  // ARM64 mandates that the RHS of the operation must use the smallest
  // register class that could contain the size being extended from. Thus,
  // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
  // there might not be an actual 32-bit value in the program. We can
  // (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
  if (Reg.getValueType() == MVT::i64 && Ext != ARM64_AM::UXTX &&
      Ext != ARM64_AM::SXTX) {
    SDValue SubReg = CurDAG->getTargetConstant(ARM64::sub_32, MVT::i32);
    MachineSDNode *Node = CurDAG->getMachineNode(
        TargetOpcode::EXTRACT_SUBREG, SDLoc(N), MVT::i32, Reg, SubReg);
    Reg = SDValue(Node, 0);
  }

  Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), MVT::i32);
  return isWorthFolding(N);
}

/// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
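/// For illustration only (assumed values): with Size == 8, an address of the
/// form (add x1, 32) selects as Base = x1, OffImm = 4 (32 divided by the
/// scale of 8), matching "ldr xN, [x1, #32]".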
bool ARM64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
                                              SDValue &Base, SDValue &OffImm) {
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
    OffImm = CurDAG->getTargetConstant(0, MVT::i64);
    return true;
  }

  if (N.getOpcode() == ARM64ISD::ADDlow) {
    GlobalAddressSDNode *GAN =
        dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
    Base = N.getOperand(0);
    OffImm = N.getOperand(1);
    if (!GAN)
      return true;

    const GlobalValue *GV = GAN->getGlobal();
    unsigned Alignment = GV->getAlignment();
    const DataLayout *DL = TLI->getDataLayout();
    if (Alignment == 0 && !Subtarget->isTargetDarwin())
      Alignment = DL->getABITypeAlignment(GV->getType()->getElementType());

    if (Alignment >= Size)
      return true;
  }

  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t RHSC = (int64_t)RHS->getZExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
        }
        OffImm = CurDAG->getTargetConstant(RHSC >> Scale, MVT::i64);
        return true;
      }
    }
  }

  // Before falling back to our general case, check if the unscaled
  // instructions can handle this. If so, that's preferable.
  if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
    return false;

  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  //    add x0, Xbase, #offset
  //    ldr x0, [x0]
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, MVT::i64);
  return true;
}

/// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
/// immediate" address. This should only match when there is an offset that
/// is not valid for a scaled immediate addressing mode. The "Size" argument
/// is the size in bytes of the memory reference, which is needed here to know
/// what is valid for a scaled immediate.
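/// For illustration only (assumed values): with Size == 8, an offset of 12
/// is rejected by the scaled form (it is not a multiple of 8) but lands in
/// [-256, 256), so it selects here and matches "ldur xN, [x1, #12]".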
bool ARM64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
                                               SDValue &Base, SDValue &OffImm) {
  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int64_t RHSC = RHS->getSExtValue();
    // If the offset is valid as a scaled immediate, don't match here.
    if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
        RHSC < (0x1000 << Log2_32(Size)))
      return false;
    if (RHSC >= -256 && RHSC < 256) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        const TargetLowering *TLI = getTargetLowering();
        Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i64);
      return true;
    }
  }
  return false;
}

static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
  SDValue SubReg = CurDAG->getTargetConstant(ARM64::sub_32, MVT::i32);
  SDValue ImpDef = SDValue(
      CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, SDLoc(N), MVT::i64),
      0);
  MachineSDNode *Node = CurDAG->getMachineNode(
      TargetOpcode::INSERT_SUBREG, SDLoc(N), MVT::i64, ImpDef, N, SubReg);
  return SDValue(Node, 0);
}

static SDValue WidenIfNeeded(SelectionDAG *CurDAG, SDValue N) {
  if (N.getValueType() == MVT::i32) {
    return Widen(CurDAG, N);
  }

  return N;
}

/// \brief Check if the given SHL node (\p N) can be used to form an
/// extended register for an addressing mode.
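/// For illustration only (an assumed pattern): for Size == 8, the node
/// (shl (and x1, 0xffffffff), 3) yields Ext = UXTW with a shift of 3,
/// i.e. the operand of "ldr xN, [x0, w1, uxtw #3]".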
bool ARM64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
                                          SDValue &Offset, SDValue &Imm) {
  assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
  ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (CSD && (CSD->getZExtValue() & 0x7) == CSD->getZExtValue()) {

    ARM64_AM::ShiftExtendType Ext = getExtendTypeForNode(N.getOperand(0), true);
    if (Ext == ARM64_AM::InvalidShiftExtend) {
      Ext = ARM64_AM::UXTX;
      Offset = WidenIfNeeded(CurDAG, N.getOperand(0));
    } else {
      Offset = WidenIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
    }

    unsigned LegalShiftVal = Log2_32(Size);
    unsigned ShiftVal = CSD->getZExtValue();

    if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
      return false;

    Imm = CurDAG->getTargetConstant(
        ARM64_AM::getMemExtendImm(Ext, ShiftVal != 0), MVT::i32);
    if (isWorthFolding(N))
      return true;
  }
  return false;
}

bool ARM64DAGToDAGISel::SelectAddrModeRO(SDValue N, unsigned Size,
                                         SDValue &Base, SDValue &Offset,
                                         SDValue &Imm) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);

  // We don't want to match immediate adds here, because they are better lowered
  // to the register-immediate addressing modes.
  if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
    return false;

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Remember if it is worth folding N when it produces extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, Offset, Imm)) {
    Base = LHS;
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, Offset, Imm)) {
    Base = RHS;
    return true;
  }

  ARM64_AM::ShiftExtendType Ext = ARM64_AM::UXTX;
  // Try to match an unshifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(LHS, true)) != ARM64_AM::InvalidShiftExtend) {
    Base = RHS;
    Offset = WidenIfNeeded(CurDAG, LHS.getOperand(0));
    Imm = CurDAG->getTargetConstant(ARM64_AM::getMemExtendImm(Ext, false),
                                    MVT::i32);
    if (isWorthFolding(LHS))
      return true;
  }

  // Try to match an unshifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(RHS, true)) != ARM64_AM::InvalidShiftExtend) {
    Base = LHS;
    Offset = WidenIfNeeded(CurDAG, RHS.getOperand(0));
    Imm = CurDAG->getTargetConstant(ARM64_AM::getMemExtendImm(Ext, false),
                                    MVT::i32);
    if (isWorthFolding(RHS))
      return true;
  }

  // Match any non-shifted, non-extend, non-immediate add expression.
  Base = LHS;
  Offset = WidenIfNeeded(CurDAG, RHS);
  Ext = ARM64_AM::UXTX;
  Imm = CurDAG->getTargetConstant(ARM64_AM::getMemExtendImm(Ext, false),
                                  MVT::i32);
  // Reg1 + Reg2 is free: no check needed.
  return true;
}

SDValue ARM64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
  static unsigned RegClassIDs[] = { ARM64::DDRegClassID, ARM64::DDDRegClassID,
                                    ARM64::DDDDRegClassID };
  static unsigned SubRegs[] = { ARM64::dsub0, ARM64::dsub1,
                                ARM64::dsub2, ARM64::dsub3 };

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue ARM64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
  static unsigned RegClassIDs[] = { ARM64::QQRegClassID, ARM64::QQQRegClassID,
                                    ARM64::QQQQRegClassID };
  static unsigned SubRegs[] = { ARM64::qsub0, ARM64::qsub1,
                                ARM64::qsub2, ARM64::qsub3 };

  return createTuple(Regs, RegClassIDs, SubRegs);
}

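// For illustration only (an assumed two-register case): createQTuple on
// {A, B} emits REG_SEQUENCE(QQRegClassID, A, qsub0, B, qsub1), tying the two
// Q registers into a single QQ super-register value.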
SDValue ARM64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
                                       unsigned RegClassIDs[],
                                       unsigned SubRegs[]) {
  // There's no special register-class for a vector-list of 1 element: it's just
  // a vector.
  if (Regs.size() == 1)
    return Regs[0];

  assert(Regs.size() >= 2 && Regs.size() <= 4);

  SDLoc DL(Regs[0].getNode());

  SmallVector<SDValue, 4> Ops;

  // First operand of REG_SEQUENCE is the desired RegClass.
  Ops.push_back(
      CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], MVT::i32));

  // Then we get pairs of source & subregister-position for the components.
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Ops.push_back(Regs[i]);
    Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], MVT::i32));
  }

  SDNode *N =
      CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
  return SDValue(N, 0);
}

SDNode *ARM64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs,
                                       unsigned Opc, bool isExt) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  unsigned ExtOff = isExt;

  // Form a REG_SEQUENCE to force register allocation.
  unsigned Vec0Off = ExtOff + 1;
  SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
                               N->op_begin() + Vec0Off + NumVecs);
  SDValue RegSeq = createQTuple(Regs);

  SmallVector<SDValue, 6> Ops;
  if (isExt)
    Ops.push_back(N->getOperand(1));
  Ops.push_back(RegSeq);
  Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
  return CurDAG->getMachineNode(Opc, dl, VT, Ops);
}

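// For illustration only (standard AArch64 addressing forms): a pre-indexed
// load "ldr x0, [x1, #8]!" updates x1 to x1 + 8 before loading, while the
// post-indexed "ldr x0, [x1], #8" loads from x1 and then increments it.
// SelectIndexedLoad below only picks the instruction; validity was checked
// when the load was marked as indexed.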
SDNode *ARM64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  if (LD->isUnindexed())
    return nullptr;
  EVT VT = LD->getMemoryVT();
  EVT DstVT = N->getValueType(0);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;

  // We're not doing validity checking here. That was done when checking
  // if we should mark the load as indexed or not. We're just selecting
  // the right instruction.
  unsigned Opcode = 0;

  ISD::LoadExtType ExtType = LD->getExtensionType();
  bool InsertTo64 = false;
  if (VT == MVT::i64)
    Opcode = IsPre ? ARM64::LDRXpre_isel : ARM64::LDRXpost_isel;
  else if (VT == MVT::i32) {
    if (ExtType == ISD::NON_EXTLOAD)
      Opcode = IsPre ? ARM64::LDRWpre_isel : ARM64::LDRWpost_isel;
    else if (ExtType == ISD::SEXTLOAD)
      Opcode = IsPre ? ARM64::LDRSWpre_isel : ARM64::LDRSWpost_isel;
    else {
      Opcode = IsPre ? ARM64::LDRWpre_isel : ARM64::LDRWpost_isel;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i16) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? ARM64::LDRSHXpre_isel : ARM64::LDRSHXpost_isel;
      else
        Opcode = IsPre ? ARM64::LDRSHWpre_isel : ARM64::LDRSHWpost_isel;
    } else {
      Opcode = IsPre ? ARM64::LDRHHpre_isel : ARM64::LDRHHpost_isel;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i8) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? ARM64::LDRSBXpre_isel : ARM64::LDRSBXpost_isel;
      else
        Opcode = IsPre ? ARM64::LDRSBWpre_isel : ARM64::LDRSBWpost_isel;
    } else {
      Opcode = IsPre ? ARM64::LDRBBpre_isel : ARM64::LDRBBpost_isel;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::f32) {
    Opcode = IsPre ? ARM64::LDRSpre_isel : ARM64::LDRSpost_isel;
  } else if (VT == MVT::f64 || VT.is64BitVector()) {
    Opcode = IsPre ? ARM64::LDRDpre_isel : ARM64::LDRDpost_isel;
  } else if (VT.is128BitVector()) {
    Opcode = IsPre ? ARM64::LDRQpre_isel : ARM64::LDRQpost_isel;
  } else
    return nullptr;
  SDValue Chain = LD->getChain();
  SDValue Base = LD->getBasePtr();
  ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
  int OffsetVal = (int)OffsetOp->getZExtValue();
  SDValue Offset = CurDAG->getTargetConstant(OffsetVal, MVT::i64);
  SDValue Ops[] = { Base, Offset, Chain };
  SDNode *Res = CurDAG->getMachineNode(Opcode, SDLoc(N), DstVT, MVT::i64,
                                       MVT::Other, Ops);
  // Either way, we're replacing the node, so tell the caller that.
  Done = true;
  if (InsertTo64) {
    SDValue SubReg = CurDAG->getTargetConstant(ARM64::sub_32, MVT::i32);
    SDNode *Sub = CurDAG->getMachineNode(
        ARM64::SUBREG_TO_REG, SDLoc(N), MVT::i64,
        CurDAG->getTargetConstant(0, MVT::i64), SDValue(Res, 0), SubReg);
    ReplaceUses(SDValue(N, 0), SDValue(Sub, 0));
    ReplaceUses(SDValue(N, 1), SDValue(Res, 1));
    ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
    return nullptr;
  }
  return Res;
}

SDNode *ARM64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                                      unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(N->getOperand(2)); // Mem operand
  Ops.push_back(Chain);

  std::vector<EVT> ResTys;
  ResTys.push_back(MVT::Untyped);
  ResTys.push_back(MVT::Other);

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);
  for (unsigned i = 0; i < NumVecs; ++i)
    ReplaceUses(SDValue(N, i),
                CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
  return nullptr;
}

SDNode *ARM64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
                                          unsigned Opc, unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(N->getOperand(1)); // Mem operand
  Ops.push_back(N->getOperand(2)); // Incremental
  Ops.push_back(Chain);

  std::vector<EVT> ResTys;
  ResTys.push_back(MVT::i64); // Type of the write back register
  ResTys.push_back(MVT::Untyped);
  ResTys.push_back(MVT::Other);

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Update uses of write back register
  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));

  // Update uses of vector list
  SDValue SuperReg = SDValue(Ld, 1);
  if (NumVecs == 1)
    ReplaceUses(SDValue(N, 0), SuperReg);
  else
    for (unsigned i = 0; i < NumVecs; ++i)
      ReplaceUses(SDValue(N, i),
                  CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));

  // Update the chain
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
  return nullptr;
}

SDNode *ARM64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
                                       unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(RegSeq);
  Ops.push_back(N->getOperand(NumVecs + 2)); // Base register
  Ops.push_back(N->getOperand(0));           // Chain
  SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);

  return St;
}

SDNode *ARM64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
                                           unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  SmallVector<EVT, 2> ResTys;
  ResTys.push_back(MVT::i64);   // Type of the write back register
  ResTys.push_back(MVT::Other); // Type for the Chain

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(RegSeq);
  Ops.push_back(N->getOperand(NumVecs + 1)); // Base register
  Ops.push_back(N->getOperand(NumVecs + 2)); // Incremental
  Ops.push_back(N->getOperand(0));           // Chain
  SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  return St;
}

/// WidenVector - Given a value in the V64 register class, produce the
/// equivalent value in the V128 register class.
class WidenVector {
  SelectionDAG &DAG;

public:
  WidenVector(SelectionDAG &DAG) : DAG(DAG) {}

  SDValue operator()(SDValue V64Reg) {
    EVT VT = V64Reg.getValueType();
    unsigned NarrowSize = VT.getVectorNumElements();
    MVT EltTy = VT.getVectorElementType().getSimpleVT();
    MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
    SDLoc DL(V64Reg);

    SDValue Undef =
        SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
    return DAG.getTargetInsertSubreg(ARM64::dsub, DL, WideTy, Undef, V64Reg);
  }
};

/// NarrowVector - Given a value in the V128 register class, produce the
/// equivalent value in the V64 register class.
static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
  EVT VT = V128Reg.getValueType();
  unsigned WideSize = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType().getSimpleVT();
  MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);

  return DAG.getTargetExtractSubreg(ARM64::dsub, SDLoc(V128Reg), NarrowTy,
                                    V128Reg);
}

SDNode *ARM64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
                                          unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  std::vector<EVT> ResTys;
  ResTys.push_back(MVT::Untyped);
  ResTys.push_back(MVT::Other);

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(RegSeq);
  Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64));
  Ops.push_back(N->getOperand(NumVecs + 3));
  Ops.push_back(N->getOperand(0));
  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);

  EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
  static unsigned QSubs[] = { ARM64::qsub0, ARM64::qsub1, ARM64::qsub2,
                              ARM64::qsub3 };
  for (unsigned i = 0; i < NumVecs; ++i) {
    SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
    if (Narrow)
      NV = NarrowVector(NV, *CurDAG);
    ReplaceUses(SDValue(N, i), NV);
  }

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));

  return nullptr;
}

SDNode *ARM64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
                                              unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  std::vector<EVT> ResTys;
  ResTys.push_back(MVT::i64); // Type of the write back register
  ResTys.push_back(MVT::Untyped);
  ResTys.push_back(MVT::Other);

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(RegSeq);
  Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64)); // Lane Number
  Ops.push_back(N->getOperand(NumVecs + 2)); // Base register
  Ops.push_back(N->getOperand(NumVecs + 3)); // Incremental
  Ops.push_back(N->getOperand(0));
  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Update uses of the write back register
  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));

  // Update uses of the vector list
  SDValue SuperReg = SDValue(Ld, 1);
  if (NumVecs == 1) {
    ReplaceUses(SDValue(N, 0),
                Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg);
  } else {
    EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
    static unsigned QSubs[] = { ARM64::qsub0, ARM64::qsub1, ARM64::qsub2,
                                ARM64::qsub3 };
    for (unsigned i = 0; i < NumVecs; ++i) {
      SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
                                                  SuperReg);
      if (Narrow)
        NV = NarrowVector(NV, *CurDAG);
      ReplaceUses(SDValue(N, i), NV);
    }
  }

  // Update the Chain
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));

  return nullptr;
}

SDNode *ARM64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
                                           unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(RegSeq);
  Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64));
  Ops.push_back(N->getOperand(NumVecs + 3));
  Ops.push_back(N->getOperand(0));
  SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

  return St;
}

SDNode *ARM64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
                                               unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  SmallVector<EVT, 2> ResTys;
  ResTys.push_back(MVT::i64); // Type of the write back register
  ResTys.push_back(MVT::Other);

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(RegSeq);
  Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64));
  Ops.push_back(N->getOperand(NumVecs + 2)); // Base Register
  Ops.push_back(N->getOperand(NumVecs + 3)); // Incremental
  Ops.push_back(N->getOperand(0));
  SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

  return St;
}

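// For illustration only (an assumed pattern): (and (srl x, 3), 0x1f)
// extracts bit range [7:3] of x, which the code below recognizes as
// UBFM x, #3, #7 (i.e. "ubfx xD, xN, #3, #5").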
static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
                                       unsigned &Opc, SDValue &Opd0,
                                       unsigned &LSB, unsigned &MSB,
                                       unsigned NumberOfIgnoredLowBits,
                                       bool BiggerPattern) {
  assert(N->getOpcode() == ISD::AND &&
         "N must be an AND operation to call this function");

  EVT VT = N->getValueType(0);

  // Here we can test the type of VT and return false when the type does not
  // match, but since it is done prior to that call in the current context
  // we turned that into an assert to avoid redundant code.
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "Type checking must have been done before calling this function");

  // FIXME: simplify-demanded-bits in DAGCombine will probably have
  // changed the AND node to a 32-bit mask operation. We'll have to
  // undo that as part of the transform here if we want to catch all
  // the opportunities.
  // Currently the NumberOfIgnoredLowBits argument helps to recover
  // from these situations when matching bigger pattern (bitfield insert).

  // For unsigned extracts, check for a shift right and mask
  uint64_t And_imm = 0;
  if (!isOpcWithIntImmediate(N, ISD::AND, And_imm))
    return false;

  const SDNode *Op0 = N->getOperand(0).getNode();

  // Because of simplify-demanded-bits in DAGCombine, the mask may have been
  // simplified. Try to undo that
  And_imm |= (1 << NumberOfIgnoredLowBits) - 1;

  // The immediate is a mask of the low bits iff imm & (imm+1) == 0
  if (And_imm & (And_imm + 1))
    return false;

  bool ClampMSB = false;
  uint64_t Srl_imm = 0;
  // Handle the SRL + ANY_EXTEND case.
  if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
      isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, Srl_imm)) {
    // Extend the incoming operand of the SRL to 64-bit.
    Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
    // Make sure to clamp the MSB so that we preserve the semantics of the
    // original operations.
    ClampMSB = true;
  } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
             isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
                                   Srl_imm)) {
    // If the shift result was truncated, we can still combine them.
    Opd0 = Op0->getOperand(0).getOperand(0);

    // Use the type of SRL node.
    VT = Opd0->getValueType(0);
  } else if (isOpcWithIntImmediate(Op0, ISD::SRL, Srl_imm)) {
    Opd0 = Op0->getOperand(0);
  } else if (BiggerPattern) {
    // Let's pretend a 0 shift right has been performed.
    // The resulting code will be at least as good as the original one
    // plus it may expose more opportunities for bitfield insert pattern.
    // FIXME: Currently we limit this to the bigger pattern, because
    // some optimizations expect AND and not UBFM
    Opd0 = N->getOperand(0);
  } else
    return false;

  assert((BiggerPattern || (Srl_imm > 0 && Srl_imm < VT.getSizeInBits())) &&
         "bad amount in shift node!");

  LSB = Srl_imm;
  MSB = Srl_imm + (VT == MVT::i32 ? CountTrailingOnes_32(And_imm)
                                  : CountTrailingOnes_64(And_imm)) -
        1;
  if (ClampMSB)
    // Since we're moving the extend before the right shift operation, we need
    // to clamp the MSB to make sure we don't shift in undefined bits instead of
    // the zeros which would get shifted in with the original right shift
    // operation.
    MSB = MSB > 31 ? 31 : MSB;

  Opc = VT == MVT::i32 ? ARM64::UBFMWri : ARM64::UBFMXri;
  return true;
}

static bool isOneBitExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
                                     unsigned &LSB, unsigned &MSB) {
  // We are looking for the following pattern which basically extracts a single
  // bit from the source value and places it in the LSB of the destination
  // value; all other bits of the destination value are set to zero:
  //
  // Value2 = AND Value, MaskImm
  // SRL Value2, ShiftImm
  //
  // with MaskImm >> ShiftImm == 1.
  //
  // This gets selected into a single UBFM:
  //
  // UBFM Value, ShiftImm, ShiftImm
  //

  if (N->getOpcode() != ISD::SRL)
    return false;

  uint64_t And_mask = 0;
  if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, And_mask))
    return false;

  Opd0 = N->getOperand(0).getOperand(0);

  uint64_t Srl_imm = 0;
  if (!isIntImmediate(N->getOperand(1), Srl_imm))
    return false;

  // Check whether we really have a one bit extract here.
  if (And_mask >> Srl_imm == 0x1) {
    if (N->getValueType(0) == MVT::i32)
      Opc = ARM64::UBFMWri;
    else
      Opc = ARM64::UBFMXri;

    LSB = MSB = Srl_imm;

    return true;
  }

  return false;
}

static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
                                       unsigned &LSB, unsigned &MSB,
                                       bool BiggerPattern) {
  assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
         "N must be a SHR/SRA operation to call this function");

  EVT VT = N->getValueType(0);

  // Here we can test the type of VT and return false when the type does not
  // match, but since it is done prior to that call in the current context
  // we turned that into an assert to avoid redundant code.
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "Type checking must have been done before calling this function");

  // Check for AND + SRL doing a one bit extract.
  if (isOneBitExtractOpFromShr(N, Opc, Opd0, LSB, MSB))
    return true;

  // We're looking for a shift of a shift.
  uint64_t Shl_imm = 0;
  uint64_t Trunc_bits = 0;
  if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
    Opd0 = N->getOperand(0).getOperand(0);
  } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
             N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
    // We are looking for a shift of truncate. Truncate from i64 to i32 could
    // be considered as setting high 32 bits as zero. Our strategy here is to
    // always generate 64bit UBFM. This consistency will help the CSE pass
    // later find more redundancy.
    Opd0 = N->getOperand(0).getOperand(0);
    Trunc_bits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
    VT = Opd0->getValueType(0);
    assert(VT == MVT::i64 && "the promoted type should be i64");
  } else if (BiggerPattern) {
    // Let's pretend a 0 shift left has been performed.
    // FIXME: Currently we limit this to the bigger pattern case,
    // because some optimizations expect AND and not UBFM
    Opd0 = N->getOperand(0);
  } else
    return false;

  assert(Shl_imm < VT.getSizeInBits() && "bad amount in shift node!");
  uint64_t Srl_imm = 0;
  if (!isIntImmediate(N->getOperand(1), Srl_imm))
    return false;

  assert(Srl_imm > 0 && Srl_imm < VT.getSizeInBits() &&
         "bad amount in shift node!");
  // Note: The width operand is encoded as width-1.
  unsigned Width = VT.getSizeInBits() - Trunc_bits - Srl_imm - 1;
  int sLSB = Srl_imm - Shl_imm;
  if (sLSB < 0)
    return false;
  LSB = sLSB;
  MSB = LSB + Width;
  // SRA requires a signed extraction
  if (VT == MVT::i32)
    Opc = N->getOpcode() == ISD::SRA ? ARM64::SBFMWri : ARM64::UBFMWri;
  else
    Opc = N->getOpcode() == ISD::SRA ? ARM64::SBFMXri : ARM64::UBFMXri;
  return true;
}

static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
                                SDValue &Opd0, unsigned &LSB, unsigned &MSB,
                                unsigned NumberOfIgnoredLowBits = 0,
                                bool BiggerPattern = false) {
  if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
    return false;

  switch (N->getOpcode()) {
  default:
    if (!N->isMachineOpcode())
      return false;
    break;
  case ISD::AND:
    return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, LSB, MSB,
                                      NumberOfIgnoredLowBits, BiggerPattern);
  case ISD::SRL:
  case ISD::SRA:
    return isBitfieldExtractOpFromShr(N, Opc, Opd0, LSB, MSB, BiggerPattern);
  }

  unsigned NOpc = N->getMachineOpcode();
  switch (NOpc) {
  default:
    return false;
  case ARM64::SBFMWri:
  case ARM64::UBFMWri:
  case ARM64::SBFMXri:
  case ARM64::UBFMXri:
    Opc = NOpc;
    Opd0 = N->getOperand(0);
    LSB = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
    MSB = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
    return true;
  }

  return false;
}

SDNode *ARM64DAGToDAGISel::SelectBitfieldExtractOp(SDNode *N) {
  unsigned Opc, LSB, MSB;
  SDValue Opd0;
  if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, LSB, MSB))
    return nullptr;

  EVT VT = N->getValueType(0);

  // If the bit extract operation is 64bit but the original type is 32bit, we
  // need to add one EXTRACT_SUBREG.
  if ((Opc == ARM64::SBFMXri || Opc == ARM64::UBFMXri) && VT == MVT::i32) {
    SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(LSB, MVT::i64),
                       CurDAG->getTargetConstant(MSB, MVT::i64)};

    SDNode *BFM = CurDAG->getMachineNode(Opc, SDLoc(N), MVT::i64, Ops64);
    SDValue SubReg = CurDAG->getTargetConstant(ARM64::sub_32, MVT::i32);
    MachineSDNode *Node =
        CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, SDLoc(N), MVT::i32,
                               SDValue(BFM, 0), SubReg);
    return Node;
  }

  SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(LSB, VT),
                   CurDAG->getTargetConstant(MSB, VT)};
  return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
}

/// Does DstMask form a complementary pair with the mask provided by
/// BitsToBeInserted, suitable for use in a BFI instruction? Roughly speaking,
/// this asks whether DstMask zeroes precisely those bits that will be set by
/// the other half.
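/// For illustration only (assumed i32 masks): DstMask 0xffff0000 paired with
/// inserted bits 0x0000ffff qualifies (they are disjoint and together cover
/// the register), while DstMask 0xff00ff00 with the same bits does not.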
static bool isBitfieldDstMask(uint64_t DstMask, APInt BitsToBeInserted,
                              unsigned NumberOfIgnoredHighBits, EVT VT) {
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "i32 or i64 mask type expected!");
  unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;

  APInt SignificantDstMask = APInt(BitWidth, DstMask);
  APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);

  return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
         (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue();
}

// Look for bits that will be useful for later uses.
// A bit is considered useless as soon as it is dropped and never used
// before it has been dropped.
// E.g., looking for useful bits of x
// 1. y = x & 0x7
// 2. z = y >> 2
// After #1, the useful bits of x are 0x7, and they live through y.
// After #2, the useful bits of x are 0x4.
// However, if x is used in an unpredictable instruction, then all its bits
// are useful.
// E.g.
// 1. y = x & 0x7
// 2. z = y >> 2
// 3. str x, [@x]
static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);

static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
                                              unsigned Depth) {
  uint64_t Imm =
      cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
  Imm = ARM64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
  UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
  getUsefulBits(Op, UsefulBits, Depth + 1);
}

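// For illustration only (assumed UBFM operands): with Imm (ImmR) = 4 and
// MSB (ImmS) = 7, source bits [7:4] land in result bits [3:0], so the useful
// bits of the source are the result's low four useful bits shifted left by 4.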
static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
                                             uint64_t Imm, uint64_t MSB,
                                             unsigned Depth) {
  // inherit the bitwidth value
  APInt OpUsefulBits(UsefulBits);
  OpUsefulBits = 1;

  if (MSB >= Imm) {
    OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
    --OpUsefulBits;
    // The interesting part will be in the lower part of the result
    getUsefulBits(Op, OpUsefulBits, Depth + 1);
    // The interesting part was starting at Imm in the argument
    OpUsefulBits = OpUsefulBits.shl(Imm);
  } else {
    OpUsefulBits = OpUsefulBits.shl(MSB + 1);
    --OpUsefulBits;
    // The interesting part will be shifted in the result
    OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
    getUsefulBits(Op, OpUsefulBits, Depth + 1);
    // The interesting part was at zero in the argument
    OpUsefulBits = OpUsefulBits.lshr(OpUsefulBits.getBitWidth() - Imm);
  }

  UsefulBits &= OpUsefulBits;
}

static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
                                  unsigned Depth) {
  uint64_t Imm =
      cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
  uint64_t MSB =
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();

  getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
}

static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
                                              unsigned Depth) {
  uint64_t ShiftTypeAndValue =
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
  APInt Mask(UsefulBits);
  Mask.clearAllBits();
  Mask.flipAllBits();

  if (ARM64_AM::getShiftType(ShiftTypeAndValue) == ARM64_AM::LSL) {
    // Shift Left
    uint64_t ShiftAmt = ARM64_AM::getShiftValue(ShiftTypeAndValue);
    Mask = Mask.shl(ShiftAmt);
    getUsefulBits(Op, Mask, Depth + 1);
    Mask = Mask.lshr(ShiftAmt);
  } else if (ARM64_AM::getShiftType(ShiftTypeAndValue) == ARM64_AM::LSR) {
    // Shift Right
    // We do not handle ARM64_AM::ASR, because the sign will change the
    // number of useful bits
    uint64_t ShiftAmt = ARM64_AM::getShiftValue(ShiftTypeAndValue);
    Mask = Mask.lshr(ShiftAmt);
    getUsefulBits(Op, Mask, Depth + 1);
    Mask = Mask.shl(ShiftAmt);
  } else
    return;

  UsefulBits &= Mask;
}

static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
                                 unsigned Depth) {
  uint64_t Imm =
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
  uint64_t MSB =
      cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();

  if (Op.getOperand(1) == Orig)
    return getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);

  APInt OpUsefulBits(UsefulBits);
  OpUsefulBits = 1;

  if (MSB >= Imm) {
    OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
    --OpUsefulBits;
    UsefulBits &= ~OpUsefulBits;
    getUsefulBits(Op, UsefulBits, Depth + 1);
  } else {
    OpUsefulBits = OpUsefulBits.shl(MSB + 1);
    --OpUsefulBits;
    UsefulBits = ~(OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm));
    getUsefulBits(Op, UsefulBits, Depth + 1);
  }
}

static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
                                SDValue Orig, unsigned Depth) {

  // Users of this node should have already been instruction selected
  // FIXME: Can we turn that into an assert?
  if (!UserNode->isMachineOpcode())
    return;

  switch (UserNode->getMachineOpcode()) {
  default:
    return;
  case ARM64::ANDSWri:
  case ARM64::ANDSXri:
  case ARM64::ANDWri:
  case ARM64::ANDXri:
    // We increment Depth only when we call the getUsefulBits
    return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
                                             Depth);
  case ARM64::UBFMWri:
  case ARM64::UBFMXri:
    return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);

  case ARM64::ORRWrs:
  case ARM64::ORRXrs:
    if (UserNode->getOperand(1) != Orig)
      return;
    return getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
                                             Depth);
  case ARM64::BFMWri:
  case ARM64::BFMXri:
    return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);
  }
}

static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
  if (Depth >= 6)
    return;
  // Initialize UsefulBits
  if (!Depth) {
    unsigned Bitwidth = Op.getValueType().getScalarType().getSizeInBits();
    // At the beginning, assume every produced bits is useful
    UsefulBits = APInt(Bitwidth, 0);
    UsefulBits.flipAllBits();
  }
  APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);

  for (SDNode *Node : Op.getNode()->uses()) {
    // A use cannot produce useful bits
    APInt UsefulBitsForUse = APInt(UsefulBits);
    getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
    UsersUsefulBits |= UsefulBitsForUse;
  }
  // UsefulBits contains the produced bits that are meaningful for the
  // current definition, thus a user cannot make a bit meaningful at
  // this point
  UsefulBits &= UsersUsefulBits;
}

/// Create a machine node performing a notional SHL of Op by ShlAmount. If
/// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
/// 0, return Op unchanged.
static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
  if (ShlAmount == 0)
    return Op;

  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  unsigned UBFMOpc = BitWidth == 32 ? ARM64::UBFMWri : ARM64::UBFMXri;

  SDNode *ShiftNode;
  if (ShlAmount > 0) {
    // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
    ShiftNode = CurDAG->getMachineNode(
        UBFMOpc, SDLoc(Op), VT, Op,
        CurDAG->getTargetConstant(BitWidth - ShlAmount, VT),
        CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, VT));
  } else {
    // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
    assert(ShlAmount < 0 && "expected right shift");
    int ShrAmount = -ShlAmount;
    ShiftNode = CurDAG->getMachineNode(
        UBFMOpc, SDLoc(Op), VT, Op, CurDAG->getTargetConstant(ShrAmount, VT),
        CurDAG->getTargetConstant(BitWidth - 1, VT));
  }

  return SDValue(ShiftNode, 0);
}

/// Does this tree qualify as an attempt to move a bitfield into position,
/// essentially "(and (shl VAL, N), Mask)".
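/// For illustration only (an assumed pattern): (and (shl x, 3), 0xf8)
/// matches with Src = x, ShiftAmount = 3 and MaskWidth = 5; if the shift and
/// the mask position disagree, an extra UBFM is emitted to line the field up.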
static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
                                    SDValue &Src, int &ShiftAmount,
                                    int &MaskWidth) {
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  (void)BitWidth;
  assert(BitWidth == 32 || BitWidth == 64);

  APInt KnownZero, KnownOne;
  CurDAG->computeKnownBits(Op, KnownZero, KnownOne);

  // Non-zero in the sense that they're not provably zero, which is the key
  // point if we want to use this value
  uint64_t NonZeroBits = (~KnownZero).getZExtValue();

  // Discard a constant AND mask if present. It's safe because the node will
  // already have been factored into the computeKnownBits calculation above.
  uint64_t AndImm;
  if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
    assert((~APInt(BitWidth, AndImm) & ~KnownZero) == 0);
    Op = Op.getOperand(0);
  }

  uint64_t ShlImm;
  if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm))
    return false;
  Op = Op.getOperand(0);

  if (!isShiftedMask_64(NonZeroBits))
    return false;

  ShiftAmount = countTrailingZeros(NonZeroBits);
  MaskWidth = CountTrailingOnes_64(NonZeroBits >> ShiftAmount);

  // BFI encompasses sufficiently many nodes that it's worth inserting an extra
  // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
  // amount.
  Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount);
  return true;
}

// Given an OR operation, check if we have the following pattern:
// ubfm c, b, imm, imm2 (or something that does the same job, see
//                      isBitfieldExtractOp)
// d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
//                 countTrailingZeros(mask2) == imm2 - imm + 1
// f = d | c
// If yes, the given reference arguments will be updated so that one can
// replace the OR instruction with:
// f = Opc Opd0, Opd1, LSB, MSB ; where Opc is a BFM, LSB = imm, and MSB = imm2
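// For example (illustrative): in
//   f = (or (and d, 0xffffff0f), (and (shl s, 4), 0xf0))
// the second operand places bits [3:0] of s at bit 4, the first mask keeps
// every other bit of d, and the OR can be replaced by
// BFMWri d, s, #28, #3 (the encoding of "BFI d, s, #4, #4").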
static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Dst,
                                     SDValue &Src, unsigned &ImmR,
                                     unsigned &ImmS, SelectionDAG *CurDAG) {
  assert(N->getOpcode() == ISD::OR && "Expect an OR operation");

  // Set Opc
  EVT VT = N->getValueType(0);
  if (VT == MVT::i32)
    Opc = ARM64::BFMWri;
  else if (VT == MVT::i64)
    Opc = ARM64::BFMXri;
  else
    return false;

  // Because of simplify-demanded-bits in DAGCombine, involved masks may not
  // have the expected shape. Try to undo that.
  APInt UsefulBits;
  getUsefulBits(SDValue(N, 0), UsefulBits);

  unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
  unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();

  // OR is commutative, check both possibilities (does llvm provide a
  // way to do that directly, e.g., via code matcher?)
  SDValue OrOpd1Val = N->getOperand(1);
  SDNode *OrOpd0 = N->getOperand(0).getNode();
  SDNode *OrOpd1 = N->getOperand(1).getNode();
  for (int i = 0; i < 2;
       ++i, std::swap(OrOpd0, OrOpd1), OrOpd1Val = N->getOperand(0)) {
    unsigned BFXOpc;
    int DstLSB, Width;
    if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
                            NumberOfIgnoredLowBits, true)) {
      // Check that the returned opcode is compatible with the pattern,
      // i.e., same type and zero extended (U and not S).
      if ((BFXOpc != ARM64::UBFMXri && VT == MVT::i64) ||
          (BFXOpc != ARM64::UBFMWri && VT == MVT::i32))
        continue;

      // Compute the width of the bitfield insertion.
      DstLSB = 0;
      Width = ImmS - ImmR + 1;
      // FIXME: This constraint only catches plain bitfield insertion; we may
      // want to widen the pattern to grab the general bitfield move case.
      if (Width <= 0)
        continue;

      // If the mask on the insertee is correct, we have a BFXIL operation. We
      // can share the ImmR and ImmS values from the already-computed UBFM.
    } else if (isBitfieldPositioningOp(CurDAG, SDValue(OrOpd0, 0), Src,
                                       DstLSB, Width)) {
      ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
      ImmS = Width - 1;
    } else
      continue;

    // Check the second part of the pattern.
    EVT VT = OrOpd1->getValueType(0);
    assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");

    // Compute the known-zero bits for the candidate destination (first BFM)
    // operand. This catches more general cases than just looking for an AND
    // with an immediate: simplify-demanded-bits may have removed the AND
    // because it proved it was useless.
    APInt KnownZero, KnownOne;
    CurDAG->computeKnownBits(OrOpd1Val, KnownZero, KnownOne);

    // Check if there is enough room for the second operand to appear
    // in the first one.
    APInt BitsToBeInserted =
        APInt::getBitsSet(KnownZero.getBitWidth(), DstLSB, DstLSB + Width);

    if ((BitsToBeInserted & ~KnownZero) != 0)
      continue;

    // Set the BFM's first operand, i.e. the insertion destination.
    uint64_t Imm;
    if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
        isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
      // In that case, we can eliminate the AND.
      Dst = OrOpd1->getOperand(0);
    else
      // Maybe the AND has been removed by simplify-demanded-bits
      // or is useful because it discards more bits.
      Dst = OrOpd1Val;

    // Both parts match.
    return true;
  }

  return false;
}
SDNode *ARM64DAGToDAGISel::SelectBitfieldInsertOp(SDNode *N) {
  if (N->getOpcode() != ISD::OR)
    return nullptr;

  unsigned Opc;
  unsigned LSB, MSB;
  SDValue Opd0, Opd1;

  if (!isBitfieldInsertOpFromOr(N, Opc, Opd0, Opd1, LSB, MSB, CurDAG))
    return nullptr;

  EVT VT = N->getValueType(0);
  SDValue Ops[] = { Opd0,
                    Opd1,
                    CurDAG->getTargetConstant(LSB, VT),
                    CurDAG->getTargetConstant(MSB, VT) };
  return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
}
SDNode *ARM64DAGToDAGISel::SelectLIBM(SDNode *N) {
  EVT VT = N->getValueType(0);
  unsigned Variant;
  unsigned Opc;
  unsigned FRINTXOpcs[] = { ARM64::FRINTXSr, ARM64::FRINTXDr };

  if (VT == MVT::f32) {
    Variant = 0;
  } else if (VT == MVT::f64) {
    Variant = 1;
  } else
    return nullptr; // Unrecognized argument type. Fall back on default codegen.

  // Pick the FRINTX variant needed to set the flags.
  unsigned FRINTXOpc = FRINTXOpcs[Variant];

  switch (N->getOpcode()) {
  default:
    return nullptr; // Unrecognized libm ISD node. Fall back on default codegen.
  case ISD::FCEIL: {
    unsigned FRINTPOpcs[] = { ARM64::FRINTPSr, ARM64::FRINTPDr };
    Opc = FRINTPOpcs[Variant];
    break;
  }
  case ISD::FFLOOR: {
    unsigned FRINTMOpcs[] = { ARM64::FRINTMSr, ARM64::FRINTMDr };
    Opc = FRINTMOpcs[Variant];
    break;
  }
  case ISD::FTRUNC: {
    unsigned FRINTZOpcs[] = { ARM64::FRINTZSr, ARM64::FRINTZDr };
    Opc = FRINTZOpcs[Variant];
    break;
  }
  case ISD::FROUND: {
    unsigned FRINTAOpcs[] = { ARM64::FRINTASr, ARM64::FRINTADr };
    Opc = FRINTAOpcs[Variant];
    break;
  }
  }

  SDLoc dl(N);
  SDValue In = N->getOperand(0);
  SmallVector<SDValue, 2> Ops;
  Ops.push_back(In);
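
  // Under strict FP, additionally emit an FRINTX of the input and glue its
  // flag result into the rounding instruction created below. FRINTX rounds to
  // an integral value like the other FRINT* instructions but also signals the
  // Inexact exception when its result differs from its operand, which
  // (presumably) preserves the exception behavior of the libm call this node
  // replaces.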
  if (!TM.Options.UnsafeFPMath) {
    SDNode *FRINTX = CurDAG->getMachineNode(FRINTXOpc, dl, VT, MVT::Glue, In);
    Ops.push_back(SDValue(FRINTX, 1));
  }

  return CurDAG->getMachineNode(Opc, dl, VT, Ops);
}
bool
ARM64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
                                            unsigned RegWidth) {
  APFloat FVal(0.0);
  if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
    FVal = CN->getValueAPF();
  else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
    // Some otherwise illegal constants are allowed in this case.
    if (LN->getOperand(1).getOpcode() != ARM64ISD::ADDlow ||
        !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
      return false;

    ConstantPoolSDNode *CN =
        dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
    FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
  } else
    return false;
  // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
  // is between 1 and 32 for a destination w-register, or 1 and 64 for an
  // x-register.
  //
  // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
  // want THIS_NODE to be 2^fbits. This is much easier to deal with using
  // integers.
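  //
  // For instance (illustrative): for (fp_to_sint (fmul x, 256.0)) targeting a
  // w-register, 256.0 == 2^8 exactly, so FBits ends up as 8 and the whole
  // pattern can be selected as "fcvtzs wD, sN, #8".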
  bool IsExact;

  // fbits is between 1 and 64 in the worst-case, which means the fmul
  // could have 2^64 as an actual operand. Need 65 bits of precision.
  APSInt IntVal(65, true);
  FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);

  // N.b. isPowerOf2 also checks for > 0.
  if (!IsExact || !IntVal.isPowerOf2()) return false;
  unsigned FBits = IntVal.logBase2();

  // Checks above should have guaranteed that we haven't lost information in
  // finding FBits, but it must still be in range.
  if (FBits == 0 || FBits > RegWidth) return false;

  FixedPos = CurDAG->getTargetConstant(FBits, MVT::i32);
  return true;
}
SDNode *ARM64DAGToDAGISel::Select(SDNode *Node) {
  // Dump information about the Node being selected.
  DEBUG(errs() << "Selecting: ");
  DEBUG(Node->dump(CurDAG));
  DEBUG(errs() << "\n");

  // If we have a custom node, we already have selected!
  if (Node->isMachineOpcode()) {
    DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
    Node->setNodeId(-1);
    return nullptr;
  }

  // A few custom selection cases first.
  SDNode *ResNode = nullptr;
  EVT VT = Node->getValueType(0);

  switch (Node->getOpcode()) {
  default:
    break;

  case ISD::ADD:
    if (SDNode *I = SelectMLAV64LaneV128(Node))
      return I;
    break;

  case ISD::LOAD: {
    // Try to select as an indexed load. Fall through to normal processing
    // if we can't.
    bool Done = false;
    SDNode *I = SelectIndexedLoad(Node, Done);
    if (Done)
      return I;
    break;
  }

  case ISD::SRL:
  case ISD::AND:
  case ISD::SRA:
    if (SDNode *I = SelectBitfieldExtractOp(Node))
      return I;
    break;

  case ISD::OR:
    if (SDNode *I = SelectBitfieldInsertOp(Node))
      return I;
    break;

  case ISD::EXTRACT_VECTOR_ELT: {
    // Extracting lane zero is a special case where we can just use a plain
    // EXTRACT_SUBREG instruction, which will become FMOV. This is easier for
    // the rest of the compiler, especially the register allocator and copy
    // propagation, to reason about, so is preferred when it's possible to
    // use it.
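    //
    // For example (illustrative): (f64 (extract_vector_elt (v2f64 V), 0))
    // becomes an EXTRACT_SUBREG of V's dsub sub-register, i.e. just a copy of
    // the low 64 bits of the Q register holding V.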
    ConstantSDNode *LaneNode = cast<ConstantSDNode>(Node->getOperand(1));
    // Bail and use the default Select() for non-zero lanes.
    if (LaneNode->getZExtValue() != 0)
      break;
    // If the element type is not the same as the result type, likewise
    // bail and use the default Select(), as there's more to do than just
    // a cross-class COPY. This catches extracts of i8 and i16 elements
    // since they will need an explicit zext.
    if (VT != Node->getOperand(0).getValueType().getVectorElementType())
      break;
    unsigned SubReg;
    switch (Node->getOperand(0)
                .getValueType()
                .getVectorElementType()
                .getSizeInBits()) {
    default:
      assert(0 && "Unexpected vector element type!");
    case 64:
      SubReg = ARM64::dsub;
      break;
    case 32:
      SubReg = ARM64::ssub;
      break;
    case 16: // FALLTHROUGH
    case 8:
      llvm_unreachable("unexpected zext-requiring extract element!");
    }
    SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg, SDLoc(Node), VT,
                                                     Node->getOperand(0));
    DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
    DEBUG(Extract->dumpr(CurDAG));
    DEBUG(dbgs() << "\n");
    return Extract.getNode();
  }
  case ISD::Constant: {
    // Materialize zero constants as copies from WZR/XZR. This allows
    // the coalescer to propagate these into other instructions.
    ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
    if (ConstNode->isNullValue()) {
      if (VT == MVT::i32)
        return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
                                      ARM64::WZR, MVT::i32).getNode();
      else if (VT == MVT::i64)
        return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
                                      ARM64::XZR, MVT::i64).getNode();
    }
    break;
  }
  case ISD::FrameIndex: {
    // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
    int FI = cast<FrameIndexSDNode>(Node)->getIndex();
    unsigned Shifter = ARM64_AM::getShifterImm(ARM64_AM::LSL, 0);
    const TargetLowering *TLI = getTargetLowering();
    SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
    SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
                      CurDAG->getTargetConstant(Shifter, MVT::i32) };
    return CurDAG->SelectNodeTo(Node, ARM64::ADDXri, MVT::i64, Ops);
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
    default:
      break;
    case Intrinsic::arm64_ldaxp:
    case Intrinsic::arm64_ldxp: {
      unsigned Op =
          IntNo == Intrinsic::arm64_ldaxp ? ARM64::LDAXPX : ARM64::LDXPX;
      SDValue MemAddr = Node->getOperand(2);
      SDLoc DL(Node);
      SDValue Chain = Node->getOperand(0);

      SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
                                          MVT::Other, MemAddr, Chain);

      // Transfer memoperands.
      MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
      MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
      cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);

      return Ld;
    }
    case Intrinsic::arm64_stlxp:
    case Intrinsic::arm64_stxp: {
      unsigned Op =
          IntNo == Intrinsic::arm64_stlxp ? ARM64::STLXPX : ARM64::STXPX;
      SDLoc DL(Node);
      SDValue Chain = Node->getOperand(0);
      SDValue ValLo = Node->getOperand(2);
      SDValue ValHi = Node->getOperand(3);
      SDValue MemAddr = Node->getOperand(4);

      // Place arguments in the right order.
      SmallVector<SDValue, 7> Ops;
      Ops.push_back(ValLo);
      Ops.push_back(ValHi);
      Ops.push_back(MemAddr);
      Ops.push_back(Chain);

      SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
      // Transfer memoperands.
      MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
      MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
      cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

      return St;
    }
    case Intrinsic::arm64_neon_ld1x2:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 2, ARM64::LD1Twov8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 2, ARM64::LD1Twov16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 2, ARM64::LD1Twov4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 2, ARM64::LD1Twov8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 2, ARM64::LD1Twov2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 2, ARM64::LD1Twov4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 2, ARM64::LD1Twov1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 2, ARM64::LD1Twov2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld1x3:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 3, ARM64::LD1Threev8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 3, ARM64::LD1Threev16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 3, ARM64::LD1Threev4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 3, ARM64::LD1Threev8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 3, ARM64::LD1Threev2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 3, ARM64::LD1Threev4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 3, ARM64::LD1Threev1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 3, ARM64::LD1Threev2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld1x4:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 4, ARM64::LD1Fourv8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 4, ARM64::LD1Fourv16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 4, ARM64::LD1Fourv4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 4, ARM64::LD1Fourv8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 4, ARM64::LD1Fourv2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 4, ARM64::LD1Fourv4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 4, ARM64::LD1Fourv1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 4, ARM64::LD1Fourv2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld2:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 2, ARM64::LD2Twov8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 2, ARM64::LD2Twov16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 2, ARM64::LD2Twov4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 2, ARM64::LD2Twov8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 2, ARM64::LD2Twov2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 2, ARM64::LD2Twov4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 2, ARM64::LD1Twov1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 2, ARM64::LD2Twov2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld3:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 3, ARM64::LD3Threev8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 3, ARM64::LD3Threev16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 3, ARM64::LD3Threev4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 3, ARM64::LD3Threev8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 3, ARM64::LD3Threev2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 3, ARM64::LD3Threev4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 3, ARM64::LD1Threev1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 3, ARM64::LD3Threev2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld4:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 4, ARM64::LD4Fourv8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 4, ARM64::LD4Fourv16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 4, ARM64::LD4Fourv4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 4, ARM64::LD4Fourv8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 4, ARM64::LD4Fourv2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 4, ARM64::LD4Fourv4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 4, ARM64::LD1Fourv1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 4, ARM64::LD4Fourv2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld2r:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 2, ARM64::LD2Rv8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 2, ARM64::LD2Rv16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 2, ARM64::LD2Rv4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 2, ARM64::LD2Rv8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 2, ARM64::LD2Rv2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 2, ARM64::LD2Rv4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 2, ARM64::LD2Rv1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 2, ARM64::LD2Rv2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld3r:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 3, ARM64::LD3Rv8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 3, ARM64::LD3Rv16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 3, ARM64::LD3Rv4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 3, ARM64::LD3Rv8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 3, ARM64::LD3Rv2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 3, ARM64::LD3Rv4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 3, ARM64::LD3Rv1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 3, ARM64::LD3Rv2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld4r:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 4, ARM64::LD4Rv8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 4, ARM64::LD4Rv16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 4, ARM64::LD4Rv4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 4, ARM64::LD4Rv8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 4, ARM64::LD4Rv2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 4, ARM64::LD4Rv4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 4, ARM64::LD4Rv1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 4, ARM64::LD4Rv2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld2lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectLoadLane(Node, 2, ARM64::LD2i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16)
        return SelectLoadLane(Node, 2, ARM64::LD2i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectLoadLane(Node, 2, ARM64::LD2i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectLoadLane(Node, 2, ARM64::LD2i64);
      break;
    case Intrinsic::arm64_neon_ld3lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectLoadLane(Node, 3, ARM64::LD3i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16)
        return SelectLoadLane(Node, 3, ARM64::LD3i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectLoadLane(Node, 3, ARM64::LD3i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectLoadLane(Node, 3, ARM64::LD3i64);
      break;
    case Intrinsic::arm64_neon_ld4lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectLoadLane(Node, 4, ARM64::LD4i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16)
        return SelectLoadLane(Node, 4, ARM64::LD4i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectLoadLane(Node, 4, ARM64::LD4i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectLoadLane(Node, 4, ARM64::LD4i64);
      break;
    }
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
    switch (IntNo) {
    default:
      break;
    case Intrinsic::arm64_neon_tbl2:
      return SelectTable(Node, 2, VT == MVT::v8i8 ? ARM64::TBLv8i8Two
                                                  : ARM64::TBLv16i8Two,
                         false);
    case Intrinsic::arm64_neon_tbl3:
      return SelectTable(Node, 3, VT == MVT::v8i8 ? ARM64::TBLv8i8Three
                                                  : ARM64::TBLv16i8Three,
                         false);
    case Intrinsic::arm64_neon_tbl4:
      return SelectTable(Node, 4, VT == MVT::v8i8 ? ARM64::TBLv8i8Four
                                                  : ARM64::TBLv16i8Four,
                         false);
    case Intrinsic::arm64_neon_tbx2:
      return SelectTable(Node, 2, VT == MVT::v8i8 ? ARM64::TBXv8i8Two
                                                  : ARM64::TBXv16i8Two,
                         true);
    case Intrinsic::arm64_neon_tbx3:
      return SelectTable(Node, 3, VT == MVT::v8i8 ? ARM64::TBXv8i8Three
                                                  : ARM64::TBXv16i8Three,
                         true);
    case Intrinsic::arm64_neon_tbx4:
      return SelectTable(Node, 4, VT == MVT::v8i8 ? ARM64::TBXv8i8Four
                                                  : ARM64::TBXv16i8Four,
                         true);
    case Intrinsic::arm64_neon_smull:
    case Intrinsic::arm64_neon_umull:
      if (SDNode *N = SelectMULLV64LaneV128(IntNo, Node))
        return N;
      break;
    }
    break;
  }
  case ISD::INTRINSIC_VOID: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    if (Node->getNumOperands() >= 3)
      VT = Node->getOperand(2)->getValueType(0);
    switch (IntNo) {
    default:
      break;
    case Intrinsic::arm64_neon_st1x2: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 2, ARM64::ST1Twov8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 2, ARM64::ST1Twov16b);
      else if (VT == MVT::v4i16)
        return SelectStore(Node, 2, ARM64::ST1Twov4h);
      else if (VT == MVT::v8i16)
        return SelectStore(Node, 2, ARM64::ST1Twov8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 2, ARM64::ST1Twov2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 2, ARM64::ST1Twov4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 2, ARM64::ST1Twov2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 2, ARM64::ST1Twov1d);
      break;
    }
    case Intrinsic::arm64_neon_st1x3: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 3, ARM64::ST1Threev8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 3, ARM64::ST1Threev16b);
      else if (VT == MVT::v4i16)
        return SelectStore(Node, 3, ARM64::ST1Threev4h);
      else if (VT == MVT::v8i16)
        return SelectStore(Node, 3, ARM64::ST1Threev8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 3, ARM64::ST1Threev2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 3, ARM64::ST1Threev4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 3, ARM64::ST1Threev2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 3, ARM64::ST1Threev1d);
      break;
    }
    case Intrinsic::arm64_neon_st1x4: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 4, ARM64::ST1Fourv8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 4, ARM64::ST1Fourv16b);
      else if (VT == MVT::v4i16)
        return SelectStore(Node, 4, ARM64::ST1Fourv4h);
      else if (VT == MVT::v8i16)
        return SelectStore(Node, 4, ARM64::ST1Fourv8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 4, ARM64::ST1Fourv2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 4, ARM64::ST1Fourv4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 4, ARM64::ST1Fourv2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 4, ARM64::ST1Fourv1d);
      break;
    }
    case Intrinsic::arm64_neon_st2: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 2, ARM64::ST2Twov8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 2, ARM64::ST2Twov16b);
      else if (VT == MVT::v4i16)
        return SelectStore(Node, 2, ARM64::ST2Twov4h);
      else if (VT == MVT::v8i16)
        return SelectStore(Node, 2, ARM64::ST2Twov8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 2, ARM64::ST2Twov2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 2, ARM64::ST2Twov4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 2, ARM64::ST2Twov2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 2, ARM64::ST1Twov1d);
      break;
    }
    case Intrinsic::arm64_neon_st3: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 3, ARM64::ST3Threev8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 3, ARM64::ST3Threev16b);
      else if (VT == MVT::v4i16)
        return SelectStore(Node, 3, ARM64::ST3Threev4h);
      else if (VT == MVT::v8i16)
        return SelectStore(Node, 3, ARM64::ST3Threev8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 3, ARM64::ST3Threev2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 3, ARM64::ST3Threev4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 3, ARM64::ST3Threev2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 3, ARM64::ST1Threev1d);
      break;
    }
    case Intrinsic::arm64_neon_st4: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 4, ARM64::ST4Fourv8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 4, ARM64::ST4Fourv16b);
      else if (VT == MVT::v4i16)
        return SelectStore(Node, 4, ARM64::ST4Fourv4h);
      else if (VT == MVT::v8i16)
        return SelectStore(Node, 4, ARM64::ST4Fourv8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 4, ARM64::ST4Fourv2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 4, ARM64::ST4Fourv4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 4, ARM64::ST4Fourv2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 4, ARM64::ST1Fourv1d);
      break;
    }
    case Intrinsic::arm64_neon_st2lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectStoreLane(Node, 2, ARM64::ST2i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16)
        return SelectStoreLane(Node, 2, ARM64::ST2i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectStoreLane(Node, 2, ARM64::ST2i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectStoreLane(Node, 2, ARM64::ST2i64);
      break;
    }
    case Intrinsic::arm64_neon_st3lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectStoreLane(Node, 3, ARM64::ST3i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16)
        return SelectStoreLane(Node, 3, ARM64::ST3i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectStoreLane(Node, 3, ARM64::ST3i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectStoreLane(Node, 3, ARM64::ST3i64);
      break;
    }
    case Intrinsic::arm64_neon_st4lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectStoreLane(Node, 4, ARM64::ST4i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16)
        return SelectStoreLane(Node, 4, ARM64::ST4i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectStoreLane(Node, 4, ARM64::ST4i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectStoreLane(Node, 4, ARM64::ST4i64);
      break;
    }
    }
    break;
  }
  case ARM64ISD::LD2post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 2, ARM64::LD2Twov8b_POST, ARM64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 2, ARM64::LD2Twov16b_POST, ARM64::qsub0);
    else if (VT == MVT::v4i16)
      return SelectPostLoad(Node, 2, ARM64::LD2Twov4h_POST, ARM64::dsub0);
    else if (VT == MVT::v8i16)
      return SelectPostLoad(Node, 2, ARM64::LD2Twov8h_POST, ARM64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 2, ARM64::LD2Twov2s_POST, ARM64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 2, ARM64::LD2Twov4s_POST, ARM64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 2, ARM64::LD1Twov1d_POST, ARM64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 2, ARM64::LD2Twov2d_POST, ARM64::qsub0);
    break;
  }
  case ARM64ISD::LD3post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 3, ARM64::LD3Threev8b_POST, ARM64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 3, ARM64::LD3Threev16b_POST, ARM64::qsub0);
    else if (VT == MVT::v4i16)
      return SelectPostLoad(Node, 3, ARM64::LD3Threev4h_POST, ARM64::dsub0);
    else if (VT == MVT::v8i16)
      return SelectPostLoad(Node, 3, ARM64::LD3Threev8h_POST, ARM64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 3, ARM64::LD3Threev2s_POST, ARM64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 3, ARM64::LD3Threev4s_POST, ARM64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 3, ARM64::LD1Threev1d_POST, ARM64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 3, ARM64::LD3Threev2d_POST, ARM64::qsub0);
    break;
  }
  case ARM64ISD::LD4post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 4, ARM64::LD4Fourv8b_POST, ARM64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 4, ARM64::LD4Fourv16b_POST, ARM64::qsub0);
    else if (VT == MVT::v4i16)
      return SelectPostLoad(Node, 4, ARM64::LD4Fourv4h_POST, ARM64::dsub0);
    else if (VT == MVT::v8i16)
      return SelectPostLoad(Node, 4, ARM64::LD4Fourv8h_POST, ARM64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 4, ARM64::LD4Fourv2s_POST, ARM64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 4, ARM64::LD4Fourv4s_POST, ARM64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 4, ARM64::LD1Fourv1d_POST, ARM64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 4, ARM64::LD4Fourv2d_POST, ARM64::qsub0);
    break;
  }
  case ARM64ISD::LD1x2post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 2, ARM64::LD1Twov8b_POST, ARM64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 2, ARM64::LD1Twov16b_POST, ARM64::qsub0);
    else if (VT == MVT::v4i16)
      return SelectPostLoad(Node, 2, ARM64::LD1Twov4h_POST, ARM64::dsub0);
    else if (VT == MVT::v8i16)
      return SelectPostLoad(Node, 2, ARM64::LD1Twov8h_POST, ARM64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 2, ARM64::LD1Twov2s_POST, ARM64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 2, ARM64::LD1Twov4s_POST, ARM64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 2, ARM64::LD1Twov1d_POST, ARM64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 2, ARM64::LD1Twov2d_POST, ARM64::qsub0);
    break;
  }
  case ARM64ISD::LD1x3post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 3, ARM64::LD1Threev8b_POST, ARM64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 3, ARM64::LD1Threev16b_POST, ARM64::qsub0);
    else if (VT == MVT::v4i16)
      return SelectPostLoad(Node, 3, ARM64::LD1Threev4h_POST, ARM64::dsub0);
    else if (VT == MVT::v8i16)
      return SelectPostLoad(Node, 3, ARM64::LD1Threev8h_POST, ARM64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 3, ARM64::LD1Threev2s_POST, ARM64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 3, ARM64::LD1Threev4s_POST, ARM64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 3, ARM64::LD1Threev1d_POST, ARM64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 3, ARM64::LD1Threev2d_POST, ARM64::qsub0);
    break;
  }
  case ARM64ISD::LD1x4post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 4, ARM64::LD1Fourv8b_POST, ARM64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 4, ARM64::LD1Fourv16b_POST, ARM64::qsub0);
    else if (VT == MVT::v4i16)
      return SelectPostLoad(Node, 4, ARM64::LD1Fourv4h_POST, ARM64::dsub0);
    else if (VT == MVT::v8i16)
      return SelectPostLoad(Node, 4, ARM64::LD1Fourv8h_POST, ARM64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 4, ARM64::LD1Fourv2s_POST, ARM64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 4, ARM64::LD1Fourv4s_POST, ARM64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 4, ARM64::LD1Fourv1d_POST, ARM64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 4, ARM64::LD1Fourv2d_POST, ARM64::qsub0);
    break;
  }
  case ARM64ISD::LD1DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 1, ARM64::LD1Rv8b_POST, ARM64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 1, ARM64::LD1Rv16b_POST, ARM64::qsub0);
    else if (VT == MVT::v4i16)
      return SelectPostLoad(Node, 1, ARM64::LD1Rv4h_POST, ARM64::dsub0);
    else if (VT == MVT::v8i16)
      return SelectPostLoad(Node, 1, ARM64::LD1Rv8h_POST, ARM64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 1, ARM64::LD1Rv2s_POST, ARM64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 1, ARM64::LD1Rv4s_POST, ARM64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 1, ARM64::LD1Rv1d_POST, ARM64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 1, ARM64::LD1Rv2d_POST, ARM64::qsub0);
    break;
  }
  case ARM64ISD::LD2DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 2, ARM64::LD2Rv8b_POST, ARM64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 2, ARM64::LD2Rv16b_POST, ARM64::qsub0);
    else if (VT == MVT::v4i16)
      return SelectPostLoad(Node, 2, ARM64::LD2Rv4h_POST, ARM64::dsub0);
    else if (VT == MVT::v8i16)
      return SelectPostLoad(Node, 2, ARM64::LD2Rv8h_POST, ARM64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 2, ARM64::LD2Rv2s_POST, ARM64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 2, ARM64::LD2Rv4s_POST, ARM64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 2, ARM64::LD2Rv1d_POST, ARM64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 2, ARM64::LD2Rv2d_POST, ARM64::qsub0);
    break;
  }
  case ARM64ISD::LD3DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 3, ARM64::LD3Rv8b_POST, ARM64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 3, ARM64::LD3Rv16b_POST, ARM64::qsub0);
    else if (VT == MVT::v4i16)
      return SelectPostLoad(Node, 3, ARM64::LD3Rv4h_POST, ARM64::dsub0);
    else if (VT == MVT::v8i16)
      return SelectPostLoad(Node, 3, ARM64::LD3Rv8h_POST, ARM64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 3, ARM64::LD3Rv2s_POST, ARM64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 3, ARM64::LD3Rv4s_POST, ARM64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 3, ARM64::LD3Rv1d_POST, ARM64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 3, ARM64::LD3Rv2d_POST, ARM64::qsub0);
    break;
  }
  case ARM64ISD::LD4DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 4, ARM64::LD4Rv8b_POST, ARM64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 4, ARM64::LD4Rv16b_POST, ARM64::qsub0);
    else if (VT == MVT::v4i16)
      return SelectPostLoad(Node, 4, ARM64::LD4Rv4h_POST, ARM64::dsub0);
    else if (VT == MVT::v8i16)
      return SelectPostLoad(Node, 4, ARM64::LD4Rv8h_POST, ARM64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 4, ARM64::LD4Rv2s_POST, ARM64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 4, ARM64::LD4Rv4s_POST, ARM64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 4, ARM64::LD4Rv1d_POST, ARM64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 4, ARM64::LD4Rv2d_POST, ARM64::qsub0);
    break;
  }
  case ARM64ISD::LD1LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 1, ARM64::LD1i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16)
      return SelectPostLoadLane(Node, 1, ARM64::LD1i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 1, ARM64::LD1i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 1, ARM64::LD1i64_POST);
    break;
  }
  case ARM64ISD::LD2LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 2, ARM64::LD2i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16)
      return SelectPostLoadLane(Node, 2, ARM64::LD2i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 2, ARM64::LD2i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 2, ARM64::LD2i64_POST);
    break;
  }
  case ARM64ISD::LD3LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 3, ARM64::LD3i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16)
      return SelectPostLoadLane(Node, 3, ARM64::LD3i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 3, ARM64::LD3i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 3, ARM64::LD3i64_POST);
    break;
  }
  case ARM64ISD::LD4LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 4, ARM64::LD4i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16)
      return SelectPostLoadLane(Node, 4, ARM64::LD4i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 4, ARM64::LD4i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 4, ARM64::LD4i64_POST);
    break;
  }
  case ARM64ISD::ST2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 2, ARM64::ST2Twov8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 2, ARM64::ST2Twov16b_POST);
    else if (VT == MVT::v4i16)
      return SelectPostStore(Node, 2, ARM64::ST2Twov4h_POST);
    else if (VT == MVT::v8i16)
      return SelectPostStore(Node, 2, ARM64::ST2Twov8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 2, ARM64::ST2Twov2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 2, ARM64::ST2Twov4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 2, ARM64::ST2Twov2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 2, ARM64::ST1Twov1d_POST);
    break;
  }
  case ARM64ISD::ST3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 3, ARM64::ST3Threev8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 3, ARM64::ST3Threev16b_POST);
    else if (VT == MVT::v4i16)
      return SelectPostStore(Node, 3, ARM64::ST3Threev4h_POST);
    else if (VT == MVT::v8i16)
      return SelectPostStore(Node, 3, ARM64::ST3Threev8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 3, ARM64::ST3Threev2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 3, ARM64::ST3Threev4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 3, ARM64::ST3Threev2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 3, ARM64::ST1Threev1d_POST);
    break;
  }
  case ARM64ISD::ST4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 4, ARM64::ST4Fourv8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 4, ARM64::ST4Fourv16b_POST);
    else if (VT == MVT::v4i16)
      return SelectPostStore(Node, 4, ARM64::ST4Fourv4h_POST);
    else if (VT == MVT::v8i16)
      return SelectPostStore(Node, 4, ARM64::ST4Fourv8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 4, ARM64::ST4Fourv2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 4, ARM64::ST4Fourv4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 4, ARM64::ST4Fourv2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 4, ARM64::ST1Fourv1d_POST);
    break;
  }
  case ARM64ISD::ST1x2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 2, ARM64::ST1Twov8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 2, ARM64::ST1Twov16b_POST);
    else if (VT == MVT::v4i16)
      return SelectPostStore(Node, 2, ARM64::ST1Twov4h_POST);
    else if (VT == MVT::v8i16)
      return SelectPostStore(Node, 2, ARM64::ST1Twov8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 2, ARM64::ST1Twov2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 2, ARM64::ST1Twov4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 2, ARM64::ST1Twov1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 2, ARM64::ST1Twov2d_POST);
    break;
  }
  case ARM64ISD::ST1x3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 3, ARM64::ST1Threev8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 3, ARM64::ST1Threev16b_POST);
    else if (VT == MVT::v4i16)
      return SelectPostStore(Node, 3, ARM64::ST1Threev4h_POST);
    else if (VT == MVT::v8i16)
      return SelectPostStore(Node, 3, ARM64::ST1Threev8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 3, ARM64::ST1Threev2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 3, ARM64::ST1Threev4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 3, ARM64::ST1Threev1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 3, ARM64::ST1Threev2d_POST);
    break;
  }
  case ARM64ISD::ST1x4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 4, ARM64::ST1Fourv8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 4, ARM64::ST1Fourv16b_POST);
    else if (VT == MVT::v4i16)
      return SelectPostStore(Node, 4, ARM64::ST1Fourv4h_POST);
    else if (VT == MVT::v8i16)
      return SelectPostStore(Node, 4, ARM64::ST1Fourv8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 4, ARM64::ST1Fourv2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 4, ARM64::ST1Fourv4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 4, ARM64::ST1Fourv1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 4, ARM64::ST1Fourv2d_POST);
    break;
  }
  case ARM64ISD::ST2LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 2, ARM64::ST2i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16)
      return SelectPostStoreLane(Node, 2, ARM64::ST2i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 2, ARM64::ST2i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 2, ARM64::ST2i64_POST);
    break;
  }
  case ARM64ISD::ST3LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 3, ARM64::ST3i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16)
      return SelectPostStoreLane(Node, 3, ARM64::ST3i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 3, ARM64::ST3i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 3, ARM64::ST3i64_POST);
    break;
  }
  case ARM64ISD::ST4LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 4, ARM64::ST4i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16)
      return SelectPostStoreLane(Node, 4, ARM64::ST4i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 4, ARM64::ST4i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 4, ARM64::ST4i64_POST);
    break;
  }

  case ISD::FCEIL:
  case ISD::FFLOOR:
  case ISD::FTRUNC:
  case ISD::FROUND:
    if (SDNode *I = SelectLIBM(Node))
      return I;
    break;
  }

  // Select the default instruction.
  ResNode = SelectCode(Node);

  DEBUG(errs() << "=> ");
  if (ResNode == nullptr || ResNode == Node)
    DEBUG(Node->dump(CurDAG));
  else
    DEBUG(ResNode->dump(CurDAG));
  DEBUG(errs() << "\n");

  return ResNode;
}
/// createARM64ISelDag - This pass converts a legalized DAG into an
/// ARM64-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createARM64ISelDag(ARM64TargetMachine &TM,
                                       CodeGenOpt::Level OptLevel) {
  return new ARM64DAGToDAGISel(TM, OptLevel);
}