//===-- ARM64ISelDAGToDAG.cpp - A dag to dag inst selector for ARM64 ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the ARM64 target.
//
//===----------------------------------------------------------------------===//
#include "ARM64TargetMachine.h"
#include "MCTargetDesc/ARM64AddressingModes.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h" // To access function attributes.
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "arm64-isel"
//===--------------------------------------------------------------------===//
/// ARM64DAGToDAGISel - ARM64 specific code to select ARM64 machine
/// instructions for SelectionDAG operations.
///
namespace {

class ARM64DAGToDAGISel : public SelectionDAGISel {
  ARM64TargetMachine &TM;

  /// Subtarget - Keep a pointer to the ARM64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARM64Subtarget *Subtarget;

  bool ForCodeSize;

public:
  explicit ARM64DAGToDAGISel(ARM64TargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel), TM(tm),
        Subtarget(nullptr), ForCodeSize(false) {}
  const char *getPassName() const override {
    return "ARM64 Instruction Selection";
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    AttributeSet FnAttrs = MF.getFunction()->getAttributes();
    ForCodeSize =
        FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
                             Attribute::OptimizeForSize) ||
        FnAttrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
    Subtarget = &TM.getSubtarget<ARM64Subtarget>();
    return SelectionDAGISel::runOnMachineFunction(MF);
  }
  SDNode *Select(SDNode *Node) override;

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                    char ConstraintCode,
                                    std::vector<SDValue> &OutOps) override;
  SDNode *SelectMLAV64LaneV128(SDNode *N);
  SDNode *SelectMULLV64LaneV128(unsigned IntNo, SDNode *N);
  bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
  bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, false, Reg, Shift);
  }
  bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, true, Reg, Shift);
  }
  bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 16, Base, OffImm);
  }
  bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 1, Base, OffImm);
  }
  bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 2, Base, OffImm);
  }
  bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 4, Base, OffImm);
  }
  bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 8, Base, OffImm);
  }
  bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 16, Base, OffImm);
  }

  template<int Width>
  bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }

  template<int Width>
  bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }
  /// Form sequences of consecutive 64/128-bit registers for use in NEON
  /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
  /// between 1 and 4 elements. If it contains a single element, that element
  /// is returned unchanged; otherwise a REG_SEQUENCE value is returned.
  SDValue createDTuple(ArrayRef<SDValue> Vecs);
  SDValue createQTuple(ArrayRef<SDValue> Vecs);

  /// Generic helper for the createDTuple/createQTuple
  /// functions. Those should almost always be called instead.
  SDValue createTuple(ArrayRef<SDValue> Vecs, unsigned RegClassIDs[],
                      unsigned SubRegs[]);

  SDNode *SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);
  SDNode *SelectIndexedLoad(SDNode *N, bool &Done);

  SDNode *SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                     unsigned SubRegIdx);
  SDNode *SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                         unsigned SubRegIdx);
  SDNode *SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  SDNode *SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  SDNode *SelectSIMDAddSubNarrowing(unsigned IntNo, SDNode *Node);
  SDNode *SelectSIMDXtnNarrowing(unsigned IntNo, SDNode *Node);

  SDNode *SelectBitfieldExtractOp(SDNode *N);
  SDNode *SelectBitfieldInsertOp(SDNode *N);

  SDNode *SelectLIBM(SDNode *N);

// Include the pieces autogenerated from the target description.
#include "ARM64GenDAGISel.inc"
private:
  bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
                             SDValue &Shift);
  bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
                             SDValue &OffImm);
  bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
                              SDValue &OffImm);
  bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool isWorthFolding(SDValue V) const;
  bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
                         SDValue &Offset, SDValue &SignExtend);

  template<unsigned RegWidth>
  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
    return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
  }

  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);
};
} // end anonymous namespace
/// isIntImmediate - This method tests to see if the node is a constant
/// operand. If so, Imm will receive the value.
static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
  if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
    Imm = C->getZExtValue();
    return true;
  }
  return false;
}

// isIntImmediate - This method tests to see if the value is a constant
// operand. If so, Imm will receive the value.
static bool isIntImmediate(SDValue N, uint64_t &Imm) {
  return isIntImmediate(N.getNode(), Imm);
}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so, Imm will receive the value.
static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
                                  uint64_t &Imm) {
  return N->getOpcode() == Opc &&
         isIntImmediate(N->getOperand(1).getNode(), Imm);
}
bool ARM64DAGToDAGISel::SelectInlineAsmMemoryOperand(
    const SDValue &Op, char ConstraintCode, std::vector<SDValue> &OutOps) {
  assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
  // Require the address to be in a register. That is safe for all ARM64
  // variants and it is hard to do anything much smarter without knowing
  // how the operand is used.
  OutOps.push_back(Op);
  return false;
}
/// SelectArithImmed - Select an immediate value that can be represented as
/// a 12-bit value shifted left by either 0 or 12. If so, return true with
/// Val set to the 12-bit value and Shift set to the shifter operand.
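/// For example, 0x123 and 0x123000 (0x123 shifted left by 12) are both
/// selectable, while 0x123001 is not and must be materialized into a
/// register first.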
bool ARM64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
                                         SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
  unsigned ShiftAmt;

  if (Immed >> 12 == 0) {
    ShiftAmt = 0;
  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
    ShiftAmt = 12;
    Immed >>= 12;
  } else
    return false;

  unsigned ShVal = ARM64_AM::getShifterImm(ARM64_AM::LSL, ShiftAmt);
  Val = CurDAG->getTargetConstant(Immed, MVT::i32);
  Shift = CurDAG->getTargetConstant(ShVal, MVT::i32);
  return true;
}
/// SelectNegArithImmed - As above, but negates the value before trying to
/// select it.
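/// This lets an add of a negative immediate be selected as the complementary
/// subtract instruction, e.g. (add x, -5) can become "sub x, x, #5".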
bool ARM64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
                                            SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  // The immediate operand must be a 24-bit zero-extended immediate.
  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();

  // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
  // have the opposite effect on the C flag, so this pattern mustn't match under
  // those circumstances.
  if (Immed == 0)
    return false;

  if (N.getValueType() == MVT::i32)
    Immed = ~((uint32_t)Immed) + 1;
  else
    Immed = ~Immed + 1ULL;
  if (Immed & 0xFFFFFFFFFF000000ULL)
    return false;

  Immed &= 0xFFFFFFULL;
  return SelectArithImmed(CurDAG->getConstant(Immed, MVT::i32), Val, Shift);
}
/// getShiftTypeForNode - Translate a shift node to the corresponding
/// ShiftType value.
static ARM64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
  switch (N.getOpcode()) {
  default:
    return ARM64_AM::InvalidShiftExtend;
  case ISD::SHL:
    return ARM64_AM::LSL;
  case ISD::SRL:
    return ARM64_AM::LSR;
  case ISD::SRA:
    return ARM64_AM::ASR;
  case ISD::ROTR:
    return ARM64_AM::ROR;
  }
}
/// \brief Determine whether it is worth folding V into an extended register.
bool ARM64DAGToDAGISel::isWorthFolding(SDValue V) const {
  // It hurts if a value is used at least twice, unless we are optimizing
  // for code size.
  if (ForCodeSize || V.hasOneUse())
    return true;
  return false;
}
/// SelectShiftedRegister - Select a "shifted register" operand. If the value
/// is not shifted, set the Shift operand to default of "LSL 0". The logical
/// instructions allow the shifted register to be rotated, but the arithmetic
/// instructions do not. The AllowROR parameter specifies whether ROR is
/// allowed.
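/// For example, (add x0, (shl x1, 3)) can fold the shift and be selected as
/// "add x0, x0, x1, lsl #3".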
bool ARM64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
                                              SDValue &Reg, SDValue &Shift) {
  ARM64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
  if (ShType == ARM64_AM::InvalidShiftExtend)
    return false;
  if (!AllowROR && ShType == ARM64_AM::ROR)
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    unsigned BitSize = N.getValueType().getSizeInBits();
    unsigned Val = RHS->getZExtValue() & (BitSize - 1);
    unsigned ShVal = ARM64_AM::getShifterImm(ShType, Val);

    Reg = N.getOperand(0);
    Shift = CurDAG->getTargetConstant(ShVal, MVT::i32);
    return isWorthFolding(N);
  }

  return false;
}
/// getExtendTypeForNode - Translate an extend node to the corresponding
/// ExtendType value.
static ARM64_AM::ShiftExtendType
getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
  if (N.getOpcode() == ISD::SIGN_EXTEND ||
      N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
    EVT SrcVT;
    if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
      SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
    else
      SrcVT = N.getOperand(0).getValueType();

    if (!IsLoadStore && SrcVT == MVT::i8)
      return ARM64_AM::SXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return ARM64_AM::SXTH;
    else if (SrcVT == MVT::i32)
      return ARM64_AM::SXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return ARM64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
             N.getOpcode() == ISD::ANY_EXTEND) {
    EVT SrcVT = N.getOperand(0).getValueType();
    if (!IsLoadStore && SrcVT == MVT::i8)
      return ARM64_AM::UXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return ARM64_AM::UXTH;
    else if (SrcVT == MVT::i32)
      return ARM64_AM::UXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return ARM64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::AND) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return ARM64_AM::InvalidShiftExtend;
    uint64_t AndMask = CSD->getZExtValue();

    switch (AndMask) {
    default:
      return ARM64_AM::InvalidShiftExtend;
    case 0xFF:
      return !IsLoadStore ? ARM64_AM::UXTB : ARM64_AM::InvalidShiftExtend;
    case 0xFFFF:
      return !IsLoadStore ? ARM64_AM::UXTH : ARM64_AM::InvalidShiftExtend;
    case 0xFFFFFFFF:
      return ARM64_AM::UXTW;
    }
  }

  return ARM64_AM::InvalidShiftExtend;
}
// Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
  if (DL->getOpcode() != ARM64ISD::DUPLANE16 &&
      DL->getOpcode() != ARM64ISD::DUPLANE32)
    return false;

  SDValue SV = DL->getOperand(0);
  if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
    return false;

  SDValue EV = SV.getOperand(1);
  if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
    return false;

  ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
  ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
  LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
  LaneOp = EV.getOperand(0);
  return true;
}
// Helper for SelectOpcV64LaneV128 - Recognize operations where one operand is
// a high lane extract.
static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
                             SDValue &LaneOp, int &LaneIdx) {
  if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
    std::swap(Op0, Op1);
    if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx))
      return false;
  }
  StdOp = Op1;
  return true;
}
/// SelectMLAV64LaneV128 - ARM64 supports vector MLAs where one multiplicand is
/// a lane in the upper half of a 128-bit vector. Recognize and select this so
/// that we don't emit unnecessary lane extracts.
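/// For example, the multiply in "mla v0.4h, v1.4h, v2.h[5]" reads its lane
/// directly from the upper half of v2, with no separate extract needed.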
SDNode *ARM64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDValue MLAOp1;   // Will hold ordinary multiplicand for MLA.
  SDValue MLAOp2;   // Will hold lane-accessed multiplicand for MLA.
  int LaneIdx = -1; // Will hold the lane index.

  if (Op1.getOpcode() != ISD::MUL ||
      !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                        LaneIdx)) {
    std::swap(Op0, Op1);
    if (Op1.getOpcode() != ISD::MUL ||
        !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                          LaneIdx))
      return nullptr;
  }

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);

  SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };

  unsigned MLAOpc = ~0U;

  switch (N->getSimpleValueType(0).SimpleTy) {
  default:
    llvm_unreachable("Unrecognized MLA.");
  case MVT::v4i16:
    MLAOpc = ARM64::MLAv4i16_indexed;
    break;
  case MVT::v8i16:
    MLAOpc = ARM64::MLAv8i16_indexed;
    break;
  case MVT::v2i32:
    MLAOpc = ARM64::MLAv2i32_indexed;
    break;
  case MVT::v4i32:
    MLAOpc = ARM64::MLAv4i32_indexed;
    break;
  }

  return CurDAG->getMachineNode(MLAOpc, SDLoc(N), N->getValueType(0), Ops);
}
SDNode *ARM64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
  SDValue SMULLOp0;
  SDValue SMULLOp1;
  int LaneIdx;

  if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
                        LaneIdx))
    return nullptr;

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);

  SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };

  unsigned SMULLOpc = ~0U;

  if (IntNo == Intrinsic::arm64_neon_smull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized SMULL.");
    case MVT::v4i32:
      SMULLOpc = ARM64::SMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = ARM64::SMULLv2i32_indexed;
      break;
    }
  } else if (IntNo == Intrinsic::arm64_neon_umull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized UMULL.");
    case MVT::v4i32:
      SMULLOpc = ARM64::UMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = ARM64::UMULLv2i32_indexed;
      break;
    }
  } else
    llvm_unreachable("Unrecognized intrinsic.");

  return CurDAG->getMachineNode(SMULLOpc, SDLoc(N), N->getValueType(0), Ops);
}
/// Instructions that accept extend modifiers like UXTW expect the register
/// being extended to be a GPR32, but the incoming DAG might be acting on a
/// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
/// this is the case.
static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
  if (N.getValueType() == MVT::i32)
    return N;

  SDValue SubReg = CurDAG->getTargetConstant(ARM64::sub_32, MVT::i32);
  MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                               SDLoc(N), MVT::i32, N, SubReg);
  return SDValue(Node, 0);
}
/// SelectArithExtendedRegister - Select an "extended register" operand. This
/// operand folds in an extend followed by an optional left shift.
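/// For example, (add x0, (shl (sext_inreg x1, i16), 2)) can be selected as
/// "add x0, x0, w1, sxth #2".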
bool ARM64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
                                                    SDValue &Shift) {
  unsigned ShiftVal = 0;
  ARM64_AM::ShiftExtendType Ext;

  if (N.getOpcode() == ISD::SHL) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return false;
    ShiftVal = CSD->getZExtValue();
    if (ShiftVal > 4)
      return false;

    Ext = getExtendTypeForNode(N.getOperand(0));
    if (Ext == ARM64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0).getOperand(0);
  } else {
    Ext = getExtendTypeForNode(N);
    if (Ext == ARM64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0);
  }

  // ARM64 mandates that the RHS of the operation must use the smallest
  // register class that could contain the size being extended from. Thus,
  // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
  // there might not be an actual 32-bit value in the program. We can
  // (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
  assert(Ext != ARM64_AM::UXTX && Ext != ARM64_AM::SXTX);
  Reg = narrowIfNeeded(CurDAG, Reg);
  Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), MVT::i32);
  return isWorthFolding(N);
}
/// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
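/// For example, with Size == 8 the offset must be a multiple of 8 in the
/// range [0, 32760] (i.e. 4095 * 8); anything else falls back to other forms.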
bool ARM64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
                                              SDValue &Base, SDValue &OffImm) {
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
    OffImm = CurDAG->getTargetConstant(0, MVT::i64);
    return true;
  }

  if (N.getOpcode() == ARM64ISD::ADDlow) {
    GlobalAddressSDNode *GAN =
        dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
    Base = N.getOperand(0);
    OffImm = N.getOperand(1);
    if (!GAN)
      return true;

    const GlobalValue *GV = GAN->getGlobal();
    unsigned Alignment = GV->getAlignment();
    const DataLayout *DL = TLI->getDataLayout();
    if (Alignment == 0 && !Subtarget->isTargetDarwin())
      Alignment = DL->getABITypeAlignment(GV->getType()->getElementType());

    if (Alignment >= Size)
      return true;
  }

  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t RHSC = (int64_t)RHS->getZExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
        }
        OffImm = CurDAG->getTargetConstant(RHSC >> Scale, MVT::i64);
        return true;
      }
    }
  }

  // Before falling back to our general case, check if the unscaled
  // instructions can handle this. If so, that's preferable.
  if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
    return false;

  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  //    add x0, Xbase, #offset
  //    ldr x0, [x0]
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, MVT::i64);
  return true;
}
/// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
/// immediate" address. This should only match when there is an offset that
/// is not valid for a scaled immediate addressing mode. The "Size" argument
/// is the size in bytes of the memory reference, which is needed here to know
/// what is valid for a scaled immediate.
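/// Addresses selected here map to the LDUR/STUR family of instructions,
/// e.g. "ldur x0, [x1, #-8]".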
bool ARM64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
                                               SDValue &Base, SDValue &OffImm) {
  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int64_t RHSC = RHS->getSExtValue();
    // If the offset is valid as a scaled immediate, don't match here.
    if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
        RHSC < (0x1000 << Log2_32(Size)))
      return false;
    if (RHSC >= -256 && RHSC < 256) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        const TargetLowering *TLI = getTargetLowering();
        Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i64);
      return true;
    }
  }
  return false;
}
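// Widen - Promote an i32 value to i64 by inserting it into the low 32 bits
// of an IMPLICIT_DEF; the high 32 bits of the result are undefined.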
static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
  SDValue SubReg = CurDAG->getTargetConstant(ARM64::sub_32, MVT::i32);
  SDValue ImpDef = SDValue(
      CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, SDLoc(N), MVT::i64),
      0);
  MachineSDNode *Node = CurDAG->getMachineNode(
      TargetOpcode::INSERT_SUBREG, SDLoc(N), MVT::i64, ImpDef, N, SubReg);
  return SDValue(Node, 0);
}
/// \brief Check if the given SHL node (\p N) can be used to form an
/// extended register for an addressing mode.
bool ARM64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
                                          bool WantExtend, SDValue &Offset,
                                          SDValue &SignExtend) {
  assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
  ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
    return false;

  if (WantExtend) {
    ARM64_AM::ShiftExtendType Ext = getExtendTypeForNode(N.getOperand(0), true);
    if (Ext == ARM64_AM::InvalidShiftExtend)
      return false;

    Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == ARM64_AM::SXTW, MVT::i32);
  } else {
    Offset = N.getOperand(0);
    SignExtend = CurDAG->getTargetConstant(0, MVT::i32);
  }

  unsigned LegalShiftVal = Log2_32(Size);
  unsigned ShiftVal = CSD->getZExtValue();

  if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
    return false;

  if (isWorthFolding(N))
    return true;

  return false;
}
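// SelectAddrModeWRO - Select a "register plus extended 32-bit register"
// address, optionally shifted by the access size, e.g. [x0, w1, sxtw #2].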
bool ARM64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
                                          SDValue &Base, SDValue &Offset,
                                          SDValue &SignExtend,
                                          SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);

  // We don't want to match immediate adds here, because they are better lowered
  // to the register-immediate addressing modes.
  if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
    return false;

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Remember if it is worth folding N when it produces extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, MVT::i32);
    return true;
  }

  // There was no shift, whatever else we find.
  DoShift = CurDAG->getTargetConstant(false, MVT::i32);

  ARM64_AM::ShiftExtendType Ext = ARM64_AM::InvalidShiftExtend;
  // Try to match an unshifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(LHS, true)) != ARM64_AM::InvalidShiftExtend) {
    Base = RHS;
    Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == ARM64_AM::SXTW, MVT::i32);
    if (isWorthFolding(LHS))
      return true;
  }

  // Try to match an unshifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(RHS, true)) != ARM64_AM::InvalidShiftExtend) {
    Base = LHS;
    Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == ARM64_AM::SXTW, MVT::i32);
    if (isWorthFolding(RHS))
      return true;
  }

  return false;
}
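// SelectAddrModeXRO - Select a "register plus 64-bit register" address,
// optionally shifted by the access size, e.g. [x0, x1, lsl #3].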
bool ARM64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
                                          SDValue &Base, SDValue &Offset,
                                          SDValue &SignExtend,
                                          SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);

  // We don't want to match immediate adds here, because they are better lowered
  // to the register-immediate addressing modes.
  if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
    return false;

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Remember if it is worth folding N when it produces extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, MVT::i32);
    return true;
  }

  // Match any non-shifted, non-extend, non-immediate add expression.
  Base = LHS;
  Offset = RHS;
  SignExtend = CurDAG->getTargetConstant(false, MVT::i32);
  DoShift = CurDAG->getTargetConstant(false, MVT::i32);
  // Reg1 + Reg2 is free: no check needed.
  return true;
}
SDValue ARM64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
  static unsigned RegClassIDs[] = { ARM64::DDRegClassID, ARM64::DDDRegClassID,
                                    ARM64::DDDDRegClassID };
  static unsigned SubRegs[] = { ARM64::dsub0, ARM64::dsub1,
                                ARM64::dsub2, ARM64::dsub3 };

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue ARM64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
  static unsigned RegClassIDs[] = { ARM64::QQRegClassID, ARM64::QQQRegClassID,
                                    ARM64::QQQQRegClassID };
  static unsigned SubRegs[] = { ARM64::qsub0, ARM64::qsub1,
                                ARM64::qsub2, ARM64::qsub3 };

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue ARM64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
                                       unsigned RegClassIDs[],
                                       unsigned SubRegs[]) {
  // There's no special register-class for a vector-list of 1 element: it's just
  // a vector.
  if (Regs.size() == 1)
    return Regs[0];

  assert(Regs.size() >= 2 && Regs.size() <= 4);

  SDLoc DL(Regs[0].getNode());

  SmallVector<SDValue, 4> Ops;

  // First operand of REG_SEQUENCE is the desired RegClass.
  Ops.push_back(
      CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], MVT::i32));

  // Then we get pairs of source & subregister-position for the components.
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Ops.push_back(Regs[i]);
    Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], MVT::i32));
  }

  SDNode *N =
      CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
  return SDValue(N, 0);
}
SDNode *ARM64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs,
                                       unsigned Opc, bool isExt) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  unsigned ExtOff = isExt;

  // Form a REG_SEQUENCE to force register allocation.
  unsigned Vec0Off = ExtOff + 1;
  SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
                               N->op_begin() + Vec0Off + NumVecs);
  SDValue RegSeq = createQTuple(Regs);

  SmallVector<SDValue, 6> Ops;
  if (isExt)
    Ops.push_back(N->getOperand(1));
  Ops.push_back(RegSeq);
  Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
  return CurDAG->getMachineNode(Opc, dl, VT, Ops);
}
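// SelectIndexedLoad - Select pre/post-indexed loads with writeback. The
// machine node produces the writeback register as result 0, the loaded value
// as result 1, and the chain as result 2.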
SDNode *ARM64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  if (LD->isUnindexed())
    return nullptr;
  EVT VT = LD->getMemoryVT();
  EVT DstVT = N->getValueType(0);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;

  // We're not doing validity checking here. That was done when checking
  // if we should mark the load as indexed or not. We're just selecting
  // the right instruction.
  unsigned Opcode = 0;

  ISD::LoadExtType ExtType = LD->getExtensionType();
  bool InsertTo64 = false;
  if (VT == MVT::i64)
    Opcode = IsPre ? ARM64::LDRXpre : ARM64::LDRXpost;
  else if (VT == MVT::i32) {
    if (ExtType == ISD::NON_EXTLOAD)
      Opcode = IsPre ? ARM64::LDRWpre : ARM64::LDRWpost;
    else if (ExtType == ISD::SEXTLOAD)
      Opcode = IsPre ? ARM64::LDRSWpre : ARM64::LDRSWpost;
    else {
      Opcode = IsPre ? ARM64::LDRWpre : ARM64::LDRWpost;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i16) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? ARM64::LDRSHXpre : ARM64::LDRSHXpost;
      else
        Opcode = IsPre ? ARM64::LDRSHWpre : ARM64::LDRSHWpost;
    } else {
      Opcode = IsPre ? ARM64::LDRHHpre : ARM64::LDRHHpost;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i8) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? ARM64::LDRSBXpre : ARM64::LDRSBXpost;
      else
        Opcode = IsPre ? ARM64::LDRSBWpre : ARM64::LDRSBWpost;
    } else {
      Opcode = IsPre ? ARM64::LDRBBpre : ARM64::LDRBBpost;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::f32) {
    Opcode = IsPre ? ARM64::LDRSpre : ARM64::LDRSpost;
  } else if (VT == MVT::f64 || VT.is64BitVector()) {
    Opcode = IsPre ? ARM64::LDRDpre : ARM64::LDRDpost;
  } else if (VT.is128BitVector()) {
    Opcode = IsPre ? ARM64::LDRQpre : ARM64::LDRQpost;
  } else
    return nullptr;
  SDValue Chain = LD->getChain();
  SDValue Base = LD->getBasePtr();
  ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
  int OffsetVal = (int)OffsetOp->getZExtValue();
  SDValue Offset = CurDAG->getTargetConstant(OffsetVal, MVT::i64);
  SDValue Ops[] = { Base, Offset, Chain };
  SDNode *Res = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i64, DstVT,
                                       MVT::Other, Ops);
  // Either way, we're replacing the node, so tell the caller that.
  Done = true;
  SDValue LoadedVal = SDValue(Res, 1);
  if (InsertTo64) {
    SDValue SubReg = CurDAG->getTargetConstant(ARM64::sub_32, MVT::i32);
    LoadedVal =
        SDValue(CurDAG->getMachineNode(ARM64::SUBREG_TO_REG, SDLoc(N), MVT::i64,
                                       CurDAG->getTargetConstant(0, MVT::i64),
                                       LoadedVal, SubReg),
                0);
  }

  ReplaceUses(SDValue(N, 0), LoadedVal);
  ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
  ReplaceUses(SDValue(N, 2), SDValue(Res, 2));

  return nullptr;
}
SDNode *ARM64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                                      unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(N->getOperand(2)); // Mem operand
  Ops.push_back(Chain);

  std::vector<EVT> ResTys;
  ResTys.push_back(MVT::Untyped);
  ResTys.push_back(MVT::Other);

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);
  for (unsigned i = 0; i < NumVecs; ++i)
    ReplaceUses(SDValue(N, i),
                CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
  return nullptr;
}
SDNode *ARM64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
                                          unsigned Opc, unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(N->getOperand(1)); // Mem operand
  Ops.push_back(N->getOperand(2)); // Incremental
  Ops.push_back(Chain);

  std::vector<EVT> ResTys;
  ResTys.push_back(MVT::i64); // Type of the write back register
  ResTys.push_back(MVT::Untyped);
  ResTys.push_back(MVT::Other);

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Update uses of write back register
  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));

  // Update uses of vector list
  SDValue SuperReg = SDValue(Ld, 1);
  if (NumVecs == 1)
    ReplaceUses(SDValue(N, 0), SuperReg);
  else
    for (unsigned i = 0; i < NumVecs; ++i)
      ReplaceUses(SDValue(N, i),
                  CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT,
                                                 SuperReg));

  // Update the chain
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
  return nullptr;
}
SDNode *ARM64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
                                       unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(RegSeq);
  Ops.push_back(N->getOperand(NumVecs + 2)); // Base register
  Ops.push_back(N->getOperand(0));           // Chain
  SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);

  return St;
}
SDNode *ARM64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
                                           unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  SmallVector<EVT, 2> ResTys;
  ResTys.push_back(MVT::i64);   // Type of the write back register
  ResTys.push_back(MVT::Other); // Type for the Chain

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(RegSeq);
  Ops.push_back(N->getOperand(NumVecs + 1)); // Base register
  Ops.push_back(N->getOperand(NumVecs + 2)); // Incremental
  Ops.push_back(N->getOperand(0));           // Chain
  SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  return St;
}
namespace {
/// WidenVector - Given a value in the V64 register class, produce the
/// equivalent value in the V128 register class.
class WidenVector {
  SelectionDAG &DAG;

public:
  WidenVector(SelectionDAG &DAG) : DAG(DAG) {}

  SDValue operator()(SDValue V64Reg) {
    EVT VT = V64Reg.getValueType();
    unsigned NarrowSize = VT.getVectorNumElements();
    MVT EltTy = VT.getVectorElementType().getSimpleVT();
    MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
    SDLoc DL(V64Reg);

    SDValue Undef =
        SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
    return DAG.getTargetInsertSubreg(ARM64::dsub, DL, WideTy, Undef, V64Reg);
  }
};
} // namespace
/// NarrowVector - Given a value in the V128 register class, produce the
/// equivalent value in the V64 register class.
static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
  EVT VT = V128Reg.getValueType();
  unsigned WideSize = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType().getSimpleVT();
  MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);

  return DAG.getTargetExtractSubreg(ARM64::dsub, SDLoc(V128Reg), NarrowTy,
                                    V128Reg);
}
SDNode *ARM64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
                                          unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  std::vector<EVT> ResTys;
  ResTys.push_back(MVT::Untyped);
  ResTys.push_back(MVT::Other);

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(RegSeq);
  Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64));
  Ops.push_back(N->getOperand(NumVecs + 3));
  Ops.push_back(N->getOperand(0));
  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);

  EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
  static unsigned QSubs[] = { ARM64::qsub0, ARM64::qsub1, ARM64::qsub2,
                              ARM64::qsub3 };
  for (unsigned i = 0; i < NumVecs; ++i) {
    SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
    if (Narrow)
      NV = NarrowVector(NV, *CurDAG);
    ReplaceUses(SDValue(N, i), NV);
  }

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));

  return nullptr;
}
SDNode *ARM64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
                                              unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  std::vector<EVT> ResTys;
  ResTys.push_back(MVT::i64); // Type of the write back register
  ResTys.push_back(MVT::Untyped);
  ResTys.push_back(MVT::Other);

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(RegSeq);
  Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64)); // Lane Number
  Ops.push_back(N->getOperand(NumVecs + 2)); // Base register
  Ops.push_back(N->getOperand(NumVecs + 3)); // Incremental
  Ops.push_back(N->getOperand(0));
  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Update uses of the write back register
  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));

  // Update uses of the vector list
  SDValue SuperReg = SDValue(Ld, 1);
  if (NumVecs == 1) {
    ReplaceUses(SDValue(N, 0),
                Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg);
  } else {
    EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
    static unsigned QSubs[] = { ARM64::qsub0, ARM64::qsub1, ARM64::qsub2,
                                ARM64::qsub3 };
    for (unsigned i = 0; i < NumVecs; ++i) {
      SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
                                                  SuperReg);
      if (Narrow)
        NV = NarrowVector(NV, *CurDAG);
      ReplaceUses(SDValue(N, i), NV);
    }
  }

  // Update the Chain
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));

  return nullptr;
}
SDNode *ARM64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
                                           unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(RegSeq);
  Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64));
  Ops.push_back(N->getOperand(NumVecs + 3));
  Ops.push_back(N->getOperand(0));
  SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

  return St;
}
SDNode *ARM64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
                                               unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  SmallVector<EVT, 2> ResTys;
  ResTys.push_back(MVT::i64); // Type of the write back register
  ResTys.push_back(MVT::Other);

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(RegSeq);
  Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64));
  Ops.push_back(N->getOperand(NumVecs + 2)); // Base Register
  Ops.push_back(N->getOperand(NumVecs + 3)); // Incremental
  Ops.push_back(N->getOperand(0));
  SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

  return St;
}
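// isBitfieldExtractOpFromAnd - Try to match (and (srl x, imm), mask) as an
// unsigned bitfield extract (UBFM). For example, (and (srl x, 4), 0xff)
// extracts bits [11:4] of x, i.e. LSB = 4 and MSB = 11.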
static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
                                       unsigned &Opc, SDValue &Opd0,
                                       unsigned &LSB, unsigned &MSB,
                                       unsigned NumberOfIgnoredLowBits,
                                       bool BiggerPattern) {
  assert(N->getOpcode() == ISD::AND &&
         "N must be an AND operation to call this function");

  EVT VT = N->getValueType(0);

  // Here we can test the type of VT and return false when the type does not
  // match, but since it is done prior to that call in the current context
  // we turned that into an assert to avoid redundant code.
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "Type checking must have been done before calling this function");

  // FIXME: simplify-demanded-bits in DAGCombine will probably have
  // changed the AND node to a 32-bit mask operation. We'll have to
  // undo that as part of the transform here if we want to catch all
  // the opportunities.
  // Currently the NumberOfIgnoredLowBits argument helps to recover
  // from these situations when matching bigger pattern (bitfield insert).

  // For unsigned extracts, check for a shift right and mask
  uint64_t And_imm = 0;
  if (!isOpcWithIntImmediate(N, ISD::AND, And_imm))
    return false;

  const SDNode *Op0 = N->getOperand(0).getNode();

  // Because of simplify-demanded-bits in DAGCombine, the mask may have been
  // simplified. Try to undo that
  And_imm |= (1 << NumberOfIgnoredLowBits) - 1;

  // The immediate is a mask of the low bits iff imm & (imm+1) == 0
  if (And_imm & (And_imm + 1))
    return false;

  bool ClampMSB = false;
  uint64_t Srl_imm = 0;
  // Handle the SRL + ANY_EXTEND case.
  if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
      isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, Srl_imm)) {
    // Extend the incoming operand of the SRL to 64-bit.
    Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
    // Make sure to clamp the MSB so that we preserve the semantics of the
    // original operations.
    ClampMSB = true;
  } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
             isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
                                   Srl_imm)) {
    // If the shift result was truncated, we can still combine them.
    Opd0 = Op0->getOperand(0).getOperand(0);

    // Use the type of SRL node.
    VT = Opd0->getValueType(0);
  } else if (isOpcWithIntImmediate(Op0, ISD::SRL, Srl_imm)) {
    Opd0 = Op0->getOperand(0);
  } else if (BiggerPattern) {
    // Let's pretend a 0 shift right has been performed.
    // The resulting code will be at least as good as the original one
    // plus it may expose more opportunities for bitfield insert pattern.
    // FIXME: Currently we limit this to the bigger pattern, because
    // some optimizations expect AND and not UBFM
    Opd0 = N->getOperand(0);
  } else
    return false;

  assert((BiggerPattern || (Srl_imm > 0 && Srl_imm < VT.getSizeInBits())) &&
         "bad amount in shift node!");

  LSB = Srl_imm;
  MSB = Srl_imm + (VT == MVT::i32 ? CountTrailingOnes_32(And_imm)
                                  : CountTrailingOnes_64(And_imm)) -
        1;
  if (ClampMSB)
    // Since we're moving the extend before the right shift operation, we need
    // to clamp the MSB to make sure we don't shift in undefined bits instead of
    // the zeros which would get shifted in with the original right shift
    // operation.
    MSB = MSB > 31 ? 31 : MSB;

  Opc = VT == MVT::i32 ? ARM64::UBFMWri : ARM64::UBFMXri;
  return true;
}
static bool isOneBitExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
                                     unsigned &LSB, unsigned &MSB) {
  // We are looking for the following pattern which basically extracts a single
  // bit from the source value and places it in the LSB of the destination
  // value, all other bits of the destination value are set to zero:
  //
  // Value2 = AND Value, MaskImm
  // SRL Value2, ShiftImm
  //
  // with MaskImm >> ShiftImm == 1.
  //
  // This gets selected into a single UBFM:
  //
  // UBFM Value, ShiftImm, ShiftImm
  //
  if (N->getOpcode() != ISD::SRL)
    return false;

  uint64_t And_mask = 0;
  if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, And_mask))
    return false;

  Opd0 = N->getOperand(0).getOperand(0);

  uint64_t Srl_imm = 0;
  if (!isIntImmediate(N->getOperand(1), Srl_imm))
    return false;

  // Check whether we really have a one bit extract here.
  if (And_mask >> Srl_imm == 0x1) {
    if (N->getValueType(0) == MVT::i32)
      Opc = ARM64::UBFMWri;
    else
      Opc = ARM64::UBFMXri;

    LSB = MSB = Srl_imm;

    return true;
  }

  return false;
}
static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
                                       unsigned &LSB, unsigned &MSB,
                                       bool BiggerPattern) {
  assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
         "N must be a SHR/SRA operation to call this function");

  EVT VT = N->getValueType(0);

  // Here we can test the type of VT and return false when the type does not
  // match, but since it is done prior to that call in the current context
  // we turned that into an assert to avoid redundant code.
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "Type checking must have been done before calling this function");

  // Check for AND + SRL doing a one bit extract.
  if (isOneBitExtractOpFromShr(N, Opc, Opd0, LSB, MSB))
    return true;

  // We're looking for a shift of a shift.
  uint64_t Shl_imm = 0;
  uint64_t Trunc_bits = 0;
  if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
    Opd0 = N->getOperand(0).getOperand(0);
  } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
             N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
    // We are looking for a shift of truncate. Truncate from i64 to i32 could
    // be considered as setting high 32 bits as zero. Our strategy here is to
    // always generate 64bit UBFM. This consistency will help the CSE pass
    // later find more redundancy.
    Opd0 = N->getOperand(0).getOperand(0);
    Trunc_bits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
    VT = Opd0->getValueType(0);
    assert(VT == MVT::i64 && "the promoted type should be i64");
  } else if (BiggerPattern) {
    // Let's pretend a 0 shift left has been performed.
    // FIXME: Currently we limit this to the bigger pattern case,
    // because some optimizations expect AND and not UBFM
    Opd0 = N->getOperand(0);
  } else
    return false;

  assert(Shl_imm < VT.getSizeInBits() && "bad amount in shift node!");
  uint64_t Srl_imm = 0;
  if (!isIntImmediate(N->getOperand(1), Srl_imm))
    return false;

  assert(Srl_imm > 0 && Srl_imm < VT.getSizeInBits() &&
         "bad amount in shift node!");
  // Note: The width operand is encoded as width-1.
  unsigned Width = VT.getSizeInBits() - Trunc_bits - Srl_imm - 1;
  int sLSB = Srl_imm - Shl_imm;
  if (sLSB < 0)
    return false;
  LSB = sLSB;
  MSB = LSB + Width;
  // SRA requires a signed extraction
  if (VT == MVT::i32)
    Opc = N->getOpcode() == ISD::SRA ? ARM64::SBFMWri : ARM64::UBFMWri;
  else
    Opc = N->getOpcode() == ISD::SRA ? ARM64::SBFMXri : ARM64::UBFMXri;
  return true;
}
static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
                                SDValue &Opd0, unsigned &LSB, unsigned &MSB,
                                unsigned NumberOfIgnoredLowBits = 0,
                                bool BiggerPattern = false) {
  if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
    return false;

  switch (N->getOpcode()) {
  default:
    if (!N->isMachineOpcode())
      return false;
    break;
  case ISD::AND:
    return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, LSB, MSB,
                                      NumberOfIgnoredLowBits, BiggerPattern);
  case ISD::SRL:
  case ISD::SRA:
    return isBitfieldExtractOpFromShr(N, Opc, Opd0, LSB, MSB, BiggerPattern);
  }

  unsigned NOpc = N->getMachineOpcode();
  switch (NOpc) {
  default:
    return false;
  case ARM64::SBFMWri:
  case ARM64::UBFMWri:
  case ARM64::SBFMXri:
  case ARM64::UBFMXri:
    Opc = NOpc;
    Opd0 = N->getOperand(0);
    LSB = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
    MSB = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
    return true;
  }
  // Unreachable
  return false;
}
SDNode *ARM64DAGToDAGISel::SelectBitfieldExtractOp(SDNode *N) {
  unsigned Opc, LSB, MSB;
  SDValue Opd0;
  if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, LSB, MSB))
    return nullptr;

  EVT VT = N->getValueType(0);

  // If the bit extract operation is 64bit but the original type is 32bit, we
  // need to add one EXTRACT_SUBREG.
  if ((Opc == ARM64::SBFMXri || Opc == ARM64::UBFMXri) && VT == MVT::i32) {
    SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(LSB, MVT::i64),
                       CurDAG->getTargetConstant(MSB, MVT::i64)};

    SDNode *BFM = CurDAG->getMachineNode(Opc, SDLoc(N), MVT::i64, Ops64);
    SDValue SubReg = CurDAG->getTargetConstant(ARM64::sub_32, MVT::i32);
    MachineSDNode *Node =
        CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, SDLoc(N), MVT::i32,
                               SDValue(BFM, 0), SubReg);
    return Node;
  }

  SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(LSB, VT),
                   CurDAG->getTargetConstant(MSB, VT)};
  return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
}
/// Does DstMask form a complementary pair with the mask provided by
/// BitsToBeInserted, suitable for use in a BFI instruction. Roughly speaking,
/// this asks whether DstMask zeroes precisely those bits that will be set by
/// the other mask.
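/// For example, DstMask 0xffff00ff is complementary to inserted bits that
/// occupy 0x0000ff00: together they cover every bit exactly once.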
static bool isBitfieldDstMask(uint64_t DstMask, APInt BitsToBeInserted,
                              unsigned NumberOfIgnoredHighBits, EVT VT) {
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "i32 or i64 mask type expected!");
  unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;

  APInt SignificantDstMask = APInt(BitWidth, DstMask);
  APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);

  return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
         (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue();
}
// Look for bits that will be useful for later uses.
// A bit is considered useless as soon as it is dropped and never used
// before it has been dropped.
// E.g., looking for useful bits of x:
// 1. y = x & 0x7
// 2. z = y >> 2
// After #1, the useful bits of x are 0x7; then the useful bits of x, live
// through y, are 0x4.
// After #2, the useful bits of x are 0x4.
// However, if x is used by an unpredictable instruction, then all its bits
// are useful.
// E.g.
// 1. y = x & 0x7
// 2. z = y >> 2
// 3. str x, [@x]
static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);
static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
                                              unsigned Depth) {
  uint64_t Imm =
      cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
  Imm = ARM64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
  UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
  getUsefulBits(Op, UsefulBits, Depth + 1);
}
static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
                                             uint64_t Imm, uint64_t MSB,
                                             unsigned Depth) {
  // inherit the bitwidth value
  APInt OpUsefulBits(UsefulBits);
  OpUsefulBits = 1;

  if (MSB >= Imm) {
    OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
    --OpUsefulBits;
    // The interesting part will be in the lower part of the result
    getUsefulBits(Op, OpUsefulBits, Depth + 1);
    // The interesting part was starting at Imm in the argument
    OpUsefulBits = OpUsefulBits.shl(Imm);
  } else {
    OpUsefulBits = OpUsefulBits.shl(MSB + 1);
    --OpUsefulBits;
    // The interesting part will be shifted in the result
    OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
    getUsefulBits(Op, OpUsefulBits, Depth + 1);
    // The interesting part was at zero in the argument
    OpUsefulBits = OpUsefulBits.lshr(OpUsefulBits.getBitWidth() - Imm);
  }

  UsefulBits &= OpUsefulBits;
}
static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
                                  unsigned Depth) {
  uint64_t Imm =
      cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
  uint64_t MSB =
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();

  getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
}
static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
                                              unsigned Depth) {
  uint64_t ShiftTypeAndValue =
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
  APInt Mask(UsefulBits);
  Mask.clearAllBits();
  Mask.flipAllBits();

  if (ARM64_AM::getShiftType(ShiftTypeAndValue) == ARM64_AM::LSL) {
    // Shift Left
    uint64_t ShiftAmt = ARM64_AM::getShiftValue(ShiftTypeAndValue);
    Mask = Mask.shl(ShiftAmt);
    getUsefulBits(Op, Mask, Depth + 1);
    Mask = Mask.lshr(ShiftAmt);
  } else if (ARM64_AM::getShiftType(ShiftTypeAndValue) == ARM64_AM::LSR) {
    // Shift Right
    // We do not handle ARM64_AM::ASR, because the sign will change the
    // number of useful bits
    uint64_t ShiftAmt = ARM64_AM::getShiftValue(ShiftTypeAndValue);
    Mask = Mask.lshr(ShiftAmt);
    getUsefulBits(Op, Mask, Depth + 1);
    Mask = Mask.shl(ShiftAmt);
  } else
    return;

  UsefulBits &= Mask;
}
static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
                                 unsigned Depth) {
  uint64_t Imm =
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
  uint64_t MSB =
      cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();

  if (Op.getOperand(1) == Orig)
    return getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);

  APInt OpUsefulBits(UsefulBits);
  OpUsefulBits = 1;

  if (MSB >= Imm) {
    OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
    --OpUsefulBits;
    UsefulBits &= ~OpUsefulBits;
    getUsefulBits(Op, UsefulBits, Depth + 1);
  } else {
    OpUsefulBits = OpUsefulBits.shl(MSB + 1);
    --OpUsefulBits;
    UsefulBits = ~(OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm));
    getUsefulBits(Op, UsefulBits, Depth + 1);
  }
}
static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
                                SDValue Orig, unsigned Depth) {

  // Users of this node should have already been instruction selected
  // FIXME: Can we turn that into an assert?
  if (!UserNode->isMachineOpcode())
    return;

  switch (UserNode->getMachineOpcode()) {
  default:
    return;
  case ARM64::ANDSWri:
  case ARM64::ANDSXri:
  case ARM64::ANDWri:
  case ARM64::ANDXri:
    // We increment Depth only when we call the getUsefulBits
    return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
                                             Depth);
  case ARM64::UBFMWri:
  case ARM64::UBFMXri:
    return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);

  case ARM64::ORRWrs:
  case ARM64::ORRXrs:
    if (UserNode->getOperand(1) != Orig)
      return;
    return getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
                                             Depth);
  case ARM64::BFMWri:
  case ARM64::BFMXri:
    return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);
  }
}
static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
  if (Depth >= 6)
    return;
  // Initialize UsefulBits
  if (!Depth) {
    unsigned Bitwidth = Op.getValueType().getScalarType().getSizeInBits();
    // At the beginning, assume every produced bit is useful
    UsefulBits = APInt(Bitwidth, 0);
    UsefulBits.flipAllBits();
  }
  APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);

  for (SDNode *Node : Op.getNode()->uses()) {
    // A use cannot produce useful bits
    APInt UsefulBitsForUse = APInt(UsefulBits);
    getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
    UsersUsefulBits |= UsefulBitsForUse;
  }
  // UsefulBits contains the produced bits that are meaningful for the
  // current definition, thus a user cannot make a bit meaningful at
  // this point
  UsefulBits &= UsersUsefulBits;
}
/// Create a machine node performing a notional SHL of Op by ShlAmount. If
/// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
/// 0, return Op unchanged.
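/// For example, a notional left shift by 3 of a 32-bit value is emitted as
/// "ubfm wD, wN, #29, #28" (i.e. UBFM with immr = 32 - 3 and imms = 31 - 3).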
1748 static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
1752 EVT VT = Op.getValueType();
1753 unsigned BitWidth = VT.getSizeInBits();
1754 unsigned UBFMOpc = BitWidth == 32 ? ARM64::UBFMWri : ARM64::UBFMXri;
1757 if (ShlAmount > 0) {
1758 // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
1759 ShiftNode = CurDAG->getMachineNode(
1760 UBFMOpc, SDLoc(Op), VT, Op,
1761 CurDAG->getTargetConstant(BitWidth - ShlAmount, VT),
1762 CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, VT));
1764 // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
1765 assert(ShlAmount < 0 && "expected right shift");
1766 int ShrAmount = -ShlAmount;
1767 ShiftNode = CurDAG->getMachineNode(
1768 UBFMOpc, SDLoc(Op), VT, Op, CurDAG->getTargetConstant(ShrAmount, VT),
1769 CurDAG->getTargetConstant(BitWidth - 1, VT));
1772 return SDValue(ShiftNode, 0);
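
// For reference, the UBFM aliases used by getLeftShift, with concrete
// numbers on a 32-bit register (standard AArch64 alias encodings):
//   LSL w0, w1, #8  ==  UBFM w0, w1, #24, #23   ; #(32-8), #(31-8)
//   LSR w0, w1, #8  ==  UBFM w0, w1, #8, #31
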
/// Does this tree qualify as an attempt to move a bitfield into position,
/// essentially "(and (shl VAL, N), Mask)".
static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
                                    SDValue &Src, int &ShiftAmount,
                                    int &MaskWidth) {
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  assert(BitWidth == 32 || BitWidth == 64);

  APInt KnownZero, KnownOne;
  CurDAG->computeKnownBits(Op, KnownZero, KnownOne);

  // Non-zero in the sense that they're not provably zero, which is the key
  // point if we want to use this value
  uint64_t NonZeroBits = (~KnownZero).getZExtValue();

  // Discard a constant AND mask if present. It's safe because the node will
  // already have been factored into the computeKnownBits calculation above.
  uint64_t AndImm;
  if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
    assert((~APInt(BitWidth, AndImm) & ~KnownZero) == 0);
    Op = Op.getOperand(0);
  }

  uint64_t ShlImm;
  if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm))
    return false;
  Op = Op.getOperand(0);

  if (!isShiftedMask_64(NonZeroBits))
    return false;

  ShiftAmount = countTrailingZeros(NonZeroBits);
  MaskWidth = CountTrailingOnes_64(NonZeroBits >> ShiftAmount);

  // BFI encompasses sufficiently many nodes that it's worth inserting an extra
  // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
  // node.
  Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount);
  return true;
}
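
// Worked example for isBitfieldPositioningOp (a sketch, not from the
// original source): for (and (shl %x, 3), 0x1f8), the known-nonzero mask
// 0x1f8 is 0b111111000, so ShiftAmount = 3 and MaskWidth = 6; the ISD::SHL
// amount already equals ShiftAmount, so getLeftShift returns %x unchanged
// and Src = %x.
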
// Given an OR operation, check if we have the following pattern:
// ubfm c, b, imm, imm2 (or something that does the same job, see
//                       isBitfieldExtractOp)
// d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
//                 countTrailingZeros(mask2) == imm2 - imm + 1
// f = d | c
// If yes, the given reference arguments will be updated so that one can
// replace the OR instruction with:
// f = Opc Opd0, Opd1, LSB, MSB ; where Opc is a BFM, LSB = imm, and MSB = imm2
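//
// Worked example (assumed values, not from the original source):
//   %c = UBFM %b, #16, #23      ; i.e. UBFX: extract bits [23:16] of %b
//   %d = AND  %e, #0xffffff00   ; clear the low byte of %e
//   %f = OR   %d, %c
// becomes
//   %f = BFM  %e, %b, #16, #23  ; i.e. BFXIL %e, %b, #16, #8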
static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Dst,
                                     SDValue &Src, unsigned &ImmR,
                                     unsigned &ImmS, SelectionDAG *CurDAG) {
  assert(N->getOpcode() == ISD::OR && "Expected an OR operation");

  // Set Opc
  EVT VT = N->getValueType(0);
  if (VT == MVT::i32)
    Opc = ARM64::BFMWri;
  else if (VT == MVT::i64)
    Opc = ARM64::BFMXri;
  else
    return false;

  // Because of simplify-demanded-bits in DAGCombine, involved masks may not
  // have the expected shape. Try to undo that.
  APInt UsefulBits;
  getUsefulBits(SDValue(N, 0), UsefulBits);

  unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
  unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();

  // OR is commutative, check both possibilities (does llvm provide a
  // way to do that directly, e.g., via code matcher?)
  SDValue OrOpd1Val = N->getOperand(1);
  SDNode *OrOpd0 = N->getOperand(0).getNode();
  SDNode *OrOpd1 = N->getOperand(1).getNode();
  for (int i = 0; i < 2;
       ++i, std::swap(OrOpd0, OrOpd1), OrOpd1Val = N->getOperand(0)) {
    unsigned BFXOpc;
    int DstLSB, Width;
    if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
                            NumberOfIgnoredLowBits, true)) {
      // Check that the returned opcode is compatible with the pattern,
      // i.e., same type and zero extended (U and not S)
      if ((BFXOpc != ARM64::UBFMXri && VT == MVT::i64) ||
          (BFXOpc != ARM64::UBFMWri && VT == MVT::i32))
        continue;

      // Compute the width of the bitfield insertion
      DstLSB = 0;
      Width = ImmS - ImmR + 1;
      // FIXME: This constraint only catches simple bitfield insertion;
      // we may want to widen the pattern to grab general bitfield
      // insertion.
      if (Width <= 0)
        continue;

      // If the mask on the insertee is correct, we have a BFXIL operation. We
      // can share the ImmR and ImmS values from the already-computed UBFM.
    } else if (isBitfieldPositioningOp(CurDAG, SDValue(OrOpd0, 0), Src,
                                       DstLSB, Width)) {
      ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
      ImmS = Width - 1;
    } else
      continue;

    // Check the second part of the pattern
    EVT VT = OrOpd1->getValueType(0);
    assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");

    // Compute the KnownZero bits for the candidate of the first operand.
    // This allows us to catch more general cases than just looking for
    // an AND with imm. Indeed, simplify-demanded-bits may have removed
    // the AND instruction because it proved it was useless.
    APInt KnownZero, KnownOne;
    CurDAG->computeKnownBits(OrOpd1Val, KnownZero, KnownOne);

    // Check if there is enough room for the second operand to appear
    // in the first one
    APInt BitsToBeInserted =
        APInt::getBitsSet(KnownZero.getBitWidth(), DstLSB, DstLSB + Width);

    if ((BitsToBeInserted & ~KnownZero) != 0)
      continue;

    // Set the first operand
    uint64_t Imm;
    if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
        isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
      // In that case, we can eliminate the AND
      Dst = OrOpd1->getOperand(0);
    else
      // Maybe the AND has been removed by simplify-demanded-bits
      // or is useful because it discards more bits
      Dst = OrOpd1Val;

    // Both parts match.
    return true;
  }

  return false;
}

SDNode *ARM64DAGToDAGISel::SelectBitfieldInsertOp(SDNode *N) {
  if (N->getOpcode() != ISD::OR)
    return nullptr;

  unsigned Opc;
  SDValue Opd0, Opd1;
  unsigned LSB, MSB;

  if (!isBitfieldInsertOpFromOr(N, Opc, Opd0, Opd1, LSB, MSB, CurDAG))
    return nullptr;

  EVT VT = N->getValueType(0);
  SDValue Ops[] = { Opd0,
                    Opd1,
                    CurDAG->getTargetConstant(LSB, VT),
                    CurDAG->getTargetConstant(MSB, VT) };
  return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
}

SDNode *ARM64DAGToDAGISel::SelectLIBM(SDNode *N) {
  EVT VT = N->getValueType(0);
  unsigned Variant;
  unsigned Opc;
  unsigned FRINTXOpcs[] = { ARM64::FRINTXSr, ARM64::FRINTXDr };

  if (VT == MVT::f32) {
    Variant = 0;
  } else if (VT == MVT::f64) {
    Variant = 1;
  } else
    return nullptr; // Unrecognized argument type. Fall back on default codegen.

  // Pick the FRINTX variant needed to set the flags.
  unsigned FRINTXOpc = FRINTXOpcs[Variant];

  switch (N->getOpcode()) {
  default:
    return nullptr; // Unrecognized libm ISD node. Fall back on default codegen.
  case ISD::FCEIL: {
    unsigned FRINTPOpcs[] = { ARM64::FRINTPSr, ARM64::FRINTPDr };
    Opc = FRINTPOpcs[Variant];
    break;
  }
  case ISD::FFLOOR: {
    unsigned FRINTMOpcs[] = { ARM64::FRINTMSr, ARM64::FRINTMDr };
    Opc = FRINTMOpcs[Variant];
    break;
  }
  case ISD::FTRUNC: {
    unsigned FRINTZOpcs[] = { ARM64::FRINTZSr, ARM64::FRINTZDr };
    Opc = FRINTZOpcs[Variant];
    break;
  }
  case ISD::FROUND: {
    unsigned FRINTAOpcs[] = { ARM64::FRINTASr, ARM64::FRINTADr };
    Opc = FRINTAOpcs[Variant];
    break;
  }
  }

  SDLoc dl(N);
  SDValue In = N->getOperand(0);
  SmallVector<SDValue, 2> Ops;
  Ops.push_back(In);

  if (!TM.Options.UnsafeFPMath) {
    SDNode *FRINTX = CurDAG->getMachineNode(FRINTXOpc, dl, VT, MVT::Glue, In);
    Ops.push_back(SDValue(FRINTX, 1));
  }

  return CurDAG->getMachineNode(Opc, dl, VT, Ops);
}
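
// Example of what SelectLIBM produces (a sketch, not from the original
// source): for (fceil f64:%x) with UnsafeFPMath disabled, it emits an
// FRINTXDr glued to the FRINTPDr that computes the result. The FRINTX value
// itself is discarded; it appears to be there so the FPSR Inexact flag is
// raised just as a real libm ceil() call would raise it.
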
bool
ARM64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
                                            unsigned RegWidth) {
  APFloat FVal(0.0);
  if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
    FVal = CN->getValueAPF();
  else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
    // Some otherwise illegal constants are allowed in this case.
    if (LN->getOperand(1).getOpcode() != ARM64ISD::ADDlow ||
        !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
      return false;

    ConstantPoolSDNode *CN =
        dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
    FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
  } else
    return false;

  // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
  // is between 1 and 32 for a destination w-register, or 1 and 64 for an
  // x-register.
  //
  // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
  // want THIS_NODE to be 2^fbits. This is much easier to deal with using
  // the APFloat routines than as an SDNode.
  bool IsExact;

  // fbits is between 1 and 64 in the worst-case, which means the fmul
  // could have 2^64 as an actual operand. Need 65 bits of precision.
  APSInt IntVal(65, true);
  FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);

  // N.b. isPowerOf2 also checks for > 0.
  if (!IsExact || !IntVal.isPowerOf2()) return false;
  unsigned FBits = IntVal.logBase2();

  // Checks above should have guaranteed that we haven't lost information in
  // finding FBits, but it must still be in range.
  if (FBits == 0 || FBits > RegWidth) return false;

  FixedPos = CurDAG->getTargetConstant(FBits, MVT::i32);
  return true;
}
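
// Worked example for SelectCVTFixedPosOperand (a sketch, not from the
// original source): given (fp_to_sint (fmul f32:%x, 65536.0)) and
// RegWidth == 32, the constant converts exactly to 2^16, so FBits == 16 and
// the multiply can be folded into a fixed-point FCVTZS with 16 fractional
// bits.
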
SDNode *ARM64DAGToDAGISel::Select(SDNode *Node) {
  // Dump information about the Node being selected
  DEBUG(errs() << "Selecting: ");
  DEBUG(Node->dump(CurDAG));
  DEBUG(errs() << "\n");

  // If we have a custom node, we already have selected!
  if (Node->isMachineOpcode()) {
    DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
    Node->setNodeId(-1);
    return nullptr;
  }

  // A few custom selections are tried first.
  SDNode *ResNode = nullptr;
  EVT VT = Node->getValueType(0);

  switch (Node->getOpcode()) {
  default:
    break;

  case ISD::ADD:
    if (SDNode *I = SelectMLAV64LaneV128(Node))
      return I;
    break;

  case ISD::LOAD: {
    // Try to select as an indexed load. Fall through to normal processing
    // if we can't.
    bool Done = false;
    SDNode *I = SelectIndexedLoad(Node, Done);
    if (Done)
      return I;
    break;
  }

  case ISD::SRL:
  case ISD::AND:
  case ISD::SRA:
    if (SDNode *I = SelectBitfieldExtractOp(Node))
      return I;
    break;

  case ISD::OR:
    if (SDNode *I = SelectBitfieldInsertOp(Node))
      return I;
    break;

  case ISD::EXTRACT_VECTOR_ELT: {
    // Extracting lane zero is a special case where we can just use a plain
    // EXTRACT_SUBREG instruction, which will become FMOV. This is easier for
    // the rest of the compiler, especially the register allocator and copy
    // propagation, to reason about, so is preferred when it's possible to
    // use it.
    ConstantSDNode *LaneNode = cast<ConstantSDNode>(Node->getOperand(1));
    // Bail and use the default Select() for non-zero lanes.
    if (LaneNode->getZExtValue() != 0)
      break;
    // If the element type is not the same as the result type, likewise
    // bail and use the default Select(), as there's more to do than just
    // a cross-class COPY. This catches extracts of i8 and i16 elements
    // since they will need an explicit zext.
    if (VT != Node->getOperand(0).getValueType().getVectorElementType())
      break;
    unsigned SubReg;
    switch (Node->getOperand(0)
                .getValueType()
                .getVectorElementType()
                .getSizeInBits()) {
    default:
      assert(0 && "Unexpected vector element type!");
    case 64:
      SubReg = ARM64::dsub;
      break;
    case 32:
      SubReg = ARM64::ssub;
      break;
    case 16: // FALLTHROUGH
    case 8:
      llvm_unreachable("unexpected zext-requiring extract element!");
    }
    SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg, SDLoc(Node), VT,
                                                     Node->getOperand(0));
    DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
    DEBUG(Extract->dumpr(CurDAG));
    DEBUG(dbgs() << "\n");
    return Extract.getNode();
  }
  case ISD::Constant: {
    // Materialize zero constants as copies from WZR/XZR. This allows
    // the coalescer to propagate these into other instructions.
    ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
    if (ConstNode->isNullValue()) {
      if (VT == MVT::i32)
        return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
                                      ARM64::WZR, MVT::i32).getNode();
      else if (VT == MVT::i64)
        return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
                                      ARM64::XZR, MVT::i64).getNode();
    }
    break;
  }
  case ISD::FrameIndex: {
    // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
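    // For instance (illustrative only): a frame object that frame lowering
    // assigns to SP + 16 becomes "add x0, sp, #16" once the FI operand is
    // eliminated.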
    int FI = cast<FrameIndexSDNode>(Node)->getIndex();
    unsigned Shifter = ARM64_AM::getShifterImm(ARM64_AM::LSL, 0);
    const TargetLowering *TLI = getTargetLowering();
    SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
    SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
                      CurDAG->getTargetConstant(Shifter, MVT::i32) };
    return CurDAG->SelectNodeTo(Node, ARM64::ADDXri, MVT::i64, Ops);
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
    default:
      break;
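    // Illustrative mapping (a sketch, not from the original source):
    //   {i64, i64} @llvm.arm64.ldaxp(i8* %p)   ==>   ldaxp x0, x1, [xP]
    // i.e. the acquire form of the exclusive 64-bit-pair load; paired with
    // stlxp below, it is the building block for 128-bit atomics.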
    case Intrinsic::arm64_ldaxp:
    case Intrinsic::arm64_ldxp: {
      unsigned Op =
          IntNo == Intrinsic::arm64_ldaxp ? ARM64::LDAXPX : ARM64::LDXPX;
      SDValue MemAddr = Node->getOperand(2);
      SDLoc DL(Node);
      SDValue Chain = Node->getOperand(0);

      SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
                                          MVT::Other, MemAddr, Chain);

      // Transfer memoperands.
      MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
      MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
      cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
      return Ld;
    }
    case Intrinsic::arm64_stlxp:
    case Intrinsic::arm64_stxp: {
      unsigned Op =
          IntNo == Intrinsic::arm64_stlxp ? ARM64::STLXPX : ARM64::STXPX;
      SDLoc DL(Node);
      SDValue Chain = Node->getOperand(0);
      SDValue ValLo = Node->getOperand(2);
      SDValue ValHi = Node->getOperand(3);
      SDValue MemAddr = Node->getOperand(4);

      // Place arguments in the right order.
      SmallVector<SDValue, 7> Ops;
      Ops.push_back(ValLo);
      Ops.push_back(ValHi);
      Ops.push_back(MemAddr);
      Ops.push_back(Chain);

      SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
      // Transfer memoperands.
      MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
      MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
      cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

      return St;
    }
    case Intrinsic::arm64_neon_ld1x2:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 2, ARM64::LD1Twov8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 2, ARM64::LD1Twov16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 2, ARM64::LD1Twov4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 2, ARM64::LD1Twov8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 2, ARM64::LD1Twov2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 2, ARM64::LD1Twov4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 2, ARM64::LD1Twov1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 2, ARM64::LD1Twov2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld1x3:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 3, ARM64::LD1Threev8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 3, ARM64::LD1Threev16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 3, ARM64::LD1Threev4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 3, ARM64::LD1Threev8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 3, ARM64::LD1Threev2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 3, ARM64::LD1Threev4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 3, ARM64::LD1Threev1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 3, ARM64::LD1Threev2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld1x4:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 4, ARM64::LD1Fourv8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 4, ARM64::LD1Fourv16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 4, ARM64::LD1Fourv4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 4, ARM64::LD1Fourv8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 4, ARM64::LD1Fourv2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 4, ARM64::LD1Fourv4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 4, ARM64::LD1Fourv1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 4, ARM64::LD1Fourv2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld2:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 2, ARM64::LD2Twov8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 2, ARM64::LD2Twov16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 2, ARM64::LD2Twov4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 2, ARM64::LD2Twov8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 2, ARM64::LD2Twov2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 2, ARM64::LD2Twov4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 2, ARM64::LD1Twov1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 2, ARM64::LD2Twov2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld3:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 3, ARM64::LD3Threev8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 3, ARM64::LD3Threev16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 3, ARM64::LD3Threev4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 3, ARM64::LD3Threev8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 3, ARM64::LD3Threev2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 3, ARM64::LD3Threev4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 3, ARM64::LD1Threev1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 3, ARM64::LD3Threev2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld4:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 4, ARM64::LD4Fourv8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 4, ARM64::LD4Fourv16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 4, ARM64::LD4Fourv4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 4, ARM64::LD4Fourv8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 4, ARM64::LD4Fourv2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 4, ARM64::LD4Fourv4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 4, ARM64::LD1Fourv1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 4, ARM64::LD4Fourv2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld2r:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 2, ARM64::LD2Rv8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 2, ARM64::LD2Rv16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 2, ARM64::LD2Rv4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 2, ARM64::LD2Rv8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 2, ARM64::LD2Rv2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 2, ARM64::LD2Rv4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 2, ARM64::LD2Rv1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 2, ARM64::LD2Rv2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld3r:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 3, ARM64::LD3Rv8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 3, ARM64::LD3Rv16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 3, ARM64::LD3Rv4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 3, ARM64::LD3Rv8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 3, ARM64::LD3Rv2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 3, ARM64::LD3Rv4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 3, ARM64::LD3Rv1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 3, ARM64::LD3Rv2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld4r:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 4, ARM64::LD4Rv8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 4, ARM64::LD4Rv16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 4, ARM64::LD4Rv4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 4, ARM64::LD4Rv8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 4, ARM64::LD4Rv2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 4, ARM64::LD4Rv4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 4, ARM64::LD4Rv1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 4, ARM64::LD4Rv2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld2lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectLoadLane(Node, 2, ARM64::LD2i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16)
        return SelectLoadLane(Node, 2, ARM64::LD2i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectLoadLane(Node, 2, ARM64::LD2i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectLoadLane(Node, 2, ARM64::LD2i64);
      break;
    case Intrinsic::arm64_neon_ld3lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectLoadLane(Node, 3, ARM64::LD3i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16)
        return SelectLoadLane(Node, 3, ARM64::LD3i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectLoadLane(Node, 3, ARM64::LD3i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectLoadLane(Node, 3, ARM64::LD3i64);
      break;
    case Intrinsic::arm64_neon_ld4lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectLoadLane(Node, 4, ARM64::LD4i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16)
        return SelectLoadLane(Node, 4, ARM64::LD4i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectLoadLane(Node, 4, ARM64::LD4i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectLoadLane(Node, 4, ARM64::LD4i64);
      break;
    }
  } break;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
    switch (IntNo) {
    default:
      break;
    case Intrinsic::arm64_neon_tbl2:
      return SelectTable(Node, 2, VT == MVT::v8i8 ? ARM64::TBLv8i8Two
                                                  : ARM64::TBLv16i8Two,
                         false);
    case Intrinsic::arm64_neon_tbl3:
      return SelectTable(Node, 3, VT == MVT::v8i8 ? ARM64::TBLv8i8Three
                                                  : ARM64::TBLv16i8Three,
                         false);
    case Intrinsic::arm64_neon_tbl4:
      return SelectTable(Node, 4, VT == MVT::v8i8 ? ARM64::TBLv8i8Four
                                                  : ARM64::TBLv16i8Four,
                         false);
    case Intrinsic::arm64_neon_tbx2:
      return SelectTable(Node, 2, VT == MVT::v8i8 ? ARM64::TBXv8i8Two
                                                  : ARM64::TBXv16i8Two,
                         true);
    case Intrinsic::arm64_neon_tbx3:
      return SelectTable(Node, 3, VT == MVT::v8i8 ? ARM64::TBXv8i8Three
                                                  : ARM64::TBXv16i8Three,
                         true);
    case Intrinsic::arm64_neon_tbx4:
      return SelectTable(Node, 4, VT == MVT::v8i8 ? ARM64::TBXv8i8Four
                                                  : ARM64::TBXv16i8Four,
                         true);
    case Intrinsic::arm64_neon_smull:
    case Intrinsic::arm64_neon_umull:
      if (SDNode *N = SelectMULLV64LaneV128(IntNo, Node))
        return N;
      break;
    }
  } break;
  case ISD::INTRINSIC_VOID: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    if (Node->getNumOperands() >= 3)
      VT = Node->getOperand(2)->getValueType(0);
    switch (IntNo) {
    default:
      break;
    case Intrinsic::arm64_neon_st1x2: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 2, ARM64::ST1Twov8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 2, ARM64::ST1Twov16b);
      else if (VT == MVT::v4i16)
        return SelectStore(Node, 2, ARM64::ST1Twov4h);
      else if (VT == MVT::v8i16)
        return SelectStore(Node, 2, ARM64::ST1Twov8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 2, ARM64::ST1Twov2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 2, ARM64::ST1Twov4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 2, ARM64::ST1Twov2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 2, ARM64::ST1Twov1d);
      break;
    }
    case Intrinsic::arm64_neon_st1x3: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 3, ARM64::ST1Threev8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 3, ARM64::ST1Threev16b);
      else if (VT == MVT::v4i16)
        return SelectStore(Node, 3, ARM64::ST1Threev4h);
      else if (VT == MVT::v8i16)
        return SelectStore(Node, 3, ARM64::ST1Threev8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 3, ARM64::ST1Threev2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 3, ARM64::ST1Threev4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 3, ARM64::ST1Threev2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 3, ARM64::ST1Threev1d);
      break;
    }
    case Intrinsic::arm64_neon_st1x4: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 4, ARM64::ST1Fourv8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 4, ARM64::ST1Fourv16b);
      else if (VT == MVT::v4i16)
        return SelectStore(Node, 4, ARM64::ST1Fourv4h);
      else if (VT == MVT::v8i16)
        return SelectStore(Node, 4, ARM64::ST1Fourv8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 4, ARM64::ST1Fourv2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 4, ARM64::ST1Fourv4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 4, ARM64::ST1Fourv2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 4, ARM64::ST1Fourv1d);
      break;
    }
    case Intrinsic::arm64_neon_st2: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 2, ARM64::ST2Twov8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 2, ARM64::ST2Twov16b);
      else if (VT == MVT::v4i16)
        return SelectStore(Node, 2, ARM64::ST2Twov4h);
      else if (VT == MVT::v8i16)
        return SelectStore(Node, 2, ARM64::ST2Twov8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 2, ARM64::ST2Twov2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 2, ARM64::ST2Twov4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 2, ARM64::ST2Twov2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 2, ARM64::ST1Twov1d);
      break;
    }
    case Intrinsic::arm64_neon_st3: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 3, ARM64::ST3Threev8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 3, ARM64::ST3Threev16b);
      else if (VT == MVT::v4i16)
        return SelectStore(Node, 3, ARM64::ST3Threev4h);
      else if (VT == MVT::v8i16)
        return SelectStore(Node, 3, ARM64::ST3Threev8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 3, ARM64::ST3Threev2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 3, ARM64::ST3Threev4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 3, ARM64::ST3Threev2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 3, ARM64::ST1Threev1d);
      break;
    }
    case Intrinsic::arm64_neon_st4: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 4, ARM64::ST4Fourv8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 4, ARM64::ST4Fourv16b);
      else if (VT == MVT::v4i16)
        return SelectStore(Node, 4, ARM64::ST4Fourv4h);
      else if (VT == MVT::v8i16)
        return SelectStore(Node, 4, ARM64::ST4Fourv8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 4, ARM64::ST4Fourv2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 4, ARM64::ST4Fourv4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 4, ARM64::ST4Fourv2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 4, ARM64::ST1Fourv1d);
      break;
    }
    case Intrinsic::arm64_neon_st2lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectStoreLane(Node, 2, ARM64::ST2i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16)
        return SelectStoreLane(Node, 2, ARM64::ST2i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectStoreLane(Node, 2, ARM64::ST2i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectStoreLane(Node, 2, ARM64::ST2i64);
      break;
    }
    case Intrinsic::arm64_neon_st3lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectStoreLane(Node, 3, ARM64::ST3i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16)
        return SelectStoreLane(Node, 3, ARM64::ST3i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectStoreLane(Node, 3, ARM64::ST3i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectStoreLane(Node, 3, ARM64::ST3i64);
      break;
    }
    case Intrinsic::arm64_neon_st4lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectStoreLane(Node, 4, ARM64::ST4i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16)
        return SelectStoreLane(Node, 4, ARM64::ST4i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectStoreLane(Node, 4, ARM64::ST4i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectStoreLane(Node, 4, ARM64::ST4i64);
      break;
    }
    }
  } break;
  case ARM64ISD::LD2post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 2, ARM64::LD2Twov8b_POST, ARM64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 2, ARM64::LD2Twov16b_POST, ARM64::qsub0);
    else if (VT == MVT::v4i16)
      return SelectPostLoad(Node, 2, ARM64::LD2Twov4h_POST, ARM64::dsub0);
    else if (VT == MVT::v8i16)
      return SelectPostLoad(Node, 2, ARM64::LD2Twov8h_POST, ARM64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 2, ARM64::LD2Twov2s_POST, ARM64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 2, ARM64::LD2Twov4s_POST, ARM64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 2, ARM64::LD1Twov1d_POST, ARM64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 2, ARM64::LD2Twov2d_POST, ARM64::qsub0);
    break;
  }
  case ARM64ISD::LD3post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 3, ARM64::LD3Threev8b_POST, ARM64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 3, ARM64::LD3Threev16b_POST, ARM64::qsub0);
    else if (VT == MVT::v4i16)
      return SelectPostLoad(Node, 3, ARM64::LD3Threev4h_POST, ARM64::dsub0);
    else if (VT == MVT::v8i16)
      return SelectPostLoad(Node, 3, ARM64::LD3Threev8h_POST, ARM64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 3, ARM64::LD3Threev2s_POST, ARM64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 3, ARM64::LD3Threev4s_POST, ARM64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 3, ARM64::LD1Threev1d_POST, ARM64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 3, ARM64::LD3Threev2d_POST, ARM64::qsub0);
    break;
  }
  case ARM64ISD::LD4post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 4, ARM64::LD4Fourv8b_POST, ARM64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 4, ARM64::LD4Fourv16b_POST, ARM64::qsub0);
    else if (VT == MVT::v4i16)
      return SelectPostLoad(Node, 4, ARM64::LD4Fourv4h_POST, ARM64::dsub0);
    else if (VT == MVT::v8i16)
      return SelectPostLoad(Node, 4, ARM64::LD4Fourv8h_POST, ARM64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 4, ARM64::LD4Fourv2s_POST, ARM64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 4, ARM64::LD4Fourv4s_POST, ARM64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 4, ARM64::LD1Fourv1d_POST, ARM64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 4, ARM64::LD4Fourv2d_POST, ARM64::qsub0);
    break;
  }
  case ARM64ISD::LD1x2post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 2, ARM64::LD1Twov8b_POST, ARM64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 2, ARM64::LD1Twov16b_POST, ARM64::qsub0);
    else if (VT == MVT::v4i16)
      return SelectPostLoad(Node, 2, ARM64::LD1Twov4h_POST, ARM64::dsub0);
    else if (VT == MVT::v8i16)
      return SelectPostLoad(Node, 2, ARM64::LD1Twov8h_POST, ARM64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 2, ARM64::LD1Twov2s_POST, ARM64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 2, ARM64::LD1Twov4s_POST, ARM64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 2, ARM64::LD1Twov1d_POST, ARM64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 2, ARM64::LD1Twov2d_POST, ARM64::qsub0);
    break;
  }
  case ARM64ISD::LD1x3post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 3, ARM64::LD1Threev8b_POST, ARM64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 3, ARM64::LD1Threev16b_POST, ARM64::qsub0);
    else if (VT == MVT::v4i16)
      return SelectPostLoad(Node, 3, ARM64::LD1Threev4h_POST, ARM64::dsub0);
    else if (VT == MVT::v8i16)
      return SelectPostLoad(Node, 3, ARM64::LD1Threev8h_POST, ARM64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 3, ARM64::LD1Threev2s_POST, ARM64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 3, ARM64::LD1Threev4s_POST, ARM64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 3, ARM64::LD1Threev1d_POST, ARM64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 3, ARM64::LD1Threev2d_POST, ARM64::qsub0);
    break;
  }
  case ARM64ISD::LD1x4post: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 4, ARM64::LD1Fourv8b_POST, ARM64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 4, ARM64::LD1Fourv16b_POST, ARM64::qsub0);
    else if (VT == MVT::v4i16)
      return SelectPostLoad(Node, 4, ARM64::LD1Fourv4h_POST, ARM64::dsub0);
    else if (VT == MVT::v8i16)
      return SelectPostLoad(Node, 4, ARM64::LD1Fourv8h_POST, ARM64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 4, ARM64::LD1Fourv2s_POST, ARM64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 4, ARM64::LD1Fourv4s_POST, ARM64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 4, ARM64::LD1Fourv1d_POST, ARM64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 4, ARM64::LD1Fourv2d_POST, ARM64::qsub0);
    break;
  }
  case ARM64ISD::LD1DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 1, ARM64::LD1Rv8b_POST, ARM64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 1, ARM64::LD1Rv16b_POST, ARM64::qsub0);
    else if (VT == MVT::v4i16)
      return SelectPostLoad(Node, 1, ARM64::LD1Rv4h_POST, ARM64::dsub0);
    else if (VT == MVT::v8i16)
      return SelectPostLoad(Node, 1, ARM64::LD1Rv8h_POST, ARM64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 1, ARM64::LD1Rv2s_POST, ARM64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 1, ARM64::LD1Rv4s_POST, ARM64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 1, ARM64::LD1Rv1d_POST, ARM64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 1, ARM64::LD1Rv2d_POST, ARM64::qsub0);
    break;
  }
  case ARM64ISD::LD2DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 2, ARM64::LD2Rv8b_POST, ARM64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 2, ARM64::LD2Rv16b_POST, ARM64::qsub0);
    else if (VT == MVT::v4i16)
      return SelectPostLoad(Node, 2, ARM64::LD2Rv4h_POST, ARM64::dsub0);
    else if (VT == MVT::v8i16)
      return SelectPostLoad(Node, 2, ARM64::LD2Rv8h_POST, ARM64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 2, ARM64::LD2Rv2s_POST, ARM64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 2, ARM64::LD2Rv4s_POST, ARM64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 2, ARM64::LD2Rv1d_POST, ARM64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 2, ARM64::LD2Rv2d_POST, ARM64::qsub0);
    break;
  }
  case ARM64ISD::LD3DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 3, ARM64::LD3Rv8b_POST, ARM64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 3, ARM64::LD3Rv16b_POST, ARM64::qsub0);
    else if (VT == MVT::v4i16)
      return SelectPostLoad(Node, 3, ARM64::LD3Rv4h_POST, ARM64::dsub0);
    else if (VT == MVT::v8i16)
      return SelectPostLoad(Node, 3, ARM64::LD3Rv8h_POST, ARM64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 3, ARM64::LD3Rv2s_POST, ARM64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 3, ARM64::LD3Rv4s_POST, ARM64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 3, ARM64::LD3Rv1d_POST, ARM64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 3, ARM64::LD3Rv2d_POST, ARM64::qsub0);
    break;
  }
  case ARM64ISD::LD4DUPpost: {
    if (VT == MVT::v8i8)
      return SelectPostLoad(Node, 4, ARM64::LD4Rv8b_POST, ARM64::dsub0);
    else if (VT == MVT::v16i8)
      return SelectPostLoad(Node, 4, ARM64::LD4Rv16b_POST, ARM64::qsub0);
    else if (VT == MVT::v4i16)
      return SelectPostLoad(Node, 4, ARM64::LD4Rv4h_POST, ARM64::dsub0);
    else if (VT == MVT::v8i16)
      return SelectPostLoad(Node, 4, ARM64::LD4Rv8h_POST, ARM64::qsub0);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostLoad(Node, 4, ARM64::LD4Rv2s_POST, ARM64::dsub0);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostLoad(Node, 4, ARM64::LD4Rv4s_POST, ARM64::qsub0);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostLoad(Node, 4, ARM64::LD4Rv1d_POST, ARM64::dsub0);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostLoad(Node, 4, ARM64::LD4Rv2d_POST, ARM64::qsub0);
    break;
  }
  case ARM64ISD::LD1LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 1, ARM64::LD1i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16)
      return SelectPostLoadLane(Node, 1, ARM64::LD1i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 1, ARM64::LD1i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 1, ARM64::LD1i64_POST);
    break;
  }
  case ARM64ISD::LD2LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 2, ARM64::LD2i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16)
      return SelectPostLoadLane(Node, 2, ARM64::LD2i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 2, ARM64::LD2i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 2, ARM64::LD2i64_POST);
    break;
  }
  case ARM64ISD::LD3LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 3, ARM64::LD3i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16)
      return SelectPostLoadLane(Node, 3, ARM64::LD3i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 3, ARM64::LD3i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 3, ARM64::LD3i64_POST);
    break;
  }
  case ARM64ISD::LD4LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostLoadLane(Node, 4, ARM64::LD4i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16)
      return SelectPostLoadLane(Node, 4, ARM64::LD4i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostLoadLane(Node, 4, ARM64::LD4i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostLoadLane(Node, 4, ARM64::LD4i64_POST);
    break;
  }
  case ARM64ISD::ST2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 2, ARM64::ST2Twov8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 2, ARM64::ST2Twov16b_POST);
    else if (VT == MVT::v4i16)
      return SelectPostStore(Node, 2, ARM64::ST2Twov4h_POST);
    else if (VT == MVT::v8i16)
      return SelectPostStore(Node, 2, ARM64::ST2Twov8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 2, ARM64::ST2Twov2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 2, ARM64::ST2Twov4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 2, ARM64::ST2Twov2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 2, ARM64::ST1Twov1d_POST);
    break;
  }
  case ARM64ISD::ST3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 3, ARM64::ST3Threev8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 3, ARM64::ST3Threev16b_POST);
    else if (VT == MVT::v4i16)
      return SelectPostStore(Node, 3, ARM64::ST3Threev4h_POST);
    else if (VT == MVT::v8i16)
      return SelectPostStore(Node, 3, ARM64::ST3Threev8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 3, ARM64::ST3Threev2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 3, ARM64::ST3Threev4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 3, ARM64::ST3Threev2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 3, ARM64::ST1Threev1d_POST);
    break;
  }
  case ARM64ISD::ST4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 4, ARM64::ST4Fourv8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 4, ARM64::ST4Fourv16b_POST);
    else if (VT == MVT::v4i16)
      return SelectPostStore(Node, 4, ARM64::ST4Fourv4h_POST);
    else if (VT == MVT::v8i16)
      return SelectPostStore(Node, 4, ARM64::ST4Fourv8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 4, ARM64::ST4Fourv2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 4, ARM64::ST4Fourv4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 4, ARM64::ST4Fourv2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 4, ARM64::ST1Fourv1d_POST);
    break;
  }
  case ARM64ISD::ST1x2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 2, ARM64::ST1Twov8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 2, ARM64::ST1Twov16b_POST);
    else if (VT == MVT::v4i16)
      return SelectPostStore(Node, 2, ARM64::ST1Twov4h_POST);
    else if (VT == MVT::v8i16)
      return SelectPostStore(Node, 2, ARM64::ST1Twov8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 2, ARM64::ST1Twov2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 2, ARM64::ST1Twov4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 2, ARM64::ST1Twov1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 2, ARM64::ST1Twov2d_POST);
    break;
  }
  case ARM64ISD::ST1x3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 3, ARM64::ST1Threev8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 3, ARM64::ST1Threev16b_POST);
    else if (VT == MVT::v4i16)
      return SelectPostStore(Node, 3, ARM64::ST1Threev4h_POST);
    else if (VT == MVT::v8i16)
      return SelectPostStore(Node, 3, ARM64::ST1Threev8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 3, ARM64::ST1Threev2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 3, ARM64::ST1Threev4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 3, ARM64::ST1Threev1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 3, ARM64::ST1Threev2d_POST);
    break;
  }
  case ARM64ISD::ST1x4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 4, ARM64::ST1Fourv8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 4, ARM64::ST1Fourv16b_POST);
    else if (VT == MVT::v4i16)
      return SelectPostStore(Node, 4, ARM64::ST1Fourv4h_POST);
    else if (VT == MVT::v8i16)
      return SelectPostStore(Node, 4, ARM64::ST1Fourv8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 4, ARM64::ST1Fourv2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 4, ARM64::ST1Fourv4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 4, ARM64::ST1Fourv1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 4, ARM64::ST1Fourv2d_POST);
    break;
  }
  case ARM64ISD::ST2LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 2, ARM64::ST2i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16)
      return SelectPostStoreLane(Node, 2, ARM64::ST2i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 2, ARM64::ST2i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 2, ARM64::ST2i64_POST);
    break;
  }
  case ARM64ISD::ST3LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 3, ARM64::ST3i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16)
      return SelectPostStoreLane(Node, 3, ARM64::ST3i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 3, ARM64::ST3i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 3, ARM64::ST3i64_POST);
    break;
  }
  case ARM64ISD::ST4LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 4, ARM64::ST4i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16)
      return SelectPostStoreLane(Node, 4, ARM64::ST4i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 4, ARM64::ST4i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 4, ARM64::ST4i64_POST);
    break;
  }
  case ISD::FCEIL:
  case ISD::FFLOOR:
  case ISD::FTRUNC:
  case ISD::FROUND:
    if (SDNode *I = SelectLIBM(Node))
      return I;
    break;
  }

  // Select the default instruction
  ResNode = SelectCode(Node);

  DEBUG(errs() << "=> ");
  if (ResNode == nullptr || ResNode == Node)
    DEBUG(Node->dump(CurDAG));
  else
    DEBUG(ResNode->dump(CurDAG));
  DEBUG(errs() << "\n");

  return ResNode;
}

/// createARM64ISelDag - This pass converts a legalized DAG into an
/// ARM64-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createARM64ISelDag(ARM64TargetMachine &TM,
                                       CodeGenOpt::Level OptLevel) {
  return new ARM64DAGToDAGISel(TM, OptLevel);
}