//===-- ARM64ISelDAGToDAG.cpp - A dag to dag inst selector for ARM64 ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the ARM64 target.
//
//===----------------------------------------------------------------------===//
14 #define DEBUG_TYPE "arm64-isel"
15 #include "ARM64TargetMachine.h"
16 #include "MCTargetDesc/ARM64AddressingModes.h"
17 #include "llvm/ADT/APSInt.h"
18 #include "llvm/CodeGen/SelectionDAGISel.h"
19 #include "llvm/IR/Function.h" // To access function attributes.
20 #include "llvm/IR/GlobalValue.h"
21 #include "llvm/IR/Intrinsics.h"
22 #include "llvm/Support/Debug.h"
23 #include "llvm/Support/ErrorHandling.h"
24 #include "llvm/Support/MathExtras.h"
25 #include "llvm/Support/raw_ostream.h"
29 //===--------------------------------------------------------------------===//
30 /// ARM64DAGToDAGISel - ARM64 specific code to select ARM64 machine
31 /// instructions for SelectionDAG operations.
35 class ARM64DAGToDAGISel : public SelectionDAGISel {
36 ARM64TargetMachine &TM;
38 /// Subtarget - Keep a pointer to the ARM64Subtarget around so that we can
39 /// make the right decision when generating code for different targets.
  const ARM64Subtarget *Subtarget;

  bool ForCodeSize;

public:
45 explicit ARM64DAGToDAGISel(ARM64TargetMachine &tm, CodeGenOpt::Level OptLevel)
46 : SelectionDAGISel(tm, OptLevel), TM(tm),
47 Subtarget(&TM.getSubtarget<ARM64Subtarget>()), ForCodeSize(false) {}
49 virtual const char *getPassName() const {
50 return "ARM64 Instruction Selection";
53 virtual bool runOnMachineFunction(MachineFunction &MF) {
54 AttributeSet FnAttrs = MF.getFunction()->getAttributes();
56 FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
57 Attribute::OptimizeForSize) ||
58 FnAttrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
59 return SelectionDAGISel::runOnMachineFunction(MF);
62 SDNode *Select(SDNode *Node);
64 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
65 /// inline asm expressions.
  virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                            char ConstraintCode,
                                            std::vector<SDValue> &OutOps);
70 SDNode *SelectMLAV64LaneV128(SDNode *N);
71 SDNode *SelectMULLV64LaneV128(unsigned IntNo, SDNode *N);
72 bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
73 bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
74 bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
75 bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
76 return SelectShiftedRegister(N, false, Reg, Shift);
78 bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
79 return SelectShiftedRegister(N, true, Reg, Shift);
81 bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
82 return SelectAddrModeIndexed(N, 1, Base, OffImm);
84 bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
85 return SelectAddrModeIndexed(N, 2, Base, OffImm);
87 bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
88 return SelectAddrModeIndexed(N, 4, Base, OffImm);
90 bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
91 return SelectAddrModeIndexed(N, 8, Base, OffImm);
93 bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
94 return SelectAddrModeIndexed(N, 16, Base, OffImm);
96 bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
97 return SelectAddrModeUnscaled(N, 1, Base, OffImm);
99 bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
100 return SelectAddrModeUnscaled(N, 2, Base, OffImm);
102 bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
103 return SelectAddrModeUnscaled(N, 4, Base, OffImm);
105 bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
106 return SelectAddrModeUnscaled(N, 8, Base, OffImm);
108 bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
109 return SelectAddrModeUnscaled(N, 16, Base, OffImm);
112 bool SelectAddrModeRO8(SDValue N, SDValue &Base, SDValue &Offset,
114 return SelectAddrModeRO(N, 1, Base, Offset, Imm);
116 bool SelectAddrModeRO16(SDValue N, SDValue &Base, SDValue &Offset,
118 return SelectAddrModeRO(N, 2, Base, Offset, Imm);
120 bool SelectAddrModeRO32(SDValue N, SDValue &Base, SDValue &Offset,
122 return SelectAddrModeRO(N, 4, Base, Offset, Imm);
124 bool SelectAddrModeRO64(SDValue N, SDValue &Base, SDValue &Offset,
126 return SelectAddrModeRO(N, 8, Base, Offset, Imm);
128 bool SelectAddrModeRO128(SDValue N, SDValue &Base, SDValue &Offset,
130 return SelectAddrModeRO(N, 16, Base, Offset, Imm);
132 bool SelectAddrModeNoIndex(SDValue N, SDValue &Val);
  /// Form sequences of consecutive 64/128-bit registers for use in NEON
  /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
  /// between 1 and 4 elements. If it contains a single element, that is
  /// returned unchanged; otherwise a REG_SEQUENCE value is returned.
138 SDValue createDTuple(ArrayRef<SDValue> Vecs);
139 SDValue createQTuple(ArrayRef<SDValue> Vecs);
141 /// Generic helper for the createDTuple/createQTuple
142 /// functions. Those should almost always be called instead.
143 SDValue createTuple(ArrayRef<SDValue> Vecs, unsigned RegClassIDs[],
146 SDNode *SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);
148 SDNode *SelectIndexedLoad(SDNode *N, bool &Done);
150 SDNode *SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
152 SDNode *SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
154 SDNode *SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
155 SDNode *SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
157 SDNode *SelectSIMDAddSubNarrowing(unsigned IntNo, SDNode *Node);
158 SDNode *SelectSIMDXtnNarrowing(unsigned IntNo, SDNode *Node);
160 SDNode *SelectAtomic(SDNode *Node, unsigned Op8, unsigned Op16, unsigned Op32,
163 SDNode *SelectBitfieldExtractOp(SDNode *N);
164 SDNode *SelectBitfieldInsertOp(SDNode *N);
166 SDNode *SelectLIBM(SDNode *N);
168 // Include the pieces autogenerated from the target description.
169 #include "ARM64GenDAGISel.inc"
172 bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
174 bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
176 bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
178 bool SelectAddrModeRO(SDValue N, unsigned Size, SDValue &Base,
179 SDValue &Offset, SDValue &Imm);
180 bool isWorthFolding(SDValue V) const;
181 bool SelectExtendedSHL(SDValue N, unsigned Size, SDValue &Offset,
184 template<unsigned RegWidth>
185 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
186 return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
189 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);
191 } // end anonymous namespace
/// isIntImmediate - This method tests to see if the node is a constant
/// operand. If so, Imm will receive the value.
195 static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
196 if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
197 Imm = C->getZExtValue();
// isIntImmediate - This method tests to see if the value is a constant
// operand. If so, Imm will receive the value.
205 static bool isIntImmediate(SDValue N, uint64_t &Imm) {
206 return isIntImmediate(N.getNode(), Imm);
// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so, Imm will receive the value.
212 static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
214 return N->getOpcode() == Opc &&
215 isIntImmediate(N->getOperand(1).getNode(), Imm);
218 bool ARM64DAGToDAGISel::SelectAddrModeNoIndex(SDValue N, SDValue &Val) {
219 EVT ValTy = N.getValueType();
220 if (ValTy != MVT::i64)
226 bool ARM64DAGToDAGISel::SelectInlineAsmMemoryOperand(
227 const SDValue &Op, char ConstraintCode, std::vector<SDValue> &OutOps) {
228 assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
229 // Require the address to be in a register. That is safe for all ARM64
230 // variants and it is hard to do anything much smarter without knowing
231 // how the operand is used.
232 OutOps.push_back(Op);
236 /// SelectArithImmed - Select an immediate value that can be represented as
237 /// a 12-bit value shifted left by either 0 or 12. If so, return true with
238 /// Val set to the 12-bit value and Shift set to the shifter operand.
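/// For example, 0x123000 is selected as Val = 0x123 with an LSL #12 shifter,
/// while 0x1234000 is rejected because it does not fit in 24 bits.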
239 bool ARM64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
246 if (!isa<ConstantSDNode>(N.getNode()))
249 uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
252 if (Immed >> 12 == 0) {
254 } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
260 unsigned ShVal = ARM64_AM::getShifterImm(ARM64_AM::LSL, ShiftAmt);
261 Val = CurDAG->getTargetConstant(Immed, MVT::i32);
262 Shift = CurDAG->getTargetConstant(ShVal, MVT::i32);
/// SelectNegArithImmed - As above, but negates the value before trying to
/// select it.
268 bool ARM64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
275 if (!isa<ConstantSDNode>(N.getNode()))
278 // The immediate operand must be a 24-bit zero-extended immediate.
279 uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
281 // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
282 // have the opposite effect on the C flag, so this pattern mustn't match under
283 // those circumstances.
287 if (N.getValueType() == MVT::i32)
    Immed = ~((uint32_t)Immed) + 1;
  else
    Immed = ~Immed + 1ULL;
  if (Immed & 0xFFFFFFFFFF000000ULL)
    return false;

  Immed &= 0xFFFFFFULL;
295 return SelectArithImmed(CurDAG->getConstant(Immed, MVT::i32), Val, Shift);
/// getShiftTypeForNode - Translate a shift node to the corresponding
/// ShiftType value.
300 static ARM64_AM::ShiftType getShiftTypeForNode(SDValue N) {
  switch (N.getOpcode()) {
  default:
    return ARM64_AM::InvalidShift;
  case ISD::SHL:
    return ARM64_AM::LSL;
  case ISD::SRL:
    return ARM64_AM::LSR;
  case ISD::SRA:
    return ARM64_AM::ASR;
  case ISD::ROTR:
    return ARM64_AM::ROR;
  }
}
/// \brief Determine whether it is worth folding V into an extended register.
bool ARM64DAGToDAGISel::isWorthFolding(SDValue V) const {
  // It hurts if a value is used at least twice, unless we are optimizing
  // for code size.
319 if (ForCodeSize || V.hasOneUse())
324 /// SelectShiftedRegister - Select a "shifted register" operand. If the value
325 /// is not shifted, set the Shift operand to default of "LSL 0". The logical
326 /// instructions allow the shifted register to be rotated, but the arithmetic
/// instructions do not. The AllowROR parameter specifies whether ROR is
/// allowed.
329 bool ARM64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
330 SDValue &Reg, SDValue &Shift) {
331 ARM64_AM::ShiftType ShType = getShiftTypeForNode(N);
332 if (ShType == ARM64_AM::InvalidShift)
334 if (!AllowROR && ShType == ARM64_AM::ROR)
337 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
338 unsigned BitSize = N.getValueType().getSizeInBits();
339 unsigned Val = RHS->getZExtValue() & (BitSize - 1);
340 unsigned ShVal = ARM64_AM::getShifterImm(ShType, Val);
342 Reg = N.getOperand(0);
343 Shift = CurDAG->getTargetConstant(ShVal, MVT::i32);
344 return isWorthFolding(N);
350 /// getExtendTypeForNode - Translate an extend node to the corresponding
351 /// ExtendType value.
352 static ARM64_AM::ExtendType getExtendTypeForNode(SDValue N,
353 bool IsLoadStore = false) {
354 if (N.getOpcode() == ISD::SIGN_EXTEND ||
355 N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
    EVT SrcVT;
    if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
      SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
    else
      SrcVT = N.getOperand(0).getValueType();
362 if (!IsLoadStore && SrcVT == MVT::i8)
363 return ARM64_AM::SXTB;
364 else if (!IsLoadStore && SrcVT == MVT::i16)
365 return ARM64_AM::SXTH;
366 else if (SrcVT == MVT::i32)
367 return ARM64_AM::SXTW;
368 else if (SrcVT == MVT::i64)
369 return ARM64_AM::SXTX;
371 return ARM64_AM::InvalidExtend;
372 } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
373 N.getOpcode() == ISD::ANY_EXTEND) {
374 EVT SrcVT = N.getOperand(0).getValueType();
375 if (!IsLoadStore && SrcVT == MVT::i8)
376 return ARM64_AM::UXTB;
377 else if (!IsLoadStore && SrcVT == MVT::i16)
378 return ARM64_AM::UXTH;
379 else if (SrcVT == MVT::i32)
380 return ARM64_AM::UXTW;
381 else if (SrcVT == MVT::i64)
382 return ARM64_AM::UXTX;
384 return ARM64_AM::InvalidExtend;
385 } else if (N.getOpcode() == ISD::AND) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return ARM64_AM::InvalidExtend;
    uint64_t AndMask = CSD->getZExtValue();

    switch (AndMask) {
    default:
      return ARM64_AM::InvalidExtend;
    case 0xFF:
      return !IsLoadStore ? ARM64_AM::UXTB : ARM64_AM::InvalidExtend;
    case 0xFFFF:
      return !IsLoadStore ? ARM64_AM::UXTH : ARM64_AM::InvalidExtend;
    case 0xFFFFFFFF:
      return ARM64_AM::UXTW;
    }
403 return ARM64_AM::InvalidExtend;
406 // Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
407 static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
408 if (DL->getOpcode() != ARM64ISD::DUPLANE16 &&
409 DL->getOpcode() != ARM64ISD::DUPLANE32)
412 SDValue SV = DL->getOperand(0);
413 if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
416 SDValue EV = SV.getOperand(1);
417 if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
420 ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
421 ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
422 LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
423 LaneOp = EV.getOperand(0);
// Helper for SelectOpcV64LaneV128 - Recognize operations where one operand is
// a high lane extract.
430 static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
431 SDValue &LaneOp, int &LaneIdx) {
  if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
    std::swap(Op0, Op1);
    if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx))
      return false;
  }
442 /// SelectMLAV64LaneV128 - ARM64 supports vector MLAs where one multiplicand is
443 /// a lane in the upper half of a 128-bit vector. Recognize and select this so
444 /// that we don't emit unnecessary lane extracts.
445 SDNode *ARM64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
446 SDValue Op0 = N->getOperand(0);
447 SDValue Op1 = N->getOperand(1);
448 SDValue MLAOp1; // Will hold ordinary multiplicand for MLA.
449 SDValue MLAOp2; // Will hold lane-accessed multiplicand for MLA.
450 int LaneIdx = -1; // Will hold the lane index.
452 if (Op1.getOpcode() != ISD::MUL ||
453 !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
456 if (Op1.getOpcode() != ISD::MUL ||
457 !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
462 SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);
464 SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };
466 unsigned MLAOpc = ~0U;
468 switch (N->getSimpleValueType(0).SimpleTy) {
470 llvm_unreachable("Unrecognized MLA.");
472 MLAOpc = ARM64::MLAv4i16_indexed;
475 MLAOpc = ARM64::MLAv8i16_indexed;
478 MLAOpc = ARM64::MLAv2i32_indexed;
481 MLAOpc = ARM64::MLAv4i32_indexed;
485 return CurDAG->getMachineNode(MLAOpc, SDLoc(N), N->getValueType(0), Ops);
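/// SelectMULLV64LaneV128 - ARM64 supports vector [su]mull instructions where
/// one multiplicand is a lane in the upper half of a 128-bit vector. Recognize
/// and select this so that we don't emit unnecessary lane extracts.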
488 SDNode *ARM64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
493 if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
497 SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);
499 SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };
501 unsigned SMULLOpc = ~0U;
503 if (IntNo == Intrinsic::arm64_neon_smull) {
504 switch (N->getSimpleValueType(0).SimpleTy) {
506 llvm_unreachable("Unrecognized SMULL.");
508 SMULLOpc = ARM64::SMULLv4i16_indexed;
511 SMULLOpc = ARM64::SMULLv2i32_indexed;
514 } else if (IntNo == Intrinsic::arm64_neon_umull) {
515 switch (N->getSimpleValueType(0).SimpleTy) {
517 llvm_unreachable("Unrecognized SMULL.");
519 SMULLOpc = ARM64::UMULLv4i16_indexed;
522 SMULLOpc = ARM64::UMULLv2i32_indexed;
526 llvm_unreachable("Unrecognized intrinsic.");
528 return CurDAG->getMachineNode(SMULLOpc, SDLoc(N), N->getValueType(0), Ops);
/// SelectArithExtendedRegister - Select an "extended register" operand. This
532 /// operand folds in an extend followed by an optional left shift.
533 bool ARM64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
535 unsigned ShiftVal = 0;
536 ARM64_AM::ExtendType Ext;
538 if (N.getOpcode() == ISD::SHL) {
539 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
542 ShiftVal = CSD->getZExtValue();
546 Ext = getExtendTypeForNode(N.getOperand(0));
547 if (Ext == ARM64_AM::InvalidExtend)
550 Reg = N.getOperand(0).getOperand(0);
552 Ext = getExtendTypeForNode(N);
553 if (Ext == ARM64_AM::InvalidExtend)
556 Reg = N.getOperand(0);
  // ARM64 mandates that the RHS of the operation must use the smallest
  // register class that could contain the size being extended from. Thus,
  // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
  // there might not be an actual 32-bit value in the program. We can
  // (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
564 if (Reg.getValueType() == MVT::i64 && Ext != ARM64_AM::UXTX &&
565 Ext != ARM64_AM::SXTX) {
566 SDValue SubReg = CurDAG->getTargetConstant(ARM64::sub_32, MVT::i32);
567 MachineSDNode *Node = CurDAG->getMachineNode(
568 TargetOpcode::EXTRACT_SUBREG, SDLoc(N), MVT::i32, Reg, SubReg);
569 Reg = SDValue(Node, 0);
572 Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), MVT::i32);
573 return isWorthFolding(N);
576 /// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
577 /// immediate" address. The "Size" argument is the size in bytes of the memory
578 /// reference, which determines the scale.
579 bool ARM64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
580 SDValue &Base, SDValue &OffImm) {
581 const TargetLowering *TLI = getTargetLowering();
582 if (N.getOpcode() == ISD::FrameIndex) {
583 int FI = cast<FrameIndexSDNode>(N)->getIndex();
584 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
585 OffImm = CurDAG->getTargetConstant(0, MVT::i64);
589 if (N.getOpcode() == ARM64ISD::ADDlow) {
590 GlobalAddressSDNode *GAN =
591 dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
592 Base = N.getOperand(0);
593 OffImm = N.getOperand(1);
597 const GlobalValue *GV = GAN->getGlobal();
598 unsigned Alignment = GV->getAlignment();
599 const DataLayout *DL = TLI->getDataLayout();
600 if (Alignment == 0 && !Subtarget->isTargetDarwin())
601 Alignment = DL->getABITypeAlignment(GV->getType()->getElementType());
603 if (Alignment >= Size)
607 if (CurDAG->isBaseWithConstantOffset(N)) {
608 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
609 int64_t RHSC = (int64_t)RHS->getZExtValue();
610 unsigned Scale = Log2_32(Size);
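      // The offset must be a non-negative multiple of the access size that,
      // once scaled, fits in an unsigned 12-bit field (e.g. [0, 32760] for an
      // 8-byte access).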
611 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
612 Base = N.getOperand(0);
613 if (Base.getOpcode() == ISD::FrameIndex) {
614 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
615 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
617 OffImm = CurDAG->getTargetConstant(RHSC >> Scale, MVT::i64);
623 // Before falling back to our general case, check if the unscaled
624 // instructions can handle this. If so, that's preferable.
625 if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
628 // Base only. The address will be materialized into a register before
629 // the memory is accessed.
630 // add x0, Xbase, #offset
633 OffImm = CurDAG->getTargetConstant(0, MVT::i64);
637 /// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
638 /// immediate" address. This should only match when there is an offset that
639 /// is not valid for a scaled immediate addressing mode. The "Size" argument
640 /// is the size in bytes of the memory reference, which is needed here to know
641 /// what is valid for a scaled immediate.
642 bool ARM64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
643 SDValue &Base, SDValue &OffImm) {
644 if (!CurDAG->isBaseWithConstantOffset(N))
646 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
647 int64_t RHSC = RHS->getSExtValue();
648 // If the offset is valid as a scaled immediate, don't match here.
649 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
650 RHSC < (0x1000 << Log2_32(Size)))
652 if (RHSC >= -256 && RHSC < 256) {
653 Base = N.getOperand(0);
654 if (Base.getOpcode() == ISD::FrameIndex) {
655 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
656 const TargetLowering *TLI = getTargetLowering();
657 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
659 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i64);
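// Widen - Promote an i32 value to i64 by inserting it into the low half
// (sub_32) of an IMPLICIT_DEF i64 value with INSERT_SUBREG.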
666 static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
667 SDValue SubReg = CurDAG->getTargetConstant(ARM64::sub_32, MVT::i32);
668 SDValue ImpDef = SDValue(
669 CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, SDLoc(N), MVT::i64),
671 MachineSDNode *Node = CurDAG->getMachineNode(
672 TargetOpcode::INSERT_SUBREG, SDLoc(N), MVT::i64, ImpDef, N, SubReg);
673 return SDValue(Node, 0);
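// WidenIfNeeded - Widen i32 offset values to i64 so they can be used as
// 64-bit offset registers; wider values are passed through unchanged.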
676 static SDValue WidenIfNeeded(SelectionDAG *CurDAG, SDValue N) {
677 if (N.getValueType() == MVT::i32) {
678 return Widen(CurDAG, N);
/// \brief Check if the given SHL node (\p N) can be used to form an
/// extended register for an addressing mode.
686 bool ARM64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
687 SDValue &Offset, SDValue &Imm) {
688 assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
689 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
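  // The folded shift amount is encoded in three bits, so shifts of eight or
  // more cannot be matched here.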
690 if (CSD && (CSD->getZExtValue() & 0x7) == CSD->getZExtValue()) {
692 ARM64_AM::ExtendType Ext = getExtendTypeForNode(N.getOperand(0), true);
693 if (Ext == ARM64_AM::InvalidExtend) {
694 Ext = ARM64_AM::UXTX;
695 Offset = WidenIfNeeded(CurDAG, N.getOperand(0));
697 Offset = WidenIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
700 unsigned LegalShiftVal = Log2_32(Size);
701 unsigned ShiftVal = CSD->getZExtValue();
703 if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
706 Imm = CurDAG->getTargetConstant(
707 ARM64_AM::getMemExtendImm(Ext, ShiftVal != 0), MVT::i32);
708 if (isWorthFolding(N))
714 bool ARM64DAGToDAGISel::SelectAddrModeRO(SDValue N, unsigned Size,
715 SDValue &Base, SDValue &Offset,
717 if (N.getOpcode() != ISD::ADD)
719 SDValue LHS = N.getOperand(0);
720 SDValue RHS = N.getOperand(1);
722 // We don't want to match immediate adds here, because they are better lowered
723 // to the register-immediate addressing modes.
724 if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
727 // Check if this particular node is reused in any non-memory related
728 // operation. If yes, do not try to fold this node into the address
729 // computation, since the computation will be kept.
730 const SDNode *Node = N.getNode();
731 for (SDNode *UI : Node->uses()) {
732 if (!isa<MemSDNode>(*UI))
  // Remember if it is worth folding N when it produces an extended register.
737 bool IsExtendedRegisterWorthFolding = isWorthFolding(N);
739 // Try to match a shifted extend on the RHS.
740 if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
741 SelectExtendedSHL(RHS, Size, Offset, Imm)) {
746 // Try to match a shifted extend on the LHS.
747 if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
748 SelectExtendedSHL(LHS, Size, Offset, Imm)) {
753 ARM64_AM::ExtendType Ext = ARM64_AM::UXTX;
754 // Try to match an unshifted extend on the LHS.
755 if (IsExtendedRegisterWorthFolding &&
756 (Ext = getExtendTypeForNode(LHS, true)) != ARM64_AM::InvalidExtend) {
758 Offset = WidenIfNeeded(CurDAG, LHS.getOperand(0));
759 Imm = CurDAG->getTargetConstant(ARM64_AM::getMemExtendImm(Ext, false),
761 if (isWorthFolding(LHS))
765 // Try to match an unshifted extend on the RHS.
766 if (IsExtendedRegisterWorthFolding &&
767 (Ext = getExtendTypeForNode(RHS, true)) != ARM64_AM::InvalidExtend) {
769 Offset = WidenIfNeeded(CurDAG, RHS.getOperand(0));
770 Imm = CurDAG->getTargetConstant(ARM64_AM::getMemExtendImm(Ext, false),
772 if (isWorthFolding(RHS))
776 // Match any non-shifted, non-extend, non-immediate add expression.
778 Offset = WidenIfNeeded(CurDAG, RHS);
779 Ext = ARM64_AM::UXTX;
780 Imm = CurDAG->getTargetConstant(ARM64_AM::getMemExtendImm(Ext, false),
782 // Reg1 + Reg2 is free: no check needed.
786 SDValue ARM64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
787 static unsigned RegClassIDs[] = { ARM64::DDRegClassID, ARM64::DDDRegClassID,
788 ARM64::DDDDRegClassID };
789 static unsigned SubRegs[] = { ARM64::dsub0, ARM64::dsub1,
790 ARM64::dsub2, ARM64::dsub3 };
792 return createTuple(Regs, RegClassIDs, SubRegs);
795 SDValue ARM64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
796 static unsigned RegClassIDs[] = { ARM64::QQRegClassID, ARM64::QQQRegClassID,
797 ARM64::QQQQRegClassID };
798 static unsigned SubRegs[] = { ARM64::qsub0, ARM64::qsub1,
799 ARM64::qsub2, ARM64::qsub3 };
801 return createTuple(Regs, RegClassIDs, SubRegs);
804 SDValue ARM64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
805 unsigned RegClassIDs[],
806 unsigned SubRegs[]) {
807 // There's no special register-class for a vector-list of 1 element: it's just
809 if (Regs.size() == 1)
812 assert(Regs.size() >= 2 && Regs.size() <= 4);
814 SDLoc DL(Regs[0].getNode());
816 SmallVector<SDValue, 4> Ops;
818 // First operand of REG_SEQUENCE is the desired RegClass.
820 CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], MVT::i32));
822 // Then we get pairs of source & subregister-position for the components.
823 for (unsigned i = 0; i < Regs.size(); ++i) {
824 Ops.push_back(Regs[i]);
825 Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], MVT::i32));
829 CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
830 return SDValue(N, 0);
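// SelectTable - Select a NEON table lookup (tbl/tbx) intrinsic. The table
// vectors are gathered into a Q-register tuple with REG_SEQUENCE; the isExt
// flag selects the tbx forms, which take an extra source operand.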
833 SDNode *ARM64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs,
834 unsigned Opc, bool isExt) {
836 EVT VT = N->getValueType(0);
838 unsigned ExtOff = isExt;
840 // Form a REG_SEQUENCE to force register allocation.
841 unsigned Vec0Off = ExtOff + 1;
842 SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
843 N->op_begin() + Vec0Off + NumVecs);
844 SDValue RegSeq = createQTuple(Regs);
  SmallVector<SDValue, 6> Ops;
  if (isExt)
    Ops.push_back(N->getOperand(1));
849 Ops.push_back(RegSeq);
850 Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
851 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
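// SelectIndexedLoad - Select a pre- or post-indexed load. Legality of the
// addressing mode was already checked when the load was marked as indexed;
// here we only pick the matching _isel opcode and, for zero-extending loads
// feeding a 64-bit result, widen the i32 result with SUBREG_TO_REG.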
854 SDNode *ARM64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
855 LoadSDNode *LD = cast<LoadSDNode>(N);
856 if (LD->isUnindexed())
858 EVT VT = LD->getMemoryVT();
859 EVT DstVT = N->getValueType(0);
860 ISD::MemIndexedMode AM = LD->getAddressingMode();
861 bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
863 // We're not doing validity checking here. That was done when checking
864 // if we should mark the load as indexed or not. We're just selecting
865 // the right instruction.
868 ISD::LoadExtType ExtType = LD->getExtensionType();
869 bool InsertTo64 = false;
871 Opcode = IsPre ? ARM64::LDRXpre_isel : ARM64::LDRXpost_isel;
872 else if (VT == MVT::i32) {
873 if (ExtType == ISD::NON_EXTLOAD)
874 Opcode = IsPre ? ARM64::LDRWpre_isel : ARM64::LDRWpost_isel;
875 else if (ExtType == ISD::SEXTLOAD)
876 Opcode = IsPre ? ARM64::LDRSWpre_isel : ARM64::LDRSWpost_isel;
878 Opcode = IsPre ? ARM64::LDRWpre_isel : ARM64::LDRWpost_isel;
880 // The result of the load is only i32. It's the subreg_to_reg that makes
884 } else if (VT == MVT::i16) {
885 if (ExtType == ISD::SEXTLOAD) {
886 if (DstVT == MVT::i64)
887 Opcode = IsPre ? ARM64::LDRSHXpre_isel : ARM64::LDRSHXpost_isel;
889 Opcode = IsPre ? ARM64::LDRSHWpre_isel : ARM64::LDRSHWpost_isel;
891 Opcode = IsPre ? ARM64::LDRHHpre_isel : ARM64::LDRHHpost_isel;
892 InsertTo64 = DstVT == MVT::i64;
893 // The result of the load is only i32. It's the subreg_to_reg that makes
897 } else if (VT == MVT::i8) {
898 if (ExtType == ISD::SEXTLOAD) {
899 if (DstVT == MVT::i64)
900 Opcode = IsPre ? ARM64::LDRSBXpre_isel : ARM64::LDRSBXpost_isel;
902 Opcode = IsPre ? ARM64::LDRSBWpre_isel : ARM64::LDRSBWpost_isel;
904 Opcode = IsPre ? ARM64::LDRBBpre_isel : ARM64::LDRBBpost_isel;
905 InsertTo64 = DstVT == MVT::i64;
906 // The result of the load is only i32. It's the subreg_to_reg that makes
910 } else if (VT == MVT::f32) {
911 Opcode = IsPre ? ARM64::LDRSpre_isel : ARM64::LDRSpost_isel;
912 } else if (VT == MVT::f64) {
913 Opcode = IsPre ? ARM64::LDRDpre_isel : ARM64::LDRDpost_isel;
916 SDValue Chain = LD->getChain();
917 SDValue Base = LD->getBasePtr();
918 ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
919 int OffsetVal = (int)OffsetOp->getZExtValue();
920 SDValue Offset = CurDAG->getTargetConstant(OffsetVal, MVT::i64);
921 SDValue Ops[] = { Base, Offset, Chain };
922 SDNode *Res = CurDAG->getMachineNode(Opcode, SDLoc(N), DstVT, MVT::i64,
924 // Either way, we're replacing the node, so tell the caller that.
927 SDValue SubReg = CurDAG->getTargetConstant(ARM64::sub_32, MVT::i32);
928 SDNode *Sub = CurDAG->getMachineNode(
929 ARM64::SUBREG_TO_REG, SDLoc(N), MVT::i64,
930 CurDAG->getTargetConstant(0, MVT::i64), SDValue(Res, 0), SubReg);
931 ReplaceUses(SDValue(N, 0), SDValue(Sub, 0));
932 ReplaceUses(SDValue(N, 1), SDValue(Res, 1));
933 ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
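// SelectLoad - Select a NEON structure load (ldN) intrinsic. The instruction
// produces an Untyped super-register from which the individual result vectors
// are extracted with EXTRACT_SUBREG.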
939 SDNode *ARM64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
940 unsigned SubRegIdx) {
942 EVT VT = N->getValueType(0);
943 SDValue Chain = N->getOperand(0);
945 SmallVector<SDValue, 6> Ops;
946 Ops.push_back(N->getOperand(2)); // Mem operand;
947 Ops.push_back(Chain);
949 std::vector<EVT> ResTys;
950 ResTys.push_back(MVT::Untyped);
951 ResTys.push_back(MVT::Other);
953 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
954 SDValue SuperReg = SDValue(Ld, 0);
956 // MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
957 // MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
958 // cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
962 ReplaceUses(SDValue(N, 3), CurDAG->getTargetExtractSubreg(SubRegIdx + 3, dl,
966 ReplaceUses(SDValue(N, 2), CurDAG->getTargetExtractSubreg(SubRegIdx + 2, dl,
970 ReplaceUses(SDValue(N, 1), CurDAG->getTargetExtractSubreg(SubRegIdx + 1, dl,
972 ReplaceUses(SDValue(N, 0),
973 CurDAG->getTargetExtractSubreg(SubRegIdx, dl, VT, SuperReg));
976 ReplaceUses(SDValue(N, 0), SuperReg);
980 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
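// SelectStore - Select a NEON structure store (stN) intrinsic. The source
// vectors are combined into a D- or Q-register tuple with REG_SEQUENCE so the
// register allocator places them in consecutive registers.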
985 SDNode *ARM64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
988 EVT VT = N->getOperand(2)->getValueType(0);
990 // Form a REG_SEQUENCE to force register allocation.
991 bool Is128Bit = VT.getSizeInBits() == 128;
992 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
993 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
995 SmallVector<SDValue, 6> Ops;
996 Ops.push_back(RegSeq);
997 Ops.push_back(N->getOperand(NumVecs + 2));
998 Ops.push_back(N->getOperand(0));
999 SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);
1004 /// WidenVector - Given a value in the V64 register class, produce the
1005 /// equivalent value in the V128 register class.
1010 WidenVector(SelectionDAG &DAG) : DAG(DAG) {}
1012 SDValue operator()(SDValue V64Reg) {
1013 EVT VT = V64Reg.getValueType();
1014 unsigned NarrowSize = VT.getVectorNumElements();
1015 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1016 MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
1020 SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
1021 return DAG.getTargetInsertSubreg(ARM64::dsub, DL, WideTy, Undef, V64Reg);
1025 /// NarrowVector - Given a value in the V128 register class, produce the
1026 /// equivalent value in the V64 register class.
1027 static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
1028 EVT VT = V128Reg.getValueType();
1029 unsigned WideSize = VT.getVectorNumElements();
1030 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1031 MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);
  return DAG.getTargetExtractSubreg(ARM64::dsub, SDLoc(V128Reg), NarrowTy,
                                    V128Reg);
}
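// SelectLoadLane - Select a NEON single-lane structure load (ldN lane).
// 64-bit operand vectors are widened to 128 bits for the operation and the
// results are narrowed back afterwards.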
1037 SDNode *ARM64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
1040 EVT VT = N->getValueType(0);
1041 bool Narrow = VT.getSizeInBits() == 64;
1043 // Form a REG_SEQUENCE to force register allocation.
1044 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1047 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1048 WidenVector(*CurDAG));
1050 SDValue RegSeq = createQTuple(Regs);
1052 std::vector<EVT> ResTys;
1053 ResTys.push_back(MVT::Untyped);
1054 ResTys.push_back(MVT::Other);
1057 cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
1059 SmallVector<SDValue, 6> Ops;
1060 Ops.push_back(RegSeq);
1061 Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64));
1062 Ops.push_back(N->getOperand(NumVecs + 3));
1063 Ops.push_back(N->getOperand(0));
1064 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1065 SDValue SuperReg = SDValue(Ld, 0);
1067 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
1071 CurDAG->getTargetExtractSubreg(ARM64::qsub3, dl, WideVT, SuperReg);
1073 ReplaceUses(SDValue(N, 3), NarrowVector(NV3, *CurDAG));
1075 ReplaceUses(SDValue(N, 3), NV3);
1080 CurDAG->getTargetExtractSubreg(ARM64::qsub2, dl, WideVT, SuperReg);
1082 ReplaceUses(SDValue(N, 2), NarrowVector(NV2, *CurDAG));
1084 ReplaceUses(SDValue(N, 2), NV2);
1089 CurDAG->getTargetExtractSubreg(ARM64::qsub1, dl, WideVT, SuperReg);
1091 CurDAG->getTargetExtractSubreg(ARM64::qsub0, dl, WideVT, SuperReg);
1093 ReplaceUses(SDValue(N, 1), NarrowVector(NV1, *CurDAG));
1094 ReplaceUses(SDValue(N, 0), NarrowVector(NV0, *CurDAG));
1096 ReplaceUses(SDValue(N, 1), NV1);
1097 ReplaceUses(SDValue(N, 0), NV0);
1103 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
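// SelectStoreLane - Select a NEON single-lane structure store (stN lane),
// widening any 64-bit source vectors to 128 bits before forming the tuple.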
1108 SDNode *ARM64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
1111 EVT VT = N->getOperand(2)->getValueType(0);
1112 bool Narrow = VT.getSizeInBits() == 64;
1114 // Form a REG_SEQUENCE to force register allocation.
1115 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1118 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1119 WidenVector(*CurDAG));
1121 SDValue RegSeq = createQTuple(Regs);
1124 cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
1126 SmallVector<SDValue, 6> Ops;
1127 Ops.push_back(RegSeq);
1128 Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64));
1129 Ops.push_back(N->getOperand(NumVecs + 3));
1130 Ops.push_back(N->getOperand(0));
1131 SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
1133 // Transfer memoperands.
1134 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1135 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1136 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
1141 SDNode *ARM64DAGToDAGISel::SelectAtomic(SDNode *Node, unsigned Op8,
1142 unsigned Op16, unsigned Op32,
1144 // Mostly direct translation to the given operations, except that we preserve
1145 // the AtomicOrdering for use later on.
1146 AtomicSDNode *AN = cast<AtomicSDNode>(Node);
1147 EVT VT = AN->getMemoryVT();
1152 else if (VT == MVT::i16)
1154 else if (VT == MVT::i32)
1156 else if (VT == MVT::i64)
1159 llvm_unreachable("Unexpected atomic operation");
1161 SmallVector<SDValue, 4> Ops;
1162 for (unsigned i = 1; i < AN->getNumOperands(); ++i)
1163 Ops.push_back(AN->getOperand(i));
1165 Ops.push_back(CurDAG->getTargetConstant(AN->getOrdering(), MVT::i32));
1166 Ops.push_back(AN->getOperand(0)); // Chain moves to the end
1168 return CurDAG->SelectNodeTo(Node, Op, AN->getValueType(0), MVT::Other,
1169 &Ops[0], Ops.size());
1172 static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
1173 unsigned &Opc, SDValue &Opd0,
1174 unsigned &LSB, unsigned &MSB,
1175 unsigned NumberOfIgnoredLowBits,
1176 bool BiggerPattern) {
1177 assert(N->getOpcode() == ISD::AND &&
1178 "N must be a AND operation to call this function");
1180 EVT VT = N->getValueType(0);
  // We could test the type of VT here and return false when it does not
  // match, but since that check has already been done by the caller in the
  // current context, we turned it into an assert to avoid redundant code.
1185 assert((VT == MVT::i32 || VT == MVT::i64) &&
1186 "Type checking must have been done before calling this function");
1188 // FIXME: simplify-demanded-bits in DAGCombine will probably have
1189 // changed the AND node to a 32-bit mask operation. We'll have to
1190 // undo that as part of the transform here if we want to catch all
1191 // the opportunities.
  // Currently the NumberOfIgnoredLowBits argument helps to recover
  // from these situations when matching a bigger pattern (bitfield insert).
1195 // For unsigned extracts, check for a shift right and mask
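  // E.g. (and (srl x, 3), 0xff) extracts bits [10:3] of x and is selected as
  // UBFM x, 3, 10.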
1196 uint64_t And_imm = 0;
1197 if (!isOpcWithIntImmediate(N, ISD::AND, And_imm))
1200 const SDNode *Op0 = N->getOperand(0).getNode();
1202 // Because of simplify-demanded-bits in DAGCombine, the mask may have been
1203 // simplified. Try to undo that
1204 And_imm |= (1 << NumberOfIgnoredLowBits) - 1;
1206 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
1207 if (And_imm & (And_imm + 1))
1210 bool ClampMSB = false;
1211 uint64_t Srl_imm = 0;
1212 // Handle the SRL + ANY_EXTEND case.
1213 if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
1214 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, Srl_imm)) {
1215 // Extend the incoming operand of the SRL to 64-bit.
1216 Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
1217 // Make sure to clamp the MSB so that we preserve the semantics of the
1218 // original operations.
1220 } else if (isOpcWithIntImmediate(Op0, ISD::SRL, Srl_imm)) {
1221 Opd0 = Op0->getOperand(0);
1222 } else if (BiggerPattern) {
1223 // Let's pretend a 0 shift right has been performed.
1224 // The resulting code will be at least as good as the original one
1225 // plus it may expose more opportunities for bitfield insert pattern.
1226 // FIXME: Currently we limit this to the bigger pattern, because
1227 // some optimizations expect AND and not UBFM
1228 Opd0 = N->getOperand(0);
1232 assert((BiggerPattern || (Srl_imm > 0 && Srl_imm < VT.getSizeInBits())) &&
1233 "bad amount in shift node!");
  LSB = Srl_imm;
  MSB = Srl_imm + (VT == MVT::i32 ? CountTrailingOnes_32(And_imm)
                                  : CountTrailingOnes_64(And_imm)) -
        1;
1240 // Since we're moving the extend before the right shift operation, we need
1241 // to clamp the MSB to make sure we don't shift in undefined bits instead of
1242 // the zeros which would get shifted in with the original right shift
1244 MSB = MSB > 31 ? 31 : MSB;
1246 Opc = VT == MVT::i32 ? ARM64::UBFMWri : ARM64::UBFMXri;
1250 static bool isOneBitExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
1251 unsigned &LSB, unsigned &MSB) {
1252 // We are looking for the following pattern which basically extracts a single
1253 // bit from the source value and places it in the LSB of the destination
  // value; all other bits of the destination value are set to zero:
1256 // Value2 = AND Value, MaskImm
1257 // SRL Value2, ShiftImm
1259 // with MaskImm >> ShiftImm == 1.
1261 // This gets selected into a single UBFM:
1263 // UBFM Value, ShiftImm, ShiftImm
1266 if (N->getOpcode() != ISD::SRL)
1269 uint64_t And_mask = 0;
1270 if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, And_mask))
1273 Opd0 = N->getOperand(0).getOperand(0);
1275 uint64_t Srl_imm = 0;
1276 if (!isIntImmediate(N->getOperand(1), Srl_imm))
1279 // Check whether we really have a one bit extract here.
1280 if (And_mask >> Srl_imm == 0x1) {
1281 if (N->getValueType(0) == MVT::i32)
1282 Opc = ARM64::UBFMWri;
1284 Opc = ARM64::UBFMXri;
1286 LSB = MSB = Srl_imm;
1294 static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
1295 unsigned &LSB, unsigned &MSB,
1296 bool BiggerPattern) {
1297 assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
1298 "N must be a SHR/SRA operation to call this function");
1300 EVT VT = N->getValueType(0);
  // We could test the type of VT here and return false when it does not
  // match, but since that check has already been done by the caller in the
  // current context, we turned it into an assert to avoid redundant code.
1305 assert((VT == MVT::i32 || VT == MVT::i64) &&
1306 "Type checking must have been done before calling this function");
1308 // Check for AND + SRL doing a one bit extract.
1309 if (isOneBitExtractOpFromShr(N, Opc, Opd0, LSB, MSB))
  // We're looking for a shift of a shift.
1313 uint64_t Shl_imm = 0;
1314 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
1315 Opd0 = N->getOperand(0).getOperand(0);
1316 } else if (BiggerPattern) {
1317 // Let's pretend a 0 shift left has been performed.
1318 // FIXME: Currently we limit this to the bigger pattern case,
1319 // because some optimizations expect AND and not UBFM
1320 Opd0 = N->getOperand(0);
1324 assert(Shl_imm < VT.getSizeInBits() && "bad amount in shift node!");
1325 uint64_t Srl_imm = 0;
1326 if (!isIntImmediate(N->getOperand(1), Srl_imm))
1329 assert(Srl_imm > 0 && Srl_imm < VT.getSizeInBits() &&
1330 "bad amount in shift node!");
1331 // Note: The width operand is encoded as width-1.
1332 unsigned Width = VT.getSizeInBits() - Srl_imm - 1;
1333 int sLSB = Srl_imm - Shl_imm;
1338 // SRA requires a signed extraction
1340 Opc = N->getOpcode() == ISD::SRA ? ARM64::SBFMWri : ARM64::UBFMWri;
1342 Opc = N->getOpcode() == ISD::SRA ? ARM64::SBFMXri : ARM64::UBFMXri;
1346 static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
1347 SDValue &Opd0, unsigned &LSB, unsigned &MSB,
1348 unsigned NumberOfIgnoredLowBits = 0,
1349 bool BiggerPattern = false) {
1350 if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
1353 switch (N->getOpcode()) {
1355 if (!N->isMachineOpcode())
1359 return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, LSB, MSB,
1360 NumberOfIgnoredLowBits, BiggerPattern);
1363 return isBitfieldExtractOpFromShr(N, Opc, Opd0, LSB, MSB, BiggerPattern);
1366 unsigned NOpc = N->getMachineOpcode();
1370 case ARM64::SBFMWri:
1371 case ARM64::UBFMWri:
1372 case ARM64::SBFMXri:
1373 case ARM64::UBFMXri:
1375 Opd0 = N->getOperand(0);
1376 LSB = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
1377 MSB = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
1384 SDNode *ARM64DAGToDAGISel::SelectBitfieldExtractOp(SDNode *N) {
1385 unsigned Opc, LSB, MSB;
1387 if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, LSB, MSB))
1390 EVT VT = N->getValueType(0);
1391 SDValue Ops[] = { Opd0, CurDAG->getTargetConstant(LSB, VT),
1392 CurDAG->getTargetConstant(MSB, VT) };
1393 return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 3);
// Is the mask an i32 or i64 binary sequence 1..10..0 and
// CountTrailingZeros(mask) == ExpectedTrailingZeros?
1398 static bool isHighMask(uint64_t Mask, unsigned ExpectedTrailingZeros,
1399 unsigned NumberOfIgnoredHighBits, EVT VT) {
1400 assert((VT == MVT::i32 || VT == MVT::i64) &&
1401 "i32 or i64 mask type expected!");
1403 uint64_t ExpectedMask;
1404 if (VT == MVT::i32) {
1405 uint32_t ExpectedMaski32 = ~0 << ExpectedTrailingZeros;
1406 ExpectedMask = ExpectedMaski32;
1407 if (NumberOfIgnoredHighBits) {
      uint32_t highMask = ~0 << (32 - NumberOfIgnoredHighBits);
      Mask |= highMask;
    }
  } else {
1412 ExpectedMask = ((uint64_t) ~0) << ExpectedTrailingZeros;
1413 if (NumberOfIgnoredHighBits)
1414 Mask |= ((uint64_t) ~0) << (64 - NumberOfIgnoredHighBits);
1417 return Mask == ExpectedMask;
// Look for bits that will be useful for later uses.
// A bit is considered useless as soon as it is dropped and never used
// before it has been dropped.
// E.g., looking for useful bits of x
// 1. y = x & 0x7
// 2. z = y >> 2
// After #1, x useful bits are 0x7, then the useful bits of x, live through
// y are 0x7
// After #2, the useful bits of x are 0x4.
// However, if x is used on an unpredictable instruction, then all its bits
// are useful.
1435 static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);
1437 static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
1440 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
1441 Imm = ARM64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
1442 UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
1443 getUsefulBits(Op, UsefulBits, Depth + 1);
1446 static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
1447 uint64_t Imm, uint64_t MSB,
1449 // inherit the bitwidth value
1450 APInt OpUsefulBits(UsefulBits);
1454 OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
1456 // The interesting part will be in the lower part of the result
1457 getUsefulBits(Op, OpUsefulBits, Depth + 1);
1458 // The interesting part was starting at Imm in the argument
1459 OpUsefulBits = OpUsefulBits.shl(Imm);
1461 OpUsefulBits = OpUsefulBits.shl(MSB + 1);
1463 // The interesting part will be shifted in the result
1464 OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
1465 getUsefulBits(Op, OpUsefulBits, Depth + 1);
1466 // The interesting part was at zero in the argument
1467 OpUsefulBits = OpUsefulBits.lshr(OpUsefulBits.getBitWidth() - Imm);
1470 UsefulBits &= OpUsefulBits;
1473 static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
1476 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
1478 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1480 getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
1483 static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
1485 uint64_t ShiftTypeAndValue =
1486 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1487 APInt Mask(UsefulBits);
1488 Mask.clearAllBits();
1491 if (ARM64_AM::getShiftType(ShiftTypeAndValue) == ARM64_AM::LSL) {
1493 uint64_t ShiftAmt = ARM64_AM::getShiftValue(ShiftTypeAndValue);
1494 Mask = Mask.shl(ShiftAmt);
1495 getUsefulBits(Op, Mask, Depth + 1);
1496 Mask = Mask.lshr(ShiftAmt);
1497 } else if (ARM64_AM::getShiftType(ShiftTypeAndValue) == ARM64_AM::LSR) {
1499 // We do not handle ARM64_AM::ASR, because the sign will change the
1500 // number of useful bits
1501 uint64_t ShiftAmt = ARM64_AM::getShiftValue(ShiftTypeAndValue);
1502 Mask = Mask.lshr(ShiftAmt);
1503 getUsefulBits(Op, Mask, Depth + 1);
1504 Mask = Mask.shl(ShiftAmt);
1511 static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
1514 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1516 cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();
1518 if (Op.getOperand(1) == Orig)
1519 return getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
1521 APInt OpUsefulBits(UsefulBits);
1525 OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
1527 UsefulBits &= ~OpUsefulBits;
1528 getUsefulBits(Op, UsefulBits, Depth + 1);
1530 OpUsefulBits = OpUsefulBits.shl(MSB + 1);
1532 UsefulBits = ~(OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm));
1533 getUsefulBits(Op, UsefulBits, Depth + 1);
1537 static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
1538 SDValue Orig, unsigned Depth) {
1540 // Users of this node should have already been instruction selected
1541 // FIXME: Can we turn that into an assert?
1542 if (!UserNode->isMachineOpcode())
1545 switch (UserNode->getMachineOpcode()) {
1548 case ARM64::ANDSWri:
1549 case ARM64::ANDSXri:
1552 // We increment Depth only when we call the getUsefulBits
1553 return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
1555 case ARM64::UBFMWri:
1556 case ARM64::UBFMXri:
1557 return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);
1561 if (UserNode->getOperand(1) != Orig)
1563 return getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
1567 return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);
1571 static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
1574 // Initialize UsefulBits
1576 unsigned Bitwidth = Op.getValueType().getScalarType().getSizeInBits();
    // At the beginning, assume every produced bit is useful.
1578 UsefulBits = APInt(Bitwidth, 0);
1579 UsefulBits.flipAllBits();
1581 APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);
1583 for (SDNode *Node : Op.getNode()->uses()) {
1584 // A use cannot produce useful bits
1585 APInt UsefulBitsForUse = APInt(UsefulBits);
1586 getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
1587 UsersUsefulBits |= UsefulBitsForUse;
1589 // UsefulBits contains the produced bits that are meaningful for the
1590 // current definition, thus a user cannot make a bit meaningful at
1592 UsefulBits &= UsersUsefulBits;
// Given an OR operation, check if we have the following pattern
// ubfm c, b, imm, imm2 (or something that does the same job, see
// isBitfieldExtractOp)
// d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
//                 countTrailingZeros(mask2) == imm2 - imm + 1
// f = d | c
// if yes, given reference arguments will be updated so that one can replace
// the OR instruction with:
// f = Opc Opd0, Opd1, LSB, MSB ; where Opc is a BFM, LSB = imm, and MSB = imm2
1604 static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Opd0,
1605 SDValue &Opd1, unsigned &LSB,
1606 unsigned &MSB, SelectionDAG *CurDAG) {
1607 assert(N->getOpcode() == ISD::OR && "Expect a OR operation");
1610 EVT VT = N->getValueType(0);
1612 Opc = ARM64::BFMWri;
1613 else if (VT == MVT::i64)
1614 Opc = ARM64::BFMXri;
1618 // Because of simplify-demanded-bits in DAGCombine, involved masks may not
1619 // have the expected shape. Try to undo that.
1621 getUsefulBits(SDValue(N, 0), UsefulBits);
1623 unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
1624 unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();
  // OR is commutative; check both possibilities (does llvm provide a
  // way to do that directly, e.g., via code matcher?).
1628 SDValue OrOpd1Val = N->getOperand(1);
1629 SDNode *OrOpd0 = N->getOperand(0).getNode();
1630 SDNode *OrOpd1 = N->getOperand(1).getNode();
1631 for (int i = 0; i < 2;
1632 ++i, std::swap(OrOpd0, OrOpd1), OrOpd1Val = N->getOperand(0)) {
1634 // Set Opd1, LSB and MSB arguments by looking for
1635 // c = ubfm b, imm, imm2
1636 if (!isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Opd1, LSB, MSB,
1637 NumberOfIgnoredLowBits, true))
1640 // Check that the returned opcode is compatible with the pattern,
1641 // i.e., same type and zero extended (U and not S)
1642 if ((BFXOpc != ARM64::UBFMXri && VT == MVT::i64) ||
1643 (BFXOpc != ARM64::UBFMWri && VT == MVT::i32))
1646 // Compute the width of the bitfield insertion
1647 int sMSB = MSB - LSB + 1;
    // FIXME: This constraint is to catch bitfield insertion; we may
    // want to widen the pattern if we want to grab general bitfield
    // extraction.
1654 // Check the second part of the pattern
1655 EVT VT = OrOpd1->getValueType(0);
1656 if (VT != MVT::i32 && VT != MVT::i64)
    // Compute the known-zero bits for the candidate of the first operand.
    // This allows us to catch more general cases than just looking for
    // AND with imm. Indeed, simplify-demanded-bits may have removed
    // the AND instruction because it proved to be useless.
1663 APInt KnownZero, KnownOne;
1664 CurDAG->ComputeMaskedBits(OrOpd1Val, KnownZero, KnownOne);
1666 // Check if there is enough room for the second operand to appear
1668 if (KnownZero.countTrailingOnes() < (unsigned)sMSB)
1671 // Set the first operand
1673 if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
1674 isHighMask(Imm, sMSB, NumberOfIgnoredHighBits, VT))
1675 // In that case, we can eliminate the AND
1676 Opd0 = OrOpd1->getOperand(0);
1678 // Maybe the AND has been removed by simplify-demanded-bits
1679 // or is useful because it discards more bits
1689 SDNode *ARM64DAGToDAGISel::SelectBitfieldInsertOp(SDNode *N) {
1690 if (N->getOpcode() != ISD::OR)
1697 if (!isBitfieldInsertOpFromOr(N, Opc, Opd0, Opd1, LSB, MSB, CurDAG))
1700 EVT VT = N->getValueType(0);
1701 SDValue Ops[] = { Opd0,
1703 CurDAG->getTargetConstant(LSB, VT),
1704 CurDAG->getTargetConstant(MSB, VT) };
1705 return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 4);
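// SelectLIBM - Select floating-point rounding nodes to the matching FRINT
// instruction. When unsafe FP math is disabled, an FRINTX of the input is
// emitted first and glued in so that the required FP exception flags are
// still set.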
1708 SDNode *ARM64DAGToDAGISel::SelectLIBM(SDNode *N) {
1709 EVT VT = N->getValueType(0);
1712 unsigned FRINTXOpcs[] = { ARM64::FRINTXSr, ARM64::FRINTXDr };
1714 if (VT == MVT::f32) {
1716 } else if (VT == MVT::f64) {
1719 return 0; // Unrecognized argument type. Fall back on default codegen.
1721 // Pick the FRINTX variant needed to set the flags.
1722 unsigned FRINTXOpc = FRINTXOpcs[Variant];
1724 switch (N->getOpcode()) {
1726 return 0; // Unrecognized libm ISD node. Fall back on default codegen.
1728 unsigned FRINTPOpcs[] = { ARM64::FRINTPSr, ARM64::FRINTPDr };
1729 Opc = FRINTPOpcs[Variant];
1733 unsigned FRINTMOpcs[] = { ARM64::FRINTMSr, ARM64::FRINTMDr };
1734 Opc = FRINTMOpcs[Variant];
1738 unsigned FRINTZOpcs[] = { ARM64::FRINTZSr, ARM64::FRINTZDr };
1739 Opc = FRINTZOpcs[Variant];
1743 unsigned FRINTAOpcs[] = { ARM64::FRINTASr, ARM64::FRINTADr };
1744 Opc = FRINTAOpcs[Variant];
1750 SDValue In = N->getOperand(0);
1751 SmallVector<SDValue, 2> Ops;
1754 if (!TM.Options.UnsafeFPMath) {
1755 SDNode *FRINTX = CurDAG->getMachineNode(FRINTXOpc, dl, VT, MVT::Glue, In);
1756 Ops.push_back(SDValue(FRINTX, 1));
1759 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
1763 ARM64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
1764 unsigned RegWidth) {
1766 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
1767 FVal = CN->getValueAPF();
1768 else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
1769 // Some otherwise illegal constants are allowed in this case.
1770 if (LN->getOperand(1).getOpcode() != ARM64ISD::ADDlow ||
1771 !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
1774 ConstantPoolSDNode *CN =
1775 dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
1776 FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
  // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
  // is between 1 and 32 for a destination w-register, or 1 and 64 for an
  // x-register.
1784 // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
1785 // want THIS_NODE to be 2^fbits. This is much easier to deal with using
1789 // fbits is between 1 and 64 in the worst-case, which means the fmul
1790 // could have 2^64 as an actual operand. Need 65 bits of precision.
1791 APSInt IntVal(65, true);
1792 FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);
1794 // N.b. isPowerOf2 also checks for > 0.
1795 if (!IsExact || !IntVal.isPowerOf2()) return false;
1796 unsigned FBits = IntVal.logBase2();
1798 // Checks above should have guaranteed that we haven't lost information in
1799 // finding FBits, but it must still be in range.
1800 if (FBits == 0 || FBits > RegWidth) return false;
1802 FixedPos = CurDAG->getTargetConstant(FBits, MVT::i32);
1806 SDNode *ARM64DAGToDAGISel::Select(SDNode *Node) {
1807 // Dump information about the Node being selected
1808 DEBUG(errs() << "Selecting: ");
1809 DEBUG(Node->dump(CurDAG));
1810 DEBUG(errs() << "\n");
  // If we have a custom node, we have already selected it!
1813 if (Node->isMachineOpcode()) {
1814 DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
1815 Node->setNodeId(-1);
  // A few custom selection cases.
1820 SDNode *ResNode = 0;
1821 EVT VT = Node->getValueType(0);
1823 switch (Node->getOpcode()) {
1828 if (SDNode *I = SelectMLAV64LaneV128(Node))
1832 case ISD::ATOMIC_LOAD_ADD:
1833 return SelectAtomic(Node, ARM64::ATOMIC_LOAD_ADD_I8,
1834 ARM64::ATOMIC_LOAD_ADD_I16, ARM64::ATOMIC_LOAD_ADD_I32,
1835 ARM64::ATOMIC_LOAD_ADD_I64);
1836 case ISD::ATOMIC_LOAD_SUB:
1837 return SelectAtomic(Node, ARM64::ATOMIC_LOAD_SUB_I8,
1838 ARM64::ATOMIC_LOAD_SUB_I16, ARM64::ATOMIC_LOAD_SUB_I32,
1839 ARM64::ATOMIC_LOAD_SUB_I64);
1840 case ISD::ATOMIC_LOAD_AND:
1841 return SelectAtomic(Node, ARM64::ATOMIC_LOAD_AND_I8,
1842 ARM64::ATOMIC_LOAD_AND_I16, ARM64::ATOMIC_LOAD_AND_I32,
1843 ARM64::ATOMIC_LOAD_AND_I64);
1844 case ISD::ATOMIC_LOAD_OR:
1845 return SelectAtomic(Node, ARM64::ATOMIC_LOAD_OR_I8,
1846 ARM64::ATOMIC_LOAD_OR_I16, ARM64::ATOMIC_LOAD_OR_I32,
1847 ARM64::ATOMIC_LOAD_OR_I64);
1848 case ISD::ATOMIC_LOAD_XOR:
1849 return SelectAtomic(Node, ARM64::ATOMIC_LOAD_XOR_I8,
1850 ARM64::ATOMIC_LOAD_XOR_I16, ARM64::ATOMIC_LOAD_XOR_I32,
1851 ARM64::ATOMIC_LOAD_XOR_I64);
1852 case ISD::ATOMIC_LOAD_NAND:
1853 return SelectAtomic(
1854 Node, ARM64::ATOMIC_LOAD_NAND_I8, ARM64::ATOMIC_LOAD_NAND_I16,
1855 ARM64::ATOMIC_LOAD_NAND_I32, ARM64::ATOMIC_LOAD_NAND_I64);
1856 case ISD::ATOMIC_LOAD_MIN:
1857 return SelectAtomic(Node, ARM64::ATOMIC_LOAD_MIN_I8,
1858 ARM64::ATOMIC_LOAD_MIN_I16, ARM64::ATOMIC_LOAD_MIN_I32,
1859 ARM64::ATOMIC_LOAD_MIN_I64);
1860 case ISD::ATOMIC_LOAD_MAX:
1861 return SelectAtomic(Node, ARM64::ATOMIC_LOAD_MAX_I8,
1862 ARM64::ATOMIC_LOAD_MAX_I16, ARM64::ATOMIC_LOAD_MAX_I32,
1863 ARM64::ATOMIC_LOAD_MAX_I64);
1864 case ISD::ATOMIC_LOAD_UMIN:
1865 return SelectAtomic(
1866 Node, ARM64::ATOMIC_LOAD_UMIN_I8, ARM64::ATOMIC_LOAD_UMIN_I16,
1867 ARM64::ATOMIC_LOAD_UMIN_I32, ARM64::ATOMIC_LOAD_UMIN_I64);
1868 case ISD::ATOMIC_LOAD_UMAX:
1869 return SelectAtomic(
1870 Node, ARM64::ATOMIC_LOAD_UMAX_I8, ARM64::ATOMIC_LOAD_UMAX_I16,
1871 ARM64::ATOMIC_LOAD_UMAX_I32, ARM64::ATOMIC_LOAD_UMAX_I64);
1872 case ISD::ATOMIC_SWAP:
1873 return SelectAtomic(Node, ARM64::ATOMIC_SWAP_I8, ARM64::ATOMIC_SWAP_I16,
1874 ARM64::ATOMIC_SWAP_I32, ARM64::ATOMIC_SWAP_I64);
1875 case ISD::ATOMIC_CMP_SWAP:
1876 return SelectAtomic(Node, ARM64::ATOMIC_CMP_SWAP_I8,
1877 ARM64::ATOMIC_CMP_SWAP_I16, ARM64::ATOMIC_CMP_SWAP_I32,
1878 ARM64::ATOMIC_CMP_SWAP_I64);
    // Try to select as an indexed load. Fall through to normal processing
    // if we can't.
    bool Done = false;
    SDNode *I = SelectIndexedLoad(Node, Done);
1893 if (SDNode *I = SelectBitfieldExtractOp(Node))
1898 if (SDNode *I = SelectBitfieldInsertOp(Node))
1902 case ISD::EXTRACT_VECTOR_ELT: {
    // Extracting lane zero is a special case where we can just use a plain
    // EXTRACT_SUBREG instruction, which will become FMOV. This is easier for
    // the rest of the compiler, especially the register allocator and copy
    // propagation, to reason about, so it is preferred when it's possible to
    // use it.
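    // For example, (f32 (extract_vector_elt (v4f32 V), 0)) becomes a plain
    // ssub subregister copy of V.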
1908 ConstantSDNode *LaneNode = cast<ConstantSDNode>(Node->getOperand(1));
1909 // Bail and use the default Select() for non-zero lanes.
1910 if (LaneNode->getZExtValue() != 0)
1912 // If the element type is not the same as the result type, likewise
1913 // bail and use the default Select(), as there's more to do than just
1914 // a cross-class COPY. This catches extracts of i8 and i16 elements
1915 // since they will need an explicit zext.
1916 if (VT != Node->getOperand(0).getValueType().getVectorElementType())
    unsigned SubReg;
    switch (Node->getOperand(0).getValueType().getVectorElementType()
                .getSizeInBits()) {
1924 assert(0 && "Unexpected vector element type!");
1926 SubReg = ARM64::dsub;
1929 SubReg = ARM64::ssub;
1931 case 16: // FALLTHROUGH
1933 llvm_unreachable("unexpected zext-requiring extract element!");
1935 SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg, SDLoc(Node), VT,
1936 Node->getOperand(0));
1937 DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
1938 DEBUG(Extract->dumpr(CurDAG));
1939 DEBUG(dbgs() << "\n");
1940 return Extract.getNode();
1942 case ISD::Constant: {
1943 // Materialize zero constants as copies from WZR/XZR. This allows
1944 // the coalescer to propagate these into other instructions.
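    // For example, (i32 0) is selected as a copy from WZR rather than a
    // materializing MOVZ, and the copy can later be folded into users that
    // can read WZR directly.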
1945 ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
    if (ConstNode->isNullValue()) {
      if (VT == MVT::i32)
        return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
                                      ARM64::WZR, MVT::i32).getNode();
1950 else if (VT == MVT::i64)
1951 return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
1952 ARM64::XZR, MVT::i64).getNode();
1957 case ISD::FrameIndex: {
1958 // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
1959 int FI = cast<FrameIndexSDNode>(Node)->getIndex();
1960 unsigned Shifter = ARM64_AM::getShifterImm(ARM64_AM::LSL, 0);
1961 const TargetLowering *TLI = getTargetLowering();
1962 SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
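    // ADDXri operands are (base, imm12, shift): the frame index, a zero
    // immediate and an LSL #0 shifter.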
1963 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
1964 CurDAG->getTargetConstant(Shifter, MVT::i32) };
1965 return CurDAG->SelectNodeTo(Node, ARM64::ADDXri, MVT::i64, Ops, 3);
1967 case ISD::INTRINSIC_W_CHAIN: {
1968 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
1972 case Intrinsic::arm64_ldxp: {
1973 SDValue MemAddr = Node->getOperand(2);
1975 SDValue Chain = Node->getOperand(0);
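      // LDXPX produces two i64 results plus a chain, matching the {lo, hi}
      // pair returned by the ldxp intrinsic.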
1977 SDNode *Ld = CurDAG->getMachineNode(ARM64::LDXPX, DL, MVT::i64, MVT::i64,
1978 MVT::Other, MemAddr, Chain);
1980 // Transfer memoperands.
1981 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1982 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
1983 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
1986 case Intrinsic::arm64_stxp: {
1988 SDValue Chain = Node->getOperand(0);
1989 SDValue ValLo = Node->getOperand(2);
1990 SDValue ValHi = Node->getOperand(3);
1991 SDValue MemAddr = Node->getOperand(4);
1993 // Place arguments in the right order.
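      // STXPX expects (value-lo, value-hi, address, chain) and produces an
      // i32 status result plus a chain.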
1994 SmallVector<SDValue, 7> Ops;
1995 Ops.push_back(ValLo);
1996 Ops.push_back(ValHi);
1997 Ops.push_back(MemAddr);
1998 Ops.push_back(Chain);
2001 CurDAG->getMachineNode(ARM64::STXPX, DL, MVT::i32, MVT::Other, Ops);
2002 // Transfer memoperands.
2003 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2004 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
2005 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
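      // The NEON structure loads below dispatch on the vector type: 64-bit
      // vectors select the D-register variants (extracted via dsub0 from the
      // result tuple), 128-bit vectors the Q-register variants (qsub0).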
2009 case Intrinsic::arm64_neon_ld1x2:
2010 if (VT == MVT::v8i8)
2011 return SelectLoad(Node, 2, ARM64::LD1Twov8b, ARM64::dsub0);
2012 else if (VT == MVT::v16i8)
2013 return SelectLoad(Node, 2, ARM64::LD1Twov16b, ARM64::qsub0);
2014 else if (VT == MVT::v4i16)
2015 return SelectLoad(Node, 2, ARM64::LD1Twov4h, ARM64::dsub0);
2016 else if (VT == MVT::v8i16)
2017 return SelectLoad(Node, 2, ARM64::LD1Twov8h, ARM64::qsub0);
2018 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2019 return SelectLoad(Node, 2, ARM64::LD1Twov2s, ARM64::dsub0);
2020 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2021 return SelectLoad(Node, 2, ARM64::LD1Twov4s, ARM64::qsub0);
2022 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2023 return SelectLoad(Node, 2, ARM64::LD1Twov1d, ARM64::dsub0);
2024 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2025 return SelectLoad(Node, 2, ARM64::LD1Twov2d, ARM64::qsub0);
2027 case Intrinsic::arm64_neon_ld1x3:
2028 if (VT == MVT::v8i8)
2029 return SelectLoad(Node, 3, ARM64::LD1Threev8b, ARM64::dsub0);
2030 else if (VT == MVT::v16i8)
2031 return SelectLoad(Node, 3, ARM64::LD1Threev16b, ARM64::qsub0);
2032 else if (VT == MVT::v4i16)
2033 return SelectLoad(Node, 3, ARM64::LD1Threev4h, ARM64::dsub0);
2034 else if (VT == MVT::v8i16)
2035 return SelectLoad(Node, 3, ARM64::LD1Threev8h, ARM64::qsub0);
2036 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2037 return SelectLoad(Node, 3, ARM64::LD1Threev2s, ARM64::dsub0);
2038 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2039 return SelectLoad(Node, 3, ARM64::LD1Threev4s, ARM64::qsub0);
2040 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2041 return SelectLoad(Node, 3, ARM64::LD1Threev1d, ARM64::dsub0);
2042 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2043 return SelectLoad(Node, 3, ARM64::LD1Threev2d, ARM64::qsub0);
2045 case Intrinsic::arm64_neon_ld1x4:
2046 if (VT == MVT::v8i8)
2047 return SelectLoad(Node, 4, ARM64::LD1Fourv8b, ARM64::dsub0);
2048 else if (VT == MVT::v16i8)
2049 return SelectLoad(Node, 4, ARM64::LD1Fourv16b, ARM64::qsub0);
2050 else if (VT == MVT::v4i16)
2051 return SelectLoad(Node, 4, ARM64::LD1Fourv4h, ARM64::dsub0);
2052 else if (VT == MVT::v8i16)
2053 return SelectLoad(Node, 4, ARM64::LD1Fourv8h, ARM64::qsub0);
2054 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2055 return SelectLoad(Node, 4, ARM64::LD1Fourv2s, ARM64::dsub0);
2056 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2057 return SelectLoad(Node, 4, ARM64::LD1Fourv4s, ARM64::qsub0);
2058 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2059 return SelectLoad(Node, 4, ARM64::LD1Fourv1d, ARM64::dsub0);
2060 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2061 return SelectLoad(Node, 4, ARM64::LD1Fourv2d, ARM64::qsub0);
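      // LD2/LD3/LD4 have no single-element .1d interleaved form, so the
      // v1i64/v1f64 cases below fall back to the consecutive LD1
      // multi-register variants.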
2063 case Intrinsic::arm64_neon_ld2:
2064 if (VT == MVT::v8i8)
2065 return SelectLoad(Node, 2, ARM64::LD2Twov8b, ARM64::dsub0);
2066 else if (VT == MVT::v16i8)
2067 return SelectLoad(Node, 2, ARM64::LD2Twov16b, ARM64::qsub0);
2068 else if (VT == MVT::v4i16)
2069 return SelectLoad(Node, 2, ARM64::LD2Twov4h, ARM64::dsub0);
2070 else if (VT == MVT::v8i16)
2071 return SelectLoad(Node, 2, ARM64::LD2Twov8h, ARM64::qsub0);
2072 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2073 return SelectLoad(Node, 2, ARM64::LD2Twov2s, ARM64::dsub0);
2074 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2075 return SelectLoad(Node, 2, ARM64::LD2Twov4s, ARM64::qsub0);
2076 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2077 return SelectLoad(Node, 2, ARM64::LD1Twov1d, ARM64::dsub0);
2078 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2079 return SelectLoad(Node, 2, ARM64::LD2Twov2d, ARM64::qsub0);
2081 case Intrinsic::arm64_neon_ld3:
2082 if (VT == MVT::v8i8)
2083 return SelectLoad(Node, 3, ARM64::LD3Threev8b, ARM64::dsub0);
2084 else if (VT == MVT::v16i8)
2085 return SelectLoad(Node, 3, ARM64::LD3Threev16b, ARM64::qsub0);
2086 else if (VT == MVT::v4i16)
2087 return SelectLoad(Node, 3, ARM64::LD3Threev4h, ARM64::dsub0);
2088 else if (VT == MVT::v8i16)
2089 return SelectLoad(Node, 3, ARM64::LD3Threev8h, ARM64::qsub0);
2090 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2091 return SelectLoad(Node, 3, ARM64::LD3Threev2s, ARM64::dsub0);
2092 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2093 return SelectLoad(Node, 3, ARM64::LD3Threev4s, ARM64::qsub0);
2094 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2095 return SelectLoad(Node, 3, ARM64::LD1Threev1d, ARM64::dsub0);
2096 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2097 return SelectLoad(Node, 3, ARM64::LD3Threev2d, ARM64::qsub0);
2099 case Intrinsic::arm64_neon_ld4:
2100 if (VT == MVT::v8i8)
2101 return SelectLoad(Node, 4, ARM64::LD4Fourv8b, ARM64::dsub0);
2102 else if (VT == MVT::v16i8)
2103 return SelectLoad(Node, 4, ARM64::LD4Fourv16b, ARM64::qsub0);
2104 else if (VT == MVT::v4i16)
2105 return SelectLoad(Node, 4, ARM64::LD4Fourv4h, ARM64::dsub0);
2106 else if (VT == MVT::v8i16)
2107 return SelectLoad(Node, 4, ARM64::LD4Fourv8h, ARM64::qsub0);
2108 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2109 return SelectLoad(Node, 4, ARM64::LD4Fourv2s, ARM64::dsub0);
2110 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2111 return SelectLoad(Node, 4, ARM64::LD4Fourv4s, ARM64::qsub0);
2112 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2113 return SelectLoad(Node, 4, ARM64::LD1Fourv1d, ARM64::dsub0);
2114 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2115 return SelectLoad(Node, 4, ARM64::LD4Fourv2d, ARM64::qsub0);
2117 case Intrinsic::arm64_neon_ld2r:
2118 if (VT == MVT::v8i8)
2119 return SelectLoad(Node, 2, ARM64::LD2Rv8b, ARM64::dsub0);
2120 else if (VT == MVT::v16i8)
2121 return SelectLoad(Node, 2, ARM64::LD2Rv16b, ARM64::qsub0);
2122 else if (VT == MVT::v4i16)
2123 return SelectLoad(Node, 2, ARM64::LD2Rv4h, ARM64::dsub0);
2124 else if (VT == MVT::v8i16)
2125 return SelectLoad(Node, 2, ARM64::LD2Rv8h, ARM64::qsub0);
2126 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2127 return SelectLoad(Node, 2, ARM64::LD2Rv2s, ARM64::dsub0);
2128 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2129 return SelectLoad(Node, 2, ARM64::LD2Rv4s, ARM64::qsub0);
2130 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2131 return SelectLoad(Node, 2, ARM64::LD2Rv1d, ARM64::dsub0);
2132 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2133 return SelectLoad(Node, 2, ARM64::LD2Rv2d, ARM64::qsub0);
2135 case Intrinsic::arm64_neon_ld3r:
2136 if (VT == MVT::v8i8)
2137 return SelectLoad(Node, 3, ARM64::LD3Rv8b, ARM64::dsub0);
2138 else if (VT == MVT::v16i8)
2139 return SelectLoad(Node, 3, ARM64::LD3Rv16b, ARM64::qsub0);
2140 else if (VT == MVT::v4i16)
2141 return SelectLoad(Node, 3, ARM64::LD3Rv4h, ARM64::dsub0);
2142 else if (VT == MVT::v8i16)
2143 return SelectLoad(Node, 3, ARM64::LD3Rv8h, ARM64::qsub0);
2144 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2145 return SelectLoad(Node, 3, ARM64::LD3Rv2s, ARM64::dsub0);
2146 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2147 return SelectLoad(Node, 3, ARM64::LD3Rv4s, ARM64::qsub0);
2148 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2149 return SelectLoad(Node, 3, ARM64::LD3Rv1d, ARM64::dsub0);
2150 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2151 return SelectLoad(Node, 3, ARM64::LD3Rv2d, ARM64::qsub0);
2153 case Intrinsic::arm64_neon_ld4r:
2154 if (VT == MVT::v8i8)
2155 return SelectLoad(Node, 4, ARM64::LD4Rv8b, ARM64::dsub0);
2156 else if (VT == MVT::v16i8)
2157 return SelectLoad(Node, 4, ARM64::LD4Rv16b, ARM64::qsub0);
2158 else if (VT == MVT::v4i16)
2159 return SelectLoad(Node, 4, ARM64::LD4Rv4h, ARM64::dsub0);
2160 else if (VT == MVT::v8i16)
2161 return SelectLoad(Node, 4, ARM64::LD4Rv8h, ARM64::qsub0);
2162 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2163 return SelectLoad(Node, 4, ARM64::LD4Rv2s, ARM64::dsub0);
2164 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2165 return SelectLoad(Node, 4, ARM64::LD4Rv4s, ARM64::qsub0);
2166 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2167 return SelectLoad(Node, 4, ARM64::LD4Rv1d, ARM64::dsub0);
2168 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2169 return SelectLoad(Node, 4, ARM64::LD4Rv2d, ARM64::qsub0);
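      // Lane loads are selected purely on element size; the 64-bit and
      // 128-bit vector forms share the same LDn lane instruction.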
2171 case Intrinsic::arm64_neon_ld2lane:
2172 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2173 return SelectLoadLane(Node, 2, ARM64::LD2i8);
2174 else if (VT == MVT::v8i16 || VT == MVT::v4i16)
2175 return SelectLoadLane(Node, 2, ARM64::LD2i16);
2176 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2178 return SelectLoadLane(Node, 2, ARM64::LD2i32);
2179 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2181 return SelectLoadLane(Node, 2, ARM64::LD2i64);
2183 case Intrinsic::arm64_neon_ld3lane:
2184 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2185 return SelectLoadLane(Node, 3, ARM64::LD3i8);
2186 else if (VT == MVT::v8i16 || VT == MVT::v4i16)
2187 return SelectLoadLane(Node, 3, ARM64::LD3i16);
2188 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2190 return SelectLoadLane(Node, 3, ARM64::LD3i32);
2191 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2193 return SelectLoadLane(Node, 3, ARM64::LD3i64);
2195 case Intrinsic::arm64_neon_ld4lane:
2196 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2197 return SelectLoadLane(Node, 4, ARM64::LD4i8);
2198 else if (VT == MVT::v8i16 || VT == MVT::v4i16)
2199 return SelectLoadLane(Node, 4, ARM64::LD4i16);
2200 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2202 return SelectLoadLane(Node, 4, ARM64::LD4i32);
2203 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2205 return SelectLoadLane(Node, 4, ARM64::LD4i64);
2209 case ISD::INTRINSIC_WO_CHAIN: {
2210 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
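    // The table lookup intrinsics only distinguish 64-bit (v8i8) from 128-bit
    // (v16i8) results; TBL zeroes elements with out-of-range indices while
    // TBX leaves the corresponding destination elements unchanged.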
2214 case Intrinsic::arm64_neon_tbl2:
2215 return SelectTable(Node, 2, VT == MVT::v8i8 ? ARM64::TBLv8i8Two
2216 : ARM64::TBLv16i8Two,
2218 case Intrinsic::arm64_neon_tbl3:
2219 return SelectTable(Node, 3, VT == MVT::v8i8 ? ARM64::TBLv8i8Three
2220 : ARM64::TBLv16i8Three,
2222 case Intrinsic::arm64_neon_tbl4:
2223 return SelectTable(Node, 4, VT == MVT::v8i8 ? ARM64::TBLv8i8Four
2224 : ARM64::TBLv16i8Four,
2226 case Intrinsic::arm64_neon_tbx2:
2227 return SelectTable(Node, 2, VT == MVT::v8i8 ? ARM64::TBXv8i8Two
2228 : ARM64::TBXv16i8Two,
2230 case Intrinsic::arm64_neon_tbx3:
2231 return SelectTable(Node, 3, VT == MVT::v8i8 ? ARM64::TBXv8i8Three
2232 : ARM64::TBXv16i8Three,
2234 case Intrinsic::arm64_neon_tbx4:
2235 return SelectTable(Node, 4, VT == MVT::v8i8 ? ARM64::TBXv8i8Four
2236 : ARM64::TBXv16i8Four,
2238 case Intrinsic::arm64_neon_smull:
2239 case Intrinsic::arm64_neon_umull:
2240 if (SDNode *N = SelectMULLV64LaneV128(IntNo, Node))
2246 case ISD::INTRINSIC_VOID: {
2247 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
2248 if (Node->getNumOperands() >= 3)
2249 VT = Node->getOperand(2)->getValueType(0);
2253 case Intrinsic::arm64_neon_st1x2: {
2254 if (VT == MVT::v8i8)
2255 return SelectStore(Node, 2, ARM64::ST1Twov8b);
2256 else if (VT == MVT::v16i8)
2257 return SelectStore(Node, 2, ARM64::ST1Twov16b);
2258 else if (VT == MVT::v4i16)
2259 return SelectStore(Node, 2, ARM64::ST1Twov4h);
2260 else if (VT == MVT::v8i16)
2261 return SelectStore(Node, 2, ARM64::ST1Twov8h);
2262 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2263 return SelectStore(Node, 2, ARM64::ST1Twov2s);
2264 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2265 return SelectStore(Node, 2, ARM64::ST1Twov4s);
2266 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2267 return SelectStore(Node, 2, ARM64::ST1Twov2d);
2268 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2269 return SelectStore(Node, 2, ARM64::ST1Twov1d);
2272 case Intrinsic::arm64_neon_st1x3: {
2273 if (VT == MVT::v8i8)
2274 return SelectStore(Node, 3, ARM64::ST1Threev8b);
2275 else if (VT == MVT::v16i8)
2276 return SelectStore(Node, 3, ARM64::ST1Threev16b);
2277 else if (VT == MVT::v4i16)
2278 return SelectStore(Node, 3, ARM64::ST1Threev4h);
2279 else if (VT == MVT::v8i16)
2280 return SelectStore(Node, 3, ARM64::ST1Threev8h);
2281 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2282 return SelectStore(Node, 3, ARM64::ST1Threev2s);
2283 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2284 return SelectStore(Node, 3, ARM64::ST1Threev4s);
2285 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2286 return SelectStore(Node, 3, ARM64::ST1Threev2d);
2287 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2288 return SelectStore(Node, 3, ARM64::ST1Threev1d);
2291 case Intrinsic::arm64_neon_st1x4: {
2292 if (VT == MVT::v8i8)
2293 return SelectStore(Node, 4, ARM64::ST1Fourv8b);
2294 else if (VT == MVT::v16i8)
2295 return SelectStore(Node, 4, ARM64::ST1Fourv16b);
2296 else if (VT == MVT::v4i16)
2297 return SelectStore(Node, 4, ARM64::ST1Fourv4h);
2298 else if (VT == MVT::v8i16)
2299 return SelectStore(Node, 4, ARM64::ST1Fourv8h);
2300 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2301 return SelectStore(Node, 4, ARM64::ST1Fourv2s);
2302 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2303 return SelectStore(Node, 4, ARM64::ST1Fourv4s);
2304 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2305 return SelectStore(Node, 4, ARM64::ST1Fourv2d);
2306 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2307 return SelectStore(Node, 4, ARM64::ST1Fourv1d);
2310 case Intrinsic::arm64_neon_st2: {
2311 if (VT == MVT::v8i8)
2312 return SelectStore(Node, 2, ARM64::ST2Twov8b);
2313 else if (VT == MVT::v16i8)
2314 return SelectStore(Node, 2, ARM64::ST2Twov16b);
2315 else if (VT == MVT::v4i16)
2316 return SelectStore(Node, 2, ARM64::ST2Twov4h);
2317 else if (VT == MVT::v8i16)
2318 return SelectStore(Node, 2, ARM64::ST2Twov8h);
2319 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2320 return SelectStore(Node, 2, ARM64::ST2Twov2s);
2321 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2322 return SelectStore(Node, 2, ARM64::ST2Twov4s);
2323 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2324 return SelectStore(Node, 2, ARM64::ST2Twov2d);
2325 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2326 return SelectStore(Node, 2, ARM64::ST1Twov1d);
2329 case Intrinsic::arm64_neon_st3: {
2330 if (VT == MVT::v8i8)
2331 return SelectStore(Node, 3, ARM64::ST3Threev8b);
2332 else if (VT == MVT::v16i8)
2333 return SelectStore(Node, 3, ARM64::ST3Threev16b);
2334 else if (VT == MVT::v4i16)
2335 return SelectStore(Node, 3, ARM64::ST3Threev4h);
2336 else if (VT == MVT::v8i16)
2337 return SelectStore(Node, 3, ARM64::ST3Threev8h);
2338 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2339 return SelectStore(Node, 3, ARM64::ST3Threev2s);
2340 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2341 return SelectStore(Node, 3, ARM64::ST3Threev4s);
2342 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2343 return SelectStore(Node, 3, ARM64::ST3Threev2d);
2344 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2345 return SelectStore(Node, 3, ARM64::ST1Threev1d);
2348 case Intrinsic::arm64_neon_st4: {
2349 if (VT == MVT::v8i8)
2350 return SelectStore(Node, 4, ARM64::ST4Fourv8b);
2351 else if (VT == MVT::v16i8)
2352 return SelectStore(Node, 4, ARM64::ST4Fourv16b);
2353 else if (VT == MVT::v4i16)
2354 return SelectStore(Node, 4, ARM64::ST4Fourv4h);
2355 else if (VT == MVT::v8i16)
2356 return SelectStore(Node, 4, ARM64::ST4Fourv8h);
2357 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2358 return SelectStore(Node, 4, ARM64::ST4Fourv2s);
2359 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2360 return SelectStore(Node, 4, ARM64::ST4Fourv4s);
2361 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2362 return SelectStore(Node, 4, ARM64::ST4Fourv2d);
2363 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2364 return SelectStore(Node, 4, ARM64::ST1Fourv1d);
2367 case Intrinsic::arm64_neon_st2lane: {
2368 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2369 return SelectStoreLane(Node, 2, ARM64::ST2i8);
2370 else if (VT == MVT::v8i16 || VT == MVT::v4i16)
2371 return SelectStoreLane(Node, 2, ARM64::ST2i16);
2372 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2374 return SelectStoreLane(Node, 2, ARM64::ST2i32);
2375 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2377 return SelectStoreLane(Node, 2, ARM64::ST2i64);
2380 case Intrinsic::arm64_neon_st3lane: {
2381 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2382 return SelectStoreLane(Node, 3, ARM64::ST3i8);
2383 else if (VT == MVT::v8i16 || VT == MVT::v4i16)
2384 return SelectStoreLane(Node, 3, ARM64::ST3i16);
2385 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2387 return SelectStoreLane(Node, 3, ARM64::ST3i32);
2388 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2390 return SelectStoreLane(Node, 3, ARM64::ST3i64);
2393 case Intrinsic::arm64_neon_st4lane: {
2394 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2395 return SelectStoreLane(Node, 4, ARM64::ST4i8);
2396 else if (VT == MVT::v8i16 || VT == MVT::v4i16)
2397 return SelectStoreLane(Node, 4, ARM64::ST4i16);
2398 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2400 return SelectStoreLane(Node, 4, ARM64::ST4i32);
2401 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2403 return SelectStoreLane(Node, 4, ARM64::ST4i64);
2413 if (SDNode *I = SelectLIBM(Node))
2418 // Select the default instruction
2419 ResNode = SelectCode(Node);
2421 DEBUG(errs() << "=> ");
  if (ResNode == NULL || ResNode == Node)
    DEBUG(Node->dump(CurDAG));
  else
    DEBUG(ResNode->dump(CurDAG));
2426 DEBUG(errs() << "\n");
/// createARM64ISelDag - This pass converts a legalized DAG into an
/// ARM64-specific DAG, ready for instruction scheduling.
2433 FunctionPass *llvm::createARM64ISelDag(ARM64TargetMachine &TM,
2434 CodeGenOpt::Level OptLevel) {
2435 return new ARM64DAGToDAGISel(TM, OptLevel);