//===-- ARM64ISelDAGToDAG.cpp - A dag to dag inst selector for ARM64 ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the ARM64 target.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "arm64-isel"
#include "ARM64TargetMachine.h"
#include "MCTargetDesc/ARM64AddressingModes.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h" // To access function attributes.
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
//===--------------------------------------------------------------------===//
/// ARM64DAGToDAGISel - ARM64 specific code to select ARM64 machine
/// instructions for SelectionDAG operations.
///
namespace {

class ARM64DAGToDAGISel : public SelectionDAGISel {
  ARM64TargetMachine &TM;

  /// Subtarget - Keep a pointer to the ARM64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARM64Subtarget *Subtarget;

  bool ForCodeSize;

public:
  explicit ARM64DAGToDAGISel(ARM64TargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel), TM(tm),
        Subtarget(&TM.getSubtarget<ARM64Subtarget>()), ForCodeSize(false) {}

  virtual const char *getPassName() const {
    return "ARM64 Instruction Selection";
  }

  virtual bool runOnMachineFunction(MachineFunction &MF) {
    AttributeSet FnAttrs = MF.getFunction()->getAttributes();
    ForCodeSize =
        FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
                             Attribute::OptimizeForSize) ||
        FnAttrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
    return SelectionDAGISel::runOnMachineFunction(MF);
  }

  SDNode *Select(SDNode *Node);

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                            char ConstraintCode,
                                            std::vector<SDValue> &OutOps);
  SDNode *SelectMLAV64LaneV128(SDNode *N);
  SDNode *SelectMULLV64LaneV128(unsigned IntNo, SDNode *N);
  bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
  bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, false, Reg, Shift);
  }
  bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, true, Reg, Shift);
  }
  bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 16, Base, OffImm);
  }
  bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 1, Base, OffImm);
  }
  bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 2, Base, OffImm);
  }
  bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 4, Base, OffImm);
  }
  bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 8, Base, OffImm);
  }
  bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 16, Base, OffImm);
  }

  bool SelectAddrModeRO8(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &Imm) {
    return SelectAddrModeRO(N, 1, Base, Offset, Imm);
  }
  bool SelectAddrModeRO16(SDValue N, SDValue &Base, SDValue &Offset,
                          SDValue &Imm) {
    return SelectAddrModeRO(N, 2, Base, Offset, Imm);
  }
  bool SelectAddrModeRO32(SDValue N, SDValue &Base, SDValue &Offset,
                          SDValue &Imm) {
    return SelectAddrModeRO(N, 4, Base, Offset, Imm);
  }
  bool SelectAddrModeRO64(SDValue N, SDValue &Base, SDValue &Offset,
                          SDValue &Imm) {
    return SelectAddrModeRO(N, 8, Base, Offset, Imm);
  }
  bool SelectAddrModeRO128(SDValue N, SDValue &Base, SDValue &Offset,
                           SDValue &Imm) {
    return SelectAddrModeRO(N, 16, Base, Offset, Imm);
  }
  bool SelectAddrModeNoIndex(SDValue N, SDValue &Val);

  /// Form sequences of consecutive 64/128-bit registers for use in NEON
  /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
  /// between 1 and 4 elements. If it contains a single element, that is
  /// returned unchanged; otherwise a REG_SEQUENCE value is returned.
  SDValue createDTuple(ArrayRef<SDValue> Vecs);
  SDValue createQTuple(ArrayRef<SDValue> Vecs);

  /// Generic helper for the createDTuple/createQTuple
  /// functions. Those should almost always be called instead.
  SDValue createTuple(ArrayRef<SDValue> Vecs, unsigned RegClassIDs[],
                      unsigned SubRegs[]);

  SDNode *SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);

  SDNode *SelectIndexedLoad(SDNode *N, bool &Done);

  SDNode *SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                     unsigned SubRegIdx);
  SDNode *SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  SDNode *SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  SDNode *SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  SDNode *SelectSIMDAddSubNarrowing(unsigned IntNo, SDNode *Node);
  SDNode *SelectSIMDXtnNarrowing(unsigned IntNo, SDNode *Node);

  SDNode *SelectAtomic(SDNode *Node, unsigned Op8, unsigned Op16, unsigned Op32,
                       unsigned Op64);

  SDNode *SelectBitfieldExtractOp(SDNode *N);
  SDNode *SelectBitfieldInsertOp(SDNode *N);

  SDNode *SelectLIBM(SDNode *N);

// Include the pieces autogenerated from the target description.
#include "ARM64GenDAGISel.inc"

private:
  bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
                             SDValue &Shift);
  bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
                             SDValue &OffImm);
  bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
                              SDValue &OffImm);
  bool SelectAddrModeRO(SDValue N, unsigned Size, SDValue &Base,
                        SDValue &Offset, SDValue &Imm);
  bool isWorthFolding(SDValue V) const;
  bool SelectExtendedSHL(SDValue N, unsigned Size, SDValue &Offset,
                         SDValue &Imm);
};
} // end anonymous namespace
/// isIntImmediate - This method tests to see if the node is a constant
/// operand. If so Imm will receive the value.
static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
  if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
    Imm = C->getZExtValue();
    return true;
  }
  return false;
}

// isIntImmediate - This method tests to see if the value is a constant
// operand. If so Imm will receive the value.
static bool isIntImmediate(SDValue N, uint64_t &Imm) {
  return isIntImmediate(N.getNode(), Imm);
}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so Imm will receive the 32 bit value.
static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
                                  uint64_t &Imm) {
  return N->getOpcode() == Opc &&
         isIntImmediate(N->getOperand(1).getNode(), Imm);
}
bool ARM64DAGToDAGISel::SelectAddrModeNoIndex(SDValue N, SDValue &Val) {
  EVT ValTy = N.getValueType();
  if (ValTy != MVT::i64)
    return false;
  Val = N;
  return true;
}

bool ARM64DAGToDAGISel::SelectInlineAsmMemoryOperand(
    const SDValue &Op, char ConstraintCode, std::vector<SDValue> &OutOps) {
  assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
  // Require the address to be in a register. That is safe for all ARM64
  // variants and it is hard to do anything much smarter without knowing
  // how the operand is used.
  OutOps.push_back(Op);
  return false;
}
/// SelectArithImmed - Select an immediate value that can be represented as
/// a 12-bit value shifted left by either 0 or 12. If so, return true with
/// Val set to the 12-bit value and Shift set to the shifter operand.
bool ARM64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
                                         SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
  unsigned ShiftAmt;

  if (Immed >> 12 == 0) {
    ShiftAmt = 0;
  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
    ShiftAmt = 12;
    Immed = Immed >> 12;
  } else
    return false;

  unsigned ShVal = ARM64_AM::getShifterImm(ARM64_AM::LSL, ShiftAmt);
  Val = CurDAG->getTargetConstant(Immed, MVT::i32);
  Shift = CurDAG->getTargetConstant(ShVal, MVT::i32);
  return true;
}
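// Worked example (illustrative, not part of the original source): an add of
// 0x123 matches with Val = 0x123 and Shift = LSL #0, and an add of 0x123000
// matches with Val = 0x123 and Shift = LSL #12. An add of 0x123456 fits
// neither form, so the pattern is rejected and the constant is materialized
// separately.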
/// SelectNegArithImmed - As above, but negates the value before trying to
/// select it.
bool ARM64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
                                            SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  // The immediate operand must be a 24-bit zero-extended immediate.
  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();

  // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
  // have the opposite effect on the C flag, so this pattern mustn't match under
  // those circumstances.
  if (Immed == 0)
    return false;

  if (N.getValueType() == MVT::i32)
    Immed = ~((uint32_t)Immed) + 1;
  else
    Immed = ~Immed + 1ULL;
  if (Immed & 0xFFFFFFFFFF000000ULL)
    return false;

  Immed &= 0xFFFFFFULL;
  return SelectArithImmed(CurDAG->getConstant(Immed, MVT::i32), Val, Shift);
}
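// Worked example (illustrative): an ISD::ADD of -5 on i32 negates 0xFFFFFFFB
// to 5, which fits the 12-bit immediate form, so it can be selected as
// "sub w0, w1, #5". The Immed == 0 guard above exists because negating #0
// would effectively turn "cmp wN, #0" into "cmn wN, #0", which sets the C
// flag differently.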
/// getShiftTypeForNode - Translate a shift node to the corresponding
/// ShiftType value.
static ARM64_AM::ShiftType getShiftTypeForNode(SDValue N) {
  switch (N.getOpcode()) {
  default:
    return ARM64_AM::InvalidShift;
  case ISD::SHL:
    return ARM64_AM::LSL;
  case ISD::SRL:
    return ARM64_AM::LSR;
  case ISD::SRA:
    return ARM64_AM::ASR;
  case ISD::ROTR:
    return ARM64_AM::ROR;
  }
}
/// \brief Determine whether it is worth folding V into an extended register.
bool ARM64DAGToDAGISel::isWorthFolding(SDValue V) const {
  // It hurts if a value is used at least twice, unless we are optimizing
  // for code size.
  if (ForCodeSize || V.hasOneUse())
    return true;
  return false;
}
/// SelectShiftedRegister - Select a "shifted register" operand. If the value
/// is not shifted, set the Shift operand to default of "LSL 0". The logical
/// instructions allow the shifted register to be rotated, but the arithmetic
/// instructions do not. The AllowROR parameter specifies whether ROR is
/// allowed.
bool ARM64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
                                              SDValue &Reg, SDValue &Shift) {
  ARM64_AM::ShiftType ShType = getShiftTypeForNode(N);
  if (ShType == ARM64_AM::InvalidShift)
    return false;
  if (!AllowROR && ShType == ARM64_AM::ROR)
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    unsigned BitSize = N.getValueType().getSizeInBits();
    unsigned Val = RHS->getZExtValue() & (BitSize - 1);
    unsigned ShVal = ARM64_AM::getShifterImm(ShType, Val);

    Reg = N.getOperand(0);
    Shift = CurDAG->getTargetConstant(ShVal, MVT::i32);
    return isWorthFolding(N);
  }

  return false;
}
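// Worked example (illustrative): for (add x0, x1, (shl x2, 3)) this returns
// Reg = x2 and Shift encoding "LSL #3", so the pattern can emit
// "add x0, x1, x2, lsl #3" instead of a separate shift instruction.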
/// getExtendTypeForNode - Translate an extend node to the corresponding
/// ExtendType value.
static ARM64_AM::ExtendType getExtendTypeForNode(SDValue N,
                                                 bool IsLoadStore = false) {
  if (N.getOpcode() == ISD::SIGN_EXTEND ||
      N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
    EVT SrcVT;
    if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
      SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
    else
      SrcVT = N.getOperand(0).getValueType();

    if (!IsLoadStore && SrcVT == MVT::i8)
      return ARM64_AM::SXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return ARM64_AM::SXTH;
    else if (SrcVT == MVT::i32)
      return ARM64_AM::SXTW;
    else if (SrcVT == MVT::i64)
      return ARM64_AM::SXTX;

    return ARM64_AM::InvalidExtend;
  } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
             N.getOpcode() == ISD::ANY_EXTEND) {
    EVT SrcVT = N.getOperand(0).getValueType();
    if (!IsLoadStore && SrcVT == MVT::i8)
      return ARM64_AM::UXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return ARM64_AM::UXTH;
    else if (SrcVT == MVT::i32)
      return ARM64_AM::UXTW;
    else if (SrcVT == MVT::i64)
      return ARM64_AM::UXTX;

    return ARM64_AM::InvalidExtend;
  } else if (N.getOpcode() == ISD::AND) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return ARM64_AM::InvalidExtend;
    uint64_t AndMask = CSD->getZExtValue();

    switch (AndMask) {
    default:
      return ARM64_AM::InvalidExtend;
    case 0xFF:
      return !IsLoadStore ? ARM64_AM::UXTB : ARM64_AM::InvalidExtend;
    case 0xFFFF:
      return !IsLoadStore ? ARM64_AM::UXTH : ARM64_AM::InvalidExtend;
    case 0xFFFFFFFF:
      return ARM64_AM::UXTW;
    }
  }

  return ARM64_AM::InvalidExtend;
}
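// Worked example (illustrative): (and x, 0xff) maps to UXTB and
// (and x, 0xffff) maps to UXTH for arithmetic operands, since ARM64 treats
// those masks as byte/halfword zero-extends. For load/store addressing only
// the 32-bit-and-wider forms are accepted, e.g. UXTW from (and x, 0xffffffff).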
// Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
  if (DL->getOpcode() != ARM64ISD::DUPLANE16 &&
      DL->getOpcode() != ARM64ISD::DUPLANE32)
    return false;

  SDValue SV = DL->getOperand(0);
  if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
    return false;

  SDValue EV = SV.getOperand(1);
  if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
    return false;

  ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
  ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
  LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
  LaneOp = EV.getOperand(0);
  return true;
}

// Helper for SelectOpcV64LaneV128 - Recognize operations where one operand is a
// high lane extract.
static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
                             SDValue &LaneOp, int &LaneIdx) {
  if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
    std::swap(Op0, Op1);
    if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx))
      return false;
  }
  StdOp = Op1;
  return true;
}
/// SelectMLAV64LaneV128 - ARM64 supports 64-bit vector MLAs (v4i16 and v2i32)
/// where one multiplicand is a lane in the upper half of a 128-bit vector.
/// Recognize and select this so that we don't emit unnecessary lane extracts.
SDNode *ARM64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDValue MLAOp1;   // Will hold ordinary multiplicand for MLA.
  SDValue MLAOp2;   // Will hold lane-accessed multiplicand for MLA.
  int LaneIdx = -1; // Will hold the lane index.

  if (Op1.getOpcode() != ISD::MUL ||
      !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                        LaneIdx)) {
    std::swap(Op0, Op1);
    if (Op1.getOpcode() != ISD::MUL ||
        !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                          LaneIdx))
      return 0;
  }

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);

  SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };

  unsigned MLAOpc = ~0U;

  switch (N->getSimpleValueType(0).SimpleTy) {
  default:
    llvm_unreachable("Unrecognized MLA.");
  case MVT::v4i16:
    MLAOpc = ARM64::MLAv4i16_indexed;
    break;
  case MVT::v2i32:
    MLAOpc = ARM64::MLAv2i32_indexed;
    break;
  }

  return CurDAG->getMachineNode(MLAOpc, SDLoc(N), N->getValueType(0), Ops);
}
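// Worked example (illustrative): for a v4i16 MLA whose multiplicand is lane 1
// of the high half of a v8i16 vector, the EXTRACT_SUBVECTOR index 4 and the
// DUPLANE index 1 combine to LaneIdx = 5, so we emit
// "mla v0.4h, v1.4h, v2.h[5]" rather than extracting the high half first.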
SDNode *ARM64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
  SDValue SMULLOp0;
  SDValue SMULLOp1;
  int LaneIdx;

  if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
                        LaneIdx))
    return 0;

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, MVT::i64);

  SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };

  unsigned SMULLOpc = ~0U;

  if (IntNo == Intrinsic::arm64_neon_smull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized SMULL.");
    case MVT::v4i32:
      SMULLOpc = ARM64::SMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = ARM64::SMULLv2i32_indexed;
      break;
    }
  } else if (IntNo == Intrinsic::arm64_neon_umull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized UMULL.");
    case MVT::v4i32:
      SMULLOpc = ARM64::UMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = ARM64::UMULLv2i32_indexed;
      break;
    }
  } else
    llvm_unreachable("Unrecognized intrinsic.");

  return CurDAG->getMachineNode(SMULLOpc, SDLoc(N), N->getValueType(0), Ops);
}
/// SelectArithExtendedRegister - Select an "extended register" operand. This
/// operand folds in an extend followed by an optional left shift.
bool ARM64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
                                                    SDValue &Shift) {
  unsigned ShiftVal = 0;
  ARM64_AM::ExtendType Ext;

  if (N.getOpcode() == ISD::SHL) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return false;
    ShiftVal = CSD->getZExtValue();
    if ((ShiftVal & 0x3) != ShiftVal)
      return false;

    Ext = getExtendTypeForNode(N.getOperand(0));
    if (Ext == ARM64_AM::InvalidExtend)
      return false;

    Reg = N.getOperand(0).getOperand(0);
  } else {
    Ext = getExtendTypeForNode(N);
    if (Ext == ARM64_AM::InvalidExtend)
      return false;

    Reg = N.getOperand(0);
  }

  // ARM64 mandates that the RHS of the operation must use the smallest
  // register class that could contain the size being extended from. Thus,
  // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
  // there might not be an actual 32-bit value in the program. We can
  // (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
  if (Reg.getValueType() == MVT::i64 && Ext != ARM64_AM::UXTX &&
      Ext != ARM64_AM::SXTX) {
    SDValue SubReg = CurDAG->getTargetConstant(ARM64::sub_32, MVT::i32);
    MachineSDNode *Node = CurDAG->getMachineNode(
        TargetOpcode::EXTRACT_SUBREG, SDLoc(N), MVT::i32, Reg, SubReg);
    Reg = SDValue(Node, 0);
  }

  Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), MVT::i32);
  return isWorthFolding(N);
}
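// Worked example (illustrative): (add x0, x1, (shl (sext_inreg x2, i8), 2))
// folds to "add x0, x1, w2, sxtb #2": Ext = SXTB, ShiftVal = 2, and the i64
// value is narrowed to a GPR32 via the EXTRACT_SUBREG synthesized above.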
/// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
bool ARM64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
                                              SDValue &Base, SDValue &OffImm) {
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
    OffImm = CurDAG->getTargetConstant(0, MVT::i64);
    return true;
  }

  if (N.getOpcode() == ARM64ISD::ADDlow) {
    GlobalAddressSDNode *GAN =
        dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
    Base = N.getOperand(0);
    OffImm = N.getOperand(1);
    if (!GAN)
      return true;

    const GlobalValue *GV = GAN->getGlobal();
    unsigned Alignment = GV->getAlignment();
    const DataLayout *DL = TLI->getDataLayout();
    if (Alignment == 0 && !Subtarget->isTargetDarwin())
      Alignment = DL->getABITypeAlignment(GV->getType()->getElementType());

    if (Alignment >= Size)
      return true;
  }

  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t RHSC = (int64_t)RHS->getZExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
        }
        OffImm = CurDAG->getTargetConstant(RHSC >> Scale, MVT::i64);
        return true;
      }
    }
  }

  // Before falling back to our general case, check if the unscaled
  // instructions can handle this. If so, that's preferable.
  if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
    return false;

  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  //    add x0, Xbase, #offset
  //    ldr x0, [x0]
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, MVT::i64);
  return true;
}
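// Worked example (illustrative): for an 8-byte access at base + 32, Scale is
// 3 and OffImm becomes 32 >> 3 = 4, i.e. "ldr x0, [x1, #32]". An offset of 33
// fails the alignment check and is instead handled by the unscaled (LDUR)
// form via SelectAddrModeUnscaled.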
/// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
/// immediate" address. This should only match when there is an offset that
/// is not valid for a scaled immediate addressing mode. The "Size" argument
/// is the size in bytes of the memory reference, which is needed here to know
/// what is valid for a scaled immediate.
bool ARM64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
                                               SDValue &Base, SDValue &OffImm) {
  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int64_t RHSC = RHS->getSExtValue();
    // If the offset is valid as a scaled immediate, don't match here.
    if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
        RHSC < (0x1000 << Log2_32(Size)))
      return false;
    if (RHSC >= -256 && RHSC < 256) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        const TargetLowering *TLI = getTargetLowering();
        Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i64);
      return true;
    }
  }
  return false;
}
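// Worked example (illustrative): a 4-byte access at base - 12 cannot use the
// scaled unsigned form, but -12 lies in [-256, 256), so it is selected as the
// unscaled "ldur w0, [x1, #-12]".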
static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
  SDValue SubReg = CurDAG->getTargetConstant(ARM64::sub_32, MVT::i32);
  SDValue ImpDef = SDValue(
      CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, SDLoc(N), MVT::i64),
      0);
  MachineSDNode *Node = CurDAG->getMachineNode(
      TargetOpcode::INSERT_SUBREG, SDLoc(N), MVT::i64, ImpDef, N, SubReg);
  return SDValue(Node, 0);
}

static SDValue WidenIfNeeded(SelectionDAG *CurDAG, SDValue N) {
  if (N.getValueType() == MVT::i32) {
    return Widen(CurDAG, N);
  }
  return N;
}
/// \brief Check if the given SHL node (\p N), can be used to form an
/// extended register for an addressing mode.
bool ARM64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
                                          SDValue &Offset, SDValue &Imm) {
  assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
  ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (CSD && (CSD->getZExtValue() & 0x7) == CSD->getZExtValue()) {

    ARM64_AM::ExtendType Ext = getExtendTypeForNode(N.getOperand(0), true);
    if (Ext == ARM64_AM::InvalidExtend) {
      Ext = ARM64_AM::UXTX;
      Offset = WidenIfNeeded(CurDAG, N.getOperand(0));
    } else {
      Offset = WidenIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
    }

    unsigned LegalShiftVal = Log2_32(Size);
    unsigned ShiftVal = CSD->getZExtValue();

    if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
      return false;

    Imm = CurDAG->getTargetConstant(
        ARM64_AM::getMemExtendImm(Ext, ShiftVal != 0), MVT::i32);
    if (isWorthFolding(N))
      return true;
  }
  return false;
}
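// Worked example (illustrative): for an 8-byte access,
// (shl (sext i32 x to i64), 3) yields Offset = x and an Imm encoding
// "SXTW #3", i.e. the "ldr x0, [xBase, w2, sxtw #3]" register-offset form.
// A shift amount other than 0 or log2(Size) is rejected above.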
bool ARM64DAGToDAGISel::SelectAddrModeRO(SDValue N, unsigned Size,
                                         SDValue &Base, SDValue &Offset,
                                         SDValue &Imm) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);

  // We don't want to match immediate adds here, because they are better lowered
  // to the register-immediate addressing modes.
  if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
    return false;

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode::use_iterator UI = Node->use_begin(), UE = Node->use_end();
       UI != UE; ++UI) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Remember if it is worth folding N when it produces extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, Offset, Imm)) {
    Base = LHS;
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, Offset, Imm)) {
    Base = RHS;
    return true;
  }

  ARM64_AM::ExtendType Ext = ARM64_AM::UXTX;
  // Try to match an unshifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(LHS, true)) != ARM64_AM::InvalidExtend) {
    Base = RHS;
    Offset = WidenIfNeeded(CurDAG, LHS.getOperand(0));
    Imm = CurDAG->getTargetConstant(ARM64_AM::getMemExtendImm(Ext, false),
                                    MVT::i32);
    if (isWorthFolding(LHS))
      return true;
  }

  // Try to match an unshifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(RHS, true)) != ARM64_AM::InvalidExtend) {
    Base = LHS;
    Offset = WidenIfNeeded(CurDAG, RHS.getOperand(0));
    Imm = CurDAG->getTargetConstant(ARM64_AM::getMemExtendImm(Ext, false),
                                    MVT::i32);
    if (isWorthFolding(RHS))
      return true;
  }

  // Match any non-shifted, non-extend, non-immediate add expression.
  Base = LHS;
  Offset = WidenIfNeeded(CurDAG, RHS);
  Ext = ARM64_AM::UXTX;
  Imm = CurDAG->getTargetConstant(ARM64_AM::getMemExtendImm(Ext, false),
                                  MVT::i32);
  // Reg1 + Reg2 is free: no check needed.
  return true;
}
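// Worked example (illustrative): (load (add x1, (shl (zext w2), 3))) for an
// 8-byte access selects Base = x1, Offset = w2 and Imm encoding "UXTW #3",
// i.e. "ldr x0, [x1, w2, uxtw #3]". A plain (add x1, x2) falls through to the
// final case and uses "ldr x0, [x1, x2]".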
SDValue ARM64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
  static unsigned RegClassIDs[] = { ARM64::DDRegClassID, ARM64::DDDRegClassID,
                                    ARM64::DDDDRegClassID };
  static unsigned SubRegs[] = { ARM64::dsub0, ARM64::dsub1,
                                ARM64::dsub2, ARM64::dsub3 };

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue ARM64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
  static unsigned RegClassIDs[] = { ARM64::QQRegClassID, ARM64::QQQRegClassID,
                                    ARM64::QQQQRegClassID };
  static unsigned SubRegs[] = { ARM64::qsub0, ARM64::qsub1,
                                ARM64::qsub2, ARM64::qsub3 };

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue ARM64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
                                       unsigned RegClassIDs[],
                                       unsigned SubRegs[]) {
  // There's no special register-class for a vector-list of 1 element: it's just
  // a vector.
  if (Regs.size() == 1)
    return Regs[0];

  assert(Regs.size() >= 2 && Regs.size() <= 4);

  SDLoc DL(Regs[0].getNode());

  SmallVector<SDValue, 4> Ops;

  // First operand of REG_SEQUENCE is the desired RegClass.
  Ops.push_back(
      CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], MVT::i32));

  // Then we get pairs of source & subregister-position for the components.
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Ops.push_back(Regs[i]);
    Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], MVT::i32));
  }

  SDNode *N =
      CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
  return SDValue(N, 0);
}
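// Worked example (illustrative): createQTuple with three Q-register values
// picks RegClassIDs[3 - 2], i.e. QQQRegClassID, and builds
// REG_SEQUENCE QQQ, v0, qsub0, v1, qsub1, v2, qsub2; the resulting Untyped
// value feeds instructions such as LD3 or TBL that consume a register list.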
SDNode *ARM64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs,
                                       unsigned Opc, bool isExt) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  unsigned ExtOff = isExt;

  // Form a REG_SEQUENCE to force register allocation.
  unsigned Vec0Off = ExtOff + 1;
  SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
                               N->op_begin() + Vec0Off + NumVecs);
  SDValue RegSeq = createQTuple(Regs);

  SmallVector<SDValue, 6> Ops;
  if (isExt)
    Ops.push_back(N->getOperand(1));
  Ops.push_back(RegSeq);
  Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
  return CurDAG->getMachineNode(Opc, dl, VT, Ops);
}
SDNode *ARM64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  if (LD->isUnindexed())
    return 0;
  EVT VT = LD->getMemoryVT();
  EVT DstVT = N->getValueType(0);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;

  // We're not doing validity checking here. That was done when checking
  // if we should mark the load as indexed or not. We're just selecting
  // the right instruction.
  unsigned Opcode;

  ISD::LoadExtType ExtType = LD->getExtensionType();
  bool InsertTo64 = false;
  if (VT == MVT::i64)
    Opcode = IsPre ? ARM64::LDRXpre_isel : ARM64::LDRXpost_isel;
  else if (VT == MVT::i32) {
    if (ExtType == ISD::NON_EXTLOAD)
      Opcode = IsPre ? ARM64::LDRWpre_isel : ARM64::LDRWpost_isel;
    else if (ExtType == ISD::SEXTLOAD)
      Opcode = IsPre ? ARM64::LDRSWpre_isel : ARM64::LDRSWpost_isel;
    else {
      Opcode = IsPre ? ARM64::LDRWpre_isel : ARM64::LDRWpost_isel;
      InsertTo64 = true;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i16) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? ARM64::LDRSHXpre_isel : ARM64::LDRSHXpost_isel;
      else
        Opcode = IsPre ? ARM64::LDRSHWpre_isel : ARM64::LDRSHWpost_isel;
    } else {
      Opcode = IsPre ? ARM64::LDRHHpre_isel : ARM64::LDRHHpost_isel;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i8) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? ARM64::LDRSBXpre_isel : ARM64::LDRSBXpost_isel;
      else
        Opcode = IsPre ? ARM64::LDRSBWpre_isel : ARM64::LDRSBWpost_isel;
    } else {
      Opcode = IsPre ? ARM64::LDRBBpre_isel : ARM64::LDRBBpost_isel;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::f32) {
    Opcode = IsPre ? ARM64::LDRSpre_isel : ARM64::LDRSpost_isel;
  } else if (VT == MVT::f64) {
    Opcode = IsPre ? ARM64::LDRDpre_isel : ARM64::LDRDpost_isel;
  } else
    return 0;
  SDValue Chain = LD->getChain();
  SDValue Base = LD->getBasePtr();
  ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
  int OffsetVal = (int)OffsetOp->getZExtValue();
  SDValue Offset = CurDAG->getTargetConstant(OffsetVal, MVT::i64);
  SDValue Ops[] = { Base, Offset, Chain };
  SDNode *Res = CurDAG->getMachineNode(Opcode, SDLoc(N), DstVT, MVT::i64,
                                       MVT::Other, Ops);
  // Either way, we're replacing the node, so tell the caller that.
  Done = true;
  if (InsertTo64) {
    SDValue SubReg = CurDAG->getTargetConstant(ARM64::sub_32, MVT::i32);
    SDNode *Sub = CurDAG->getMachineNode(
        ARM64::SUBREG_TO_REG, SDLoc(N), MVT::i64,
        CurDAG->getTargetConstant(0, MVT::i64), SDValue(Res, 0), SubReg);
    ReplaceUses(SDValue(N, 0), SDValue(Sub, 0));
    ReplaceUses(SDValue(N, 1), SDValue(Res, 1));
    ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
    return 0;
  }
  return Res;
}
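// Worked example (illustrative): a pre-indexed i64 load with offset 8 selects
// to LDRXpre_isel, i.e. "ldr x0, [x1, #8]!", producing the loaded value, the
// updated base register and the chain; the extending i8/i16 variants
// additionally wrap the i32 result in SUBREG_TO_REG when the destination
// type is i64.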
SDNode *ARM64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                                      unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(N->getOperand(2)); // Mem operand.
  Ops.push_back(Chain);

  std::vector<EVT> ResTys;
  ResTys.push_back(MVT::Untyped);
  ResTys.push_back(MVT::Other);

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);

  // MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  // MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  // cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);

  switch (NumVecs) {
  case 4:
    ReplaceUses(SDValue(N, 3), CurDAG->getTargetExtractSubreg(SubRegIdx + 3, dl,
                                                              VT, SuperReg));
  // FALLTHROUGH
  case 3:
    ReplaceUses(SDValue(N, 2), CurDAG->getTargetExtractSubreg(SubRegIdx + 2, dl,
                                                              VT, SuperReg));
  // FALLTHROUGH
  case 2:
    ReplaceUses(SDValue(N, 1), CurDAG->getTargetExtractSubreg(SubRegIdx + 1, dl,
                                                              VT, SuperReg));
    ReplaceUses(SDValue(N, 0),
                CurDAG->getTargetExtractSubreg(SubRegIdx, dl, VT, SuperReg));
    break;
  case 1:
    ReplaceUses(SDValue(N, 0), SuperReg);
    break;
  }

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));

  return 0;
}
SDNode *ARM64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
                                       unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(RegSeq);
  Ops.push_back(N->getOperand(NumVecs + 2));
  Ops.push_back(N->getOperand(0));
  SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);

  return St;
}
/// WidenVector - Given a value in the V64 register class, produce the
/// equivalent value in the V128 register class.
class WidenVector {
  SelectionDAG &DAG;

public:
  WidenVector(SelectionDAG &DAG) : DAG(DAG) {}

  SDValue operator()(SDValue V64Reg) {
    EVT VT = V64Reg.getValueType();
    unsigned NarrowSize = VT.getVectorNumElements();
    MVT EltTy = VT.getVectorElementType().getSimpleVT();
    MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
    SDLoc DL(V64Reg);

    SDValue Undef =
        SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
    return DAG.getTargetInsertSubreg(ARM64::dsub, DL, WideTy, Undef, V64Reg);
  }
};

/// NarrowVector - Given a value in the V128 register class, produce the
/// equivalent value in the V64 register class.
static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
  EVT VT = V128Reg.getValueType();
  unsigned WideSize = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType().getSimpleVT();
  MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);

  return DAG.getTargetExtractSubreg(ARM64::dsub, SDLoc(V128Reg), NarrowTy,
                                    V128Reg);
}
SDNode *ARM64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
                                          unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  std::vector<EVT> ResTys;
  ResTys.push_back(MVT::Untyped);
  ResTys.push_back(MVT::Other);

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(RegSeq);
  Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64));
  Ops.push_back(N->getOperand(NumVecs + 3));
  Ops.push_back(N->getOperand(0));
  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);

  EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
  switch (NumVecs) {
  case 4: {
    SDValue NV3 =
        CurDAG->getTargetExtractSubreg(ARM64::qsub3, dl, WideVT, SuperReg);
    if (Narrow)
      ReplaceUses(SDValue(N, 3), NarrowVector(NV3, *CurDAG));
    else
      ReplaceUses(SDValue(N, 3), NV3);
  }
  // FALLTHROUGH
  case 3: {
    SDValue NV2 =
        CurDAG->getTargetExtractSubreg(ARM64::qsub2, dl, WideVT, SuperReg);
    if (Narrow)
      ReplaceUses(SDValue(N, 2), NarrowVector(NV2, *CurDAG));
    else
      ReplaceUses(SDValue(N, 2), NV2);
  }
  // FALLTHROUGH
  case 2: {
    SDValue NV1 =
        CurDAG->getTargetExtractSubreg(ARM64::qsub1, dl, WideVT, SuperReg);
    SDValue NV0 =
        CurDAG->getTargetExtractSubreg(ARM64::qsub0, dl, WideVT, SuperReg);
    if (Narrow) {
      ReplaceUses(SDValue(N, 1), NarrowVector(NV1, *CurDAG));
      ReplaceUses(SDValue(N, 0), NarrowVector(NV0, *CurDAG));
    } else {
      ReplaceUses(SDValue(N, 1), NV1);
      ReplaceUses(SDValue(N, 0), NV0);
    }
    break;
  }
  }

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));

  return 0;
}
SDNode *ARM64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
                                           unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();

  SmallVector<SDValue, 6> Ops;
  Ops.push_back(RegSeq);
  Ops.push_back(CurDAG->getTargetConstant(LaneNo, MVT::i64));
  Ops.push_back(N->getOperand(NumVecs + 3));
  Ops.push_back(N->getOperand(0));
  SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

  return St;
}
SDNode *ARM64DAGToDAGISel::SelectAtomic(SDNode *Node, unsigned Op8,
                                        unsigned Op16, unsigned Op32,
                                        unsigned Op64) {
  // Mostly direct translation to the given operations, except that we preserve
  // the AtomicOrdering for use later on.
  AtomicSDNode *AN = cast<AtomicSDNode>(Node);
  EVT VT = AN->getMemoryVT();

  unsigned Op;
  if (VT == MVT::i8)
    Op = Op8;
  else if (VT == MVT::i16)
    Op = Op16;
  else if (VT == MVT::i32)
    Op = Op32;
  else if (VT == MVT::i64)
    Op = Op64;
  else
    llvm_unreachable("Unexpected atomic operation");

  SmallVector<SDValue, 4> Ops;
  for (unsigned i = 1; i < AN->getNumOperands(); ++i)
    Ops.push_back(AN->getOperand(i));

  Ops.push_back(CurDAG->getTargetConstant(AN->getOrdering(), MVT::i32));
  Ops.push_back(AN->getOperand(0)); // Chain moves to the end

  return CurDAG->SelectNodeTo(Node, Op, AN->getValueType(0), MVT::Other,
                              &Ops[0], Ops.size());
}
static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
                                       unsigned &Opc, SDValue &Opd0,
                                       unsigned &LSB, unsigned &MSB,
                                       unsigned NumberOfIgnoredLowBits,
                                       bool BiggerPattern) {
  assert(N->getOpcode() == ISD::AND &&
         "N must be an AND operation to call this function");

  EVT VT = N->getValueType(0);

  // Here we can test the type of VT and return false when the type does not
  // match, but since it is done prior to that call in the current context
  // we turned that into an assert to avoid redundant code.
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "Type checking must have been done before calling this function");

  // FIXME: simplify-demanded-bits in DAGCombine will probably have
  // changed the AND node to a 32-bit mask operation. We'll have to
  // undo that as part of the transform here if we want to catch all
  // the opportunities.
  // Currently the NumberOfIgnoredLowBits argument helps to recover
  // from these situations when matching bigger pattern (bitfield insert).

  // For unsigned extracts, check for a shift right and mask
  uint64_t And_imm = 0;
  if (!isOpcWithIntImmediate(N, ISD::AND, And_imm))
    return false;

  const SDNode *Op0 = N->getOperand(0).getNode();

  // Because of simplify-demanded-bits in DAGCombine, the mask may have been
  // simplified. Try to undo that
  And_imm |= (1 << NumberOfIgnoredLowBits) - 1;

  // The immediate is a mask of the low bits iff imm & (imm+1) == 0
  if (And_imm & (And_imm + 1))
    return false;

  bool ClampMSB = false;
  uint64_t Srl_imm = 0;
  // Handle the SRL + ANY_EXTEND case.
  if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
      isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, Srl_imm)) {
    // Extend the incoming operand of the SRL to 64-bit.
    Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
    // Make sure to clamp the MSB so that we preserve the semantics of the
    // original operations.
    ClampMSB = true;
  } else if (isOpcWithIntImmediate(Op0, ISD::SRL, Srl_imm)) {
    Opd0 = Op0->getOperand(0);
  } else if (BiggerPattern) {
    // Let's pretend a 0 shift right has been performed.
    // The resulting code will be at least as good as the original one
    // plus it may expose more opportunities for bitfield insert pattern.
    // FIXME: Currently we limit this to the bigger pattern, because
    // some optimizations expect AND and not UBFM
    Opd0 = N->getOperand(0);
  } else
    return false;

  assert((BiggerPattern || (Srl_imm > 0 && Srl_imm < VT.getSizeInBits())) &&
         "bad amount in shift node!");

  LSB = Srl_imm;
  MSB = Srl_imm + (VT == MVT::i32 ? CountTrailingOnes_32(And_imm)
                                  : CountTrailingOnes_64(And_imm)) -
        1;
  if (ClampMSB)
    // Since we're moving the extend before the right shift operation, we need
    // to clamp the MSB to make sure we don't shift in undefined bits instead of
    // the zeros which would get shifted in with the original right shift
    // operation.
    MSB = MSB > 31 ? 31 : MSB;

  Opc = VT == MVT::i32 ? ARM64::UBFMWri : ARM64::UBFMXri;
  return true;
}
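// Worked example (illustrative): (and (srl x, 3), 0x1f) gives Srl_imm = 3 and
// CountTrailingOnes(0x1f) = 5, so LSB = 3 and MSB = 3 + 5 - 1 = 7, which
// selects "ubfm w0, w1, #3, #7" (i.e. ubfx w0, w1, #3, #5).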
static bool isOneBitExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
                                     unsigned &LSB, unsigned &MSB) {
  // We are looking for the following pattern which basically extracts a single
  // bit from the source value and places it in the LSB of the destination
  // value, all other bits of the destination value are set to zero:
  //
  // Value2 = AND Value, MaskImm
  // SRL Value2, ShiftImm
  //
  // with MaskImm >> ShiftImm == 1.
  //
  // This gets selected into a single UBFM:
  //
  // UBFM Value, ShiftImm, ShiftImm
  //

  if (N->getOpcode() != ISD::SRL)
    return false;

  uint64_t And_mask = 0;
  if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, And_mask))
    return false;

  Opd0 = N->getOperand(0).getOperand(0);

  uint64_t Srl_imm = 0;
  if (!isIntImmediate(N->getOperand(1), Srl_imm))
    return false;

  // Check whether we really have a one bit extract here.
  if (And_mask >> Srl_imm == 0x1) {
    if (N->getValueType(0) == MVT::i32)
      Opc = ARM64::UBFMWri;
    else
      Opc = ARM64::UBFMXri;

    LSB = MSB = Srl_imm;

    return true;
  }

  return false;
}
static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
                                       unsigned &LSB, unsigned &MSB,
                                       bool BiggerPattern) {
  assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
         "N must be a SHR/SRA operation to call this function");

  EVT VT = N->getValueType(0);

  // Here we can test the type of VT and return false when the type does not
  // match, but since it is done prior to that call in the current context
  // we turned that into an assert to avoid redundant code.
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "Type checking must have been done before calling this function");

  // Check for AND + SRL doing a one bit extract.
  if (isOneBitExtractOpFromShr(N, Opc, Opd0, LSB, MSB))
    return true;

  // we're looking for a shift of a shift
  uint64_t Shl_imm = 0;
  if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
    Opd0 = N->getOperand(0).getOperand(0);
  } else if (BiggerPattern) {
    // Let's pretend a 0 shift left has been performed.
    // FIXME: Currently we limit this to the bigger pattern case,
    // because some optimizations expect AND and not UBFM
    Opd0 = N->getOperand(0);
  } else
    return false;

  assert(Shl_imm < VT.getSizeInBits() && "bad amount in shift node!");
  uint64_t Srl_imm = 0;
  if (!isIntImmediate(N->getOperand(1), Srl_imm))
    return false;

  assert(Srl_imm > 0 && Srl_imm < VT.getSizeInBits() &&
         "bad amount in shift node!");
  // Note: The width operand is encoded as width-1.
  unsigned Width = VT.getSizeInBits() - Srl_imm - 1;
  int sLSB = Srl_imm - Shl_imm;
  if (sLSB < 0)
    return false;
  LSB = sLSB;
  MSB = LSB + Width;
  // SRA requires a signed extraction
  if (VT == MVT::i32)
    Opc = N->getOpcode() == ISD::SRA ? ARM64::SBFMWri : ARM64::UBFMWri;
  else
    Opc = N->getOpcode() == ISD::SRA ? ARM64::SBFMXri : ARM64::UBFMXri;
  return true;
}
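// Worked example (illustrative): (sra (shl x, 24), 26) on i32 gives
// Width = 32 - 26 - 1 = 5, LSB = 26 - 24 = 2 and MSB = 7, selecting
// "sbfm w0, w1, #2, #7", a signed extract of bits 7:2.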
static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
                                SDValue &Opd0, unsigned &LSB, unsigned &MSB,
                                unsigned NumberOfIgnoredLowBits = 0,
                                bool BiggerPattern = false) {
  if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
    return false;

  switch (N->getOpcode()) {
  default:
    if (!N->isMachineOpcode())
      return false;
    break;
  case ISD::AND:
    return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, LSB, MSB,
                                      NumberOfIgnoredLowBits, BiggerPattern);
  case ISD::SRL:
  case ISD::SRA:
    return isBitfieldExtractOpFromShr(N, Opc, Opd0, LSB, MSB, BiggerPattern);
  }

  unsigned NOpc = N->getMachineOpcode();
  switch (NOpc) {
  default:
    return false;
  case ARM64::SBFMWri:
  case ARM64::UBFMWri:
  case ARM64::SBFMXri:
  case ARM64::UBFMXri:
    Opc = NOpc;
    Opd0 = N->getOperand(0);
    LSB = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
    MSB = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
    return true;
  }
  // Unreachable
  return false;
}

SDNode *ARM64DAGToDAGISel::SelectBitfieldExtractOp(SDNode *N) {
  unsigned Opc, LSB, MSB;
  SDValue Opd0;
  if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, LSB, MSB))
    return 0;

  EVT VT = N->getValueType(0);
  SDValue Ops[] = { Opd0, CurDAG->getTargetConstant(LSB, VT),
                    CurDAG->getTargetConstant(MSB, VT) };
  return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 3);
}
// Is mask a i32 or i64 binary sequence 1..10..0 and
// CountTrailingZeros(mask) == ExpectedTrailingZeros
static bool isHighMask(uint64_t Mask, unsigned ExpectedTrailingZeros,
                       unsigned NumberOfIgnoredHighBits, EVT VT) {
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "i32 or i64 mask type expected!");

  uint64_t ExpectedMask;
  if (VT == MVT::i32) {
    uint32_t ExpectedMaski32 = ~0U << ExpectedTrailingZeros;
    ExpectedMask = ExpectedMaski32;
    if (NumberOfIgnoredHighBits) {
      uint32_t highMask = ~0U << (32 - NumberOfIgnoredHighBits);
      Mask |= highMask;
    }
  } else {
    ExpectedMask = ((uint64_t) ~0) << ExpectedTrailingZeros;
    if (NumberOfIgnoredHighBits)
      Mask |= ((uint64_t) ~0) << (64 - NumberOfIgnoredHighBits);
  }

  return Mask == ExpectedMask;
}
// Look for bits that will be useful for later uses.
// A bit is considered useless as soon as it is dropped and never used
// before it has been dropped.
// E.g., looking for useful bits of x
// 1. y = x & 0x7
// 2. z = y >> 2
// After #1, x useful bits are 0x7, then the useful bits of x, live through
// y.
// After #2, the useful bits of x are 0x4.
// However, if x is used on an unpredictable instruction, then all its bits
// are useful.
// E.g.
// 1. y = x & 0x7
// 2. z = y >> 2
// 3. str x, [@x]
static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);

static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
                                              unsigned Depth) {
  uint64_t Imm =
      cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
  Imm = ARM64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
  UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
  getUsefulBits(Op, UsefulBits, Depth + 1);
}
static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
                                             uint64_t Imm, uint64_t MSB,
                                             unsigned Depth) {
  // inherit the bitwidth value
  APInt OpUsefulBits(UsefulBits);
  OpUsefulBits = 1;

  if (MSB >= Imm) {
    OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
    --OpUsefulBits;
    // The interesting part will be in the lower part of the result
    getUsefulBits(Op, OpUsefulBits, Depth + 1);
    // The interesting part was starting at Imm in the argument
    OpUsefulBits = OpUsefulBits.shl(Imm);
  } else {
    OpUsefulBits = OpUsefulBits.shl(MSB + 1);
    --OpUsefulBits;
    // The interesting part will be shifted in the result
    OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
    getUsefulBits(Op, OpUsefulBits, Depth + 1);
    // The interesting part was at zero in the argument
    OpUsefulBits = OpUsefulBits.lshr(OpUsefulBits.getBitWidth() - Imm);
  }

  UsefulBits &= OpUsefulBits;
}

static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
                                  unsigned Depth) {
  uint64_t Imm =
      cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
  uint64_t MSB =
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();

  getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
}
static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
                                              unsigned Depth) {
  uint64_t ShiftTypeAndValue =
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
  APInt Mask(UsefulBits);
  Mask.clearAllBits();
  Mask.flipAllBits();

  if (ARM64_AM::getShiftType(ShiftTypeAndValue) == ARM64_AM::LSL) {
    // Shift Left
    uint64_t ShiftAmt = ARM64_AM::getShiftValue(ShiftTypeAndValue);
    Mask = Mask.shl(ShiftAmt);
    getUsefulBits(Op, Mask, Depth + 1);
    Mask = Mask.lshr(ShiftAmt);
  } else if (ARM64_AM::getShiftType(ShiftTypeAndValue) == ARM64_AM::LSR) {
    // Shift Right
    // We do not handle ARM64_AM::ASR, because the sign will change the
    // number of useful bits
    uint64_t ShiftAmt = ARM64_AM::getShiftValue(ShiftTypeAndValue);
    Mask = Mask.lshr(ShiftAmt);
    getUsefulBits(Op, Mask, Depth + 1);
    Mask = Mask.shl(ShiftAmt);
  } else
    return;

  UsefulBits &= Mask;
}
static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
                                 unsigned Depth) {
  uint64_t Imm =
      cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
  uint64_t MSB =
      cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();

  if (Op.getOperand(1) == Orig)
    return getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);

  APInt OpUsefulBits(UsefulBits);
  OpUsefulBits = 1;

  if (MSB >= Imm) {
    OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
    --OpUsefulBits;
    UsefulBits &= ~OpUsefulBits;
    getUsefulBits(Op, UsefulBits, Depth + 1);
  } else {
    OpUsefulBits = OpUsefulBits.shl(MSB + 1);
    --OpUsefulBits;
    UsefulBits = ~(OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm));
    getUsefulBits(Op, UsefulBits, Depth + 1);
  }
}
static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
                                SDValue Orig, unsigned Depth) {

  // Users of this node should have already been instruction selected
  // FIXME: Can we turn that into an assert?
  if (!UserNode->isMachineOpcode())
    return;

  switch (UserNode->getMachineOpcode()) {
  default:
    return;
  case ARM64::ANDSWri:
  case ARM64::ANDSXri:
  case ARM64::ANDWri:
  case ARM64::ANDXri:
    // We increment Depth only when we call the getUsefulBits
    return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
                                             Depth);
  case ARM64::UBFMWri:
  case ARM64::UBFMXri:
    return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);

  case ARM64::ORRWrs:
  case ARM64::ORRXrs:
    if (UserNode->getOperand(1) != Orig)
      return;
    return getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
                                             Depth);
  case ARM64::BFMWri:
  case ARM64::BFMXri:
    return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);
  }
}
static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
  if (Depth >= 6)
    return;
  // Initialize UsefulBits
  if (!Depth) {
    unsigned Bitwidth = Op.getValueType().getScalarType().getSizeInBits();
    // At the beginning, assume every produced bits is useful
    UsefulBits = APInt(Bitwidth, 0);
    UsefulBits.flipAllBits();
  }
  APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);

  for (SDNode::use_iterator UseIt = Op.getNode()->use_begin(),
                            UseEnd = Op.getNode()->use_end();
       UseIt != UseEnd; ++UseIt) {
    // A use cannot produce useful bits
    APInt UsefulBitsForUse = APInt(UsefulBits);
    getUsefulBitsForUse(*UseIt, UsefulBitsForUse, Op, Depth);
    UsersUsefulBits |= UsefulBitsForUse;
  }
  // UsefulBits contains the produced bits that are meaningful for the
  // current definition, thus a user cannot make a bit meaningful at
  // this point
  UsefulBits &= UsersUsefulBits;
}
// Given an OR operation, check if we have the following pattern
// ubfm c, b, imm, imm2 (or something that does the same jobs, see
// isBitfieldExtractOp)
// d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
//                 countTrailingZeros(mask2) == imm2 - imm + 1
//
// if yes, given reference arguments will be updated so that one can replace
// the OR instruction with:
// f = Opc Opd0, Opd1, LSB, MSB ; where Opc is a BFM, LSB = imm, and MSB = imm2
static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Opd0,
                                     SDValue &Opd1, unsigned &LSB,
                                     unsigned &MSB, SelectionDAG *CurDAG) {
  assert(N->getOpcode() == ISD::OR && "Expect an OR operation");

  // Set Opc
  EVT VT = N->getValueType(0);
  if (VT == MVT::i32)
    Opc = ARM64::BFMWri;
  else if (VT == MVT::i64)
    Opc = ARM64::BFMXri;
  else
    return false;

  // Because of simplify-demanded-bits in DAGCombine, involved masks may not
  // have the expected shape. Try to undo that.
  APInt UsefulBits;
  getUsefulBits(SDValue(N, 0), UsefulBits);

  unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
  unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();

  // OR is commutative, check both possibilities (does llvm provide a
  // way to do that directly, e.g., via code matcher?)
  SDValue OrOpd1Val = N->getOperand(1);
  SDNode *OrOpd0 = N->getOperand(0).getNode();
  SDNode *OrOpd1 = N->getOperand(1).getNode();
  for (int i = 0; i < 2;
       ++i, std::swap(OrOpd0, OrOpd1), OrOpd1Val = N->getOperand(0)) {
    unsigned BFXOpc;
    // Set Opd1, LSB and MSB arguments by looking for
    // c = ubfm b, imm, imm2
    if (!isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Opd1, LSB, MSB,
                             NumberOfIgnoredLowBits, true))
      continue;

    // Check that the returned opcode is compatible with the pattern,
    // i.e., same type and zero extended (U and not S)
    if ((BFXOpc != ARM64::UBFMXri && VT == MVT::i64) ||
        (BFXOpc != ARM64::UBFMWri && VT == MVT::i32))
      continue;

    // Compute the width of the bitfield insertion
    int sMSB = MSB - LSB + 1;
    // FIXME: This constraint is to catch bitfield insertion; we may
    // want to widen the pattern if we want to grab general bitfield
    // insertion.
    if (sMSB <= 0)
      continue;

    // Check the second part of the pattern
    EVT VT = OrOpd1->getValueType(0);
    if (VT != MVT::i32 && VT != MVT::i64)
      continue;

    // Compute the Known Zero for the candidate of the first operand.
    // This allows to catch more general case than just looking for
    // AND with imm. Indeed, simplify-demanded-bits may have removed
    // the AND instruction because it proves it was useless.
    APInt KnownZero, KnownOne;
    CurDAG->ComputeMaskedBits(OrOpd1Val, KnownZero, KnownOne);

    // Check if there is enough room for the second operand to appear
    // in the first one
    if (KnownZero.countTrailingOnes() < (unsigned)sMSB)
      continue;

    // Set the first operand
    uint64_t Imm;
    if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
        isHighMask(Imm, sMSB, NumberOfIgnoredHighBits, VT))
      // In that case, we can eliminate the AND
      Opd0 = OrOpd1->getOperand(0);
    else
      // Maybe the AND has been removed by simplify-demanded-bits
      // or is useful because it discards more bits
      Opd0 = OrOpd1Val;

    // both parts match
    return true;
  }

  return false;
}
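// Worked example (illustrative): on i32,
//   (or (and (srl b, 2), 0x3f), (and e, 0xffffffc0))
// matches with LSB = 2 and MSB = 7: the extract side becomes UBFM b, #2, #7,
// and since 0xffffffc0 is a high mask with 6 trailing zeros (the width of the
// inserted field), the whole OR is replaced by BFMWri e, b, #2, #7.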
SDNode *ARM64DAGToDAGISel::SelectBitfieldInsertOp(SDNode *N) {
  if (N->getOpcode() != ISD::OR)
    return 0;

  unsigned Opc;
  unsigned LSB, MSB;
  SDValue Opd0, Opd1;

  if (!isBitfieldInsertOpFromOr(N, Opc, Opd0, Opd1, LSB, MSB, CurDAG))
    return 0;

  EVT VT = N->getValueType(0);
  SDValue Ops[] = { Opd0,
                    Opd1,
                    CurDAG->getTargetConstant(LSB, VT),
                    CurDAG->getTargetConstant(MSB, VT) };
  return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 4);
}
SDNode *ARM64DAGToDAGISel::SelectLIBM(SDNode *N) {
  EVT VT = N->getValueType(0);
  unsigned Variant;
  unsigned Opc;
  unsigned FRINTXOpcs[] = { ARM64::FRINTXSr, ARM64::FRINTXDr };

  if (VT == MVT::f32) {
    Variant = 0;
  } else if (VT == MVT::f64) {
    Variant = 1;
  } else
    return 0; // Unrecognized argument type. Fall back on default codegen.

  // Pick the FRINTX variant needed to set the flags.
  unsigned FRINTXOpc = FRINTXOpcs[Variant];

  switch (N->getOpcode()) {
  default:
    return 0; // Unrecognized libm ISD node. Fall back on default codegen.
  case ISD::FCEIL: {
    unsigned FRINTPOpcs[] = { ARM64::FRINTPSr, ARM64::FRINTPDr };
    Opc = FRINTPOpcs[Variant];
    break;
  }
  case ISD::FFLOOR: {
    unsigned FRINTMOpcs[] = { ARM64::FRINTMSr, ARM64::FRINTMDr };
    Opc = FRINTMOpcs[Variant];
    break;
  }
  case ISD::FTRUNC: {
    unsigned FRINTZOpcs[] = { ARM64::FRINTZSr, ARM64::FRINTZDr };
    Opc = FRINTZOpcs[Variant];
    break;
  }
  case ISD::FROUND: {
    unsigned FRINTAOpcs[] = { ARM64::FRINTASr, ARM64::FRINTADr };
    Opc = FRINTAOpcs[Variant];
    break;
  }
  }

  SDLoc dl(N);
  SDValue In = N->getOperand(0);
  SmallVector<SDValue, 2> Ops;
  Ops.push_back(In);

  if (!TM.Options.UnsafeFPMath) {
    SDNode *FRINTX = CurDAG->getMachineNode(FRINTXOpc, dl, VT, MVT::Glue, In);
    Ops.push_back(SDValue(FRINTX, 1));
  }

  return CurDAG->getMachineNode(Opc, dl, VT, Ops);
}
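// Worked example (illustrative): with default (precise) FP math, an f64
// ISD::FFLOOR selects to FRINTMDr fed by the glue result of FRINTXDr, so the
// inexact exception is still raised as libm's floor would raise it; with
// unsafe FP math enabled the FRINTX is skipped and FRINTM is emitted alone.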
SDNode *ARM64DAGToDAGISel::Select(SDNode *Node) {
  // Dump information about the Node being selected
  DEBUG(errs() << "Selecting: ");
  DEBUG(Node->dump(CurDAG));
  DEBUG(errs() << "\n");

  // If we have a custom node, we already have selected!
  if (Node->isMachineOpcode()) {
    DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
    Node->setNodeId(-1);
    return NULL;
  }

  // Few custom selection stuff.
  SDNode *ResNode = 0;
  EVT VT = Node->getValueType(0);

  switch (Node->getOpcode()) {
  default:
    break;

  case ISD::ADD:
    if (SDNode *I = SelectMLAV64LaneV128(Node))
      return I;
    break;
  case ISD::ATOMIC_LOAD_ADD:
    return SelectAtomic(Node, ARM64::ATOMIC_LOAD_ADD_I8,
                        ARM64::ATOMIC_LOAD_ADD_I16, ARM64::ATOMIC_LOAD_ADD_I32,
                        ARM64::ATOMIC_LOAD_ADD_I64);
  case ISD::ATOMIC_LOAD_SUB:
    return SelectAtomic(Node, ARM64::ATOMIC_LOAD_SUB_I8,
                        ARM64::ATOMIC_LOAD_SUB_I16, ARM64::ATOMIC_LOAD_SUB_I32,
                        ARM64::ATOMIC_LOAD_SUB_I64);
  case ISD::ATOMIC_LOAD_AND:
    return SelectAtomic(Node, ARM64::ATOMIC_LOAD_AND_I8,
                        ARM64::ATOMIC_LOAD_AND_I16, ARM64::ATOMIC_LOAD_AND_I32,
                        ARM64::ATOMIC_LOAD_AND_I64);
  case ISD::ATOMIC_LOAD_OR:
    return SelectAtomic(Node, ARM64::ATOMIC_LOAD_OR_I8,
                        ARM64::ATOMIC_LOAD_OR_I16, ARM64::ATOMIC_LOAD_OR_I32,
                        ARM64::ATOMIC_LOAD_OR_I64);
  case ISD::ATOMIC_LOAD_XOR:
    return SelectAtomic(Node, ARM64::ATOMIC_LOAD_XOR_I8,
                        ARM64::ATOMIC_LOAD_XOR_I16, ARM64::ATOMIC_LOAD_XOR_I32,
                        ARM64::ATOMIC_LOAD_XOR_I64);
  case ISD::ATOMIC_LOAD_NAND:
    return SelectAtomic(
        Node, ARM64::ATOMIC_LOAD_NAND_I8, ARM64::ATOMIC_LOAD_NAND_I16,
        ARM64::ATOMIC_LOAD_NAND_I32, ARM64::ATOMIC_LOAD_NAND_I64);
  case ISD::ATOMIC_LOAD_MIN:
    return SelectAtomic(Node, ARM64::ATOMIC_LOAD_MIN_I8,
                        ARM64::ATOMIC_LOAD_MIN_I16, ARM64::ATOMIC_LOAD_MIN_I32,
                        ARM64::ATOMIC_LOAD_MIN_I64);
  case ISD::ATOMIC_LOAD_MAX:
    return SelectAtomic(Node, ARM64::ATOMIC_LOAD_MAX_I8,
                        ARM64::ATOMIC_LOAD_MAX_I16, ARM64::ATOMIC_LOAD_MAX_I32,
                        ARM64::ATOMIC_LOAD_MAX_I64);
  case ISD::ATOMIC_LOAD_UMIN:
    return SelectAtomic(
        Node, ARM64::ATOMIC_LOAD_UMIN_I8, ARM64::ATOMIC_LOAD_UMIN_I16,
        ARM64::ATOMIC_LOAD_UMIN_I32, ARM64::ATOMIC_LOAD_UMIN_I64);
  case ISD::ATOMIC_LOAD_UMAX:
    return SelectAtomic(
        Node, ARM64::ATOMIC_LOAD_UMAX_I8, ARM64::ATOMIC_LOAD_UMAX_I16,
        ARM64::ATOMIC_LOAD_UMAX_I32, ARM64::ATOMIC_LOAD_UMAX_I64);
  case ISD::ATOMIC_SWAP:
    return SelectAtomic(Node, ARM64::ATOMIC_SWAP_I8, ARM64::ATOMIC_SWAP_I16,
                        ARM64::ATOMIC_SWAP_I32, ARM64::ATOMIC_SWAP_I64);
  case ISD::ATOMIC_CMP_SWAP:
    return SelectAtomic(Node, ARM64::ATOMIC_CMP_SWAP_I8,
                        ARM64::ATOMIC_CMP_SWAP_I16, ARM64::ATOMIC_CMP_SWAP_I32,
                        ARM64::ATOMIC_CMP_SWAP_I64);
  case ISD::LOAD: {
    // Try to select as an indexed load. Fall through to normal processing
    // if we can't.
    bool Done = false;
    SDNode *I = SelectIndexedLoad(Node, Done);
    if (Done)
      return I;
    break;
  }
  case ISD::FP16_TO_FP32: {
    assert(Node->getOperand(0).getValueType() == MVT::i32 && "vector convert?");
    EVT VT = Node->getValueType(0);
    SDLoc DL(Node);
    SDValue FPR32Id =
        CurDAG->getTargetConstant(ARM64::FPR32RegClass.getID(), MVT::i32);
    SDNode *Res =
        CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, MVT::i32,
                               Node->getOperand(0), FPR32Id);
    SDValue FPR16Reg =
        CurDAG->getTargetExtractSubreg(ARM64::hsub, DL, VT, SDValue(Res, 0));
    return CurDAG->getMachineNode(ARM64::FCVTSHr, DL, VT, FPR16Reg);
  }
  case ISD::SRL:
  case ISD::SRA:
  case ISD::SHL:
    if (SDNode *I = SelectBitfieldExtractOp(Node))
      return I;
    break;

  case ISD::OR:
    if (SDNode *I = SelectBitfieldInsertOp(Node))
      return I;
    break;
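
  // Illustrative IR for the lane-zero fast path handled below:
  //   %r = extractelement <2 x double> %v, i32 0
  // This becomes a plain dsub EXTRACT_SUBREG of the source Q register.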
  case ISD::EXTRACT_VECTOR_ELT: {
    // Extracting lane zero is a special case where we can just use a plain
    // EXTRACT_SUBREG instruction, which will become FMOV. This is easier for
    // the rest of the compiler, especially the register allocator and copy
    // propagation, to reason about, so is preferred when it's possible to
    // use it.
    ConstantSDNode *LaneNode = cast<ConstantSDNode>(Node->getOperand(1));
    // Bail and use the default Select() for non-zero lanes.
    if (LaneNode->getZExtValue() != 0)
      break;
    // If the element type is not the same as the result type, likewise
    // bail and use the default Select(), as there's more to do than just
    // a cross-class COPY. This catches extracts of i8 and i16 elements
    // since they will need an explicit zext.
    if (VT != Node->getOperand(0).getValueType().getVectorElementType())
      break;
    unsigned SubReg;
    switch (Node->getOperand(0)
                .getValueType()
                .getVectorElementType()
                .getSizeInBits()) {
    default:
      llvm_unreachable("Unexpected vector element type!");
    case 64:
      SubReg = ARM64::dsub;
      break;
    case 32:
      SubReg = ARM64::ssub;
      break;
    case 16: // FALLTHROUGH
    case 8:
      llvm_unreachable("unexpected zext-requiring extract element!");
    }
    SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg, SDLoc(Node), VT,
                                                     Node->getOperand(0));
    DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
    DEBUG(Extract->dumpr(CurDAG));
    DEBUG(dbgs() << "\n");
    return Extract.getNode();
  }
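
  // e.g. a zero store such as "str wzr, [x0]" falls out naturally because
  // the i32 0 below is selected as a copy from WZR instead of a MOVZ.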
  case ISD::Constant: {
    // Materialize zero constants as copies from WZR/XZR. This allows
    // the coalescer to propagate these into other instructions.
    ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
    if (ConstNode->isNullValue()) {
      if (VT == MVT::i32)
        return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
                                      ARM64::WZR, MVT::i32).getNode();
      else if (VT == MVT::i64)
        return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
                                      ARM64::XZR, MVT::i64).getNode();
    }
    break;
  }
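
  // e.g. the address of a stack slot becomes "add x0, sp, #offset" once
  // frame indices are resolved after register allocation.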
  case ISD::FrameIndex: {
    // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
    int FI = cast<FrameIndexSDNode>(Node)->getIndex();
    unsigned Shifter = ARM64_AM::getShifterImm(ARM64_AM::LSL, 0);
    const TargetLowering *TLI = getTargetLowering();
    SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy());
    SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
                      CurDAG->getTargetConstant(Shifter, MVT::i32) };
    return CurDAG->SelectNodeTo(Node, ARM64::ADDXri, MVT::i64, Ops, 3);
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
    default:
      break;
    case Intrinsic::arm64_ldxp: {
      SDValue MemAddr = Node->getOperand(2);
      SDLoc DL(Node);
      SDValue Chain = Node->getOperand(0);

      SDNode *Ld = CurDAG->getMachineNode(ARM64::LDXPX, DL, MVT::i64, MVT::i64,
                                          MVT::Other, MemAddr, Chain);

      // Transfer memoperands.
      MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
      MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
      cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);

      return Ld;
    }
    case Intrinsic::arm64_stxp: {
      SDLoc DL(Node);
      SDValue Chain = Node->getOperand(0);
      SDValue ValLo = Node->getOperand(2);
      SDValue ValHi = Node->getOperand(3);
      SDValue MemAddr = Node->getOperand(4);

      // Place arguments in the right order.
      SmallVector<SDValue, 7> Ops;
      Ops.push_back(ValLo);
      Ops.push_back(ValHi);
      Ops.push_back(MemAddr);
      Ops.push_back(Chain);

      SDNode *St =
          CurDAG->getMachineNode(ARM64::STXPX, DL, MVT::i32, MVT::Other, Ops);
      // Transfer memoperands.
      MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
      MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
      cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

      return St;
    }
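
    // NEON structured loads: the instruction is chosen by element type and
    // vector width, and SelectLoad is told how many vectors are produced
    // plus the sub-register index of the first result (dsub0 for 64-bit
    // vectors, qsub0 for 128-bit ones).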
    case Intrinsic::arm64_neon_ld1x2:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 2, ARM64::LD1Twov8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 2, ARM64::LD1Twov16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 2, ARM64::LD1Twov4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 2, ARM64::LD1Twov8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 2, ARM64::LD1Twov2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 2, ARM64::LD1Twov4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 2, ARM64::LD1Twov1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 2, ARM64::LD1Twov2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld1x3:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 3, ARM64::LD1Threev8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 3, ARM64::LD1Threev16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 3, ARM64::LD1Threev4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 3, ARM64::LD1Threev8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 3, ARM64::LD1Threev2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 3, ARM64::LD1Threev4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 3, ARM64::LD1Threev1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 3, ARM64::LD1Threev2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld1x4:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 4, ARM64::LD1Fourv8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 4, ARM64::LD1Fourv16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 4, ARM64::LD1Fourv4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 4, ARM64::LD1Fourv8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 4, ARM64::LD1Fourv2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 4, ARM64::LD1Fourv4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 4, ARM64::LD1Fourv1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 4, ARM64::LD1Fourv2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld2:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 2, ARM64::LD2Twov8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 2, ARM64::LD2Twov16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 2, ARM64::LD2Twov4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 2, ARM64::LD2Twov8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 2, ARM64::LD2Twov2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 2, ARM64::LD2Twov4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 2, ARM64::LD1Twov1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 2, ARM64::LD2Twov2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld3:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 3, ARM64::LD3Threev8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 3, ARM64::LD3Threev16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 3, ARM64::LD3Threev4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 3, ARM64::LD3Threev8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 3, ARM64::LD3Threev2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 3, ARM64::LD3Threev4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 3, ARM64::LD1Threev1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 3, ARM64::LD3Threev2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld4:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 4, ARM64::LD4Fourv8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 4, ARM64::LD4Fourv16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 4, ARM64::LD4Fourv4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 4, ARM64::LD4Fourv8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 4, ARM64::LD4Fourv2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 4, ARM64::LD4Fourv4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 4, ARM64::LD1Fourv1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 4, ARM64::LD4Fourv2d, ARM64::qsub0);
      break;
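
    // Load-and-replicate forms (LD2R/LD3R/LD4R) broadcast one element per
    // structure to every lane of the corresponding destination vector.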
    case Intrinsic::arm64_neon_ld2r:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 2, ARM64::LD2Rv8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 2, ARM64::LD2Rv16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 2, ARM64::LD2Rv4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 2, ARM64::LD2Rv8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 2, ARM64::LD2Rv2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 2, ARM64::LD2Rv4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 2, ARM64::LD2Rv1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 2, ARM64::LD2Rv2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld3r:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 3, ARM64::LD3Rv8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 3, ARM64::LD3Rv16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 3, ARM64::LD3Rv4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 3, ARM64::LD3Rv8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 3, ARM64::LD3Rv2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 3, ARM64::LD3Rv4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 3, ARM64::LD3Rv1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 3, ARM64::LD3Rv2d, ARM64::qsub0);
      break;
    case Intrinsic::arm64_neon_ld4r:
      if (VT == MVT::v8i8)
        return SelectLoad(Node, 4, ARM64::LD4Rv8b, ARM64::dsub0);
      else if (VT == MVT::v16i8)
        return SelectLoad(Node, 4, ARM64::LD4Rv16b, ARM64::qsub0);
      else if (VT == MVT::v4i16)
        return SelectLoad(Node, 4, ARM64::LD4Rv4h, ARM64::dsub0);
      else if (VT == MVT::v8i16)
        return SelectLoad(Node, 4, ARM64::LD4Rv8h, ARM64::qsub0);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectLoad(Node, 4, ARM64::LD4Rv2s, ARM64::dsub0);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectLoad(Node, 4, ARM64::LD4Rv4s, ARM64::qsub0);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectLoad(Node, 4, ARM64::LD4Rv1d, ARM64::dsub0);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectLoad(Node, 4, ARM64::LD4Rv2d, ARM64::qsub0);
      break;
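
    // Lane-indexed loads need only one instruction per element size; the
    // D/Q distinction is carried by the lane number and register class.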
    case Intrinsic::arm64_neon_ld2lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectLoadLane(Node, 2, ARM64::LD2i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16)
        return SelectLoadLane(Node, 2, ARM64::LD2i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectLoadLane(Node, 2, ARM64::LD2i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectLoadLane(Node, 2, ARM64::LD2i64);
      break;
    case Intrinsic::arm64_neon_ld3lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectLoadLane(Node, 3, ARM64::LD3i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16)
        return SelectLoadLane(Node, 3, ARM64::LD3i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectLoadLane(Node, 3, ARM64::LD3i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectLoadLane(Node, 3, ARM64::LD3i64);
      break;
    case Intrinsic::arm64_neon_ld4lane:
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectLoadLane(Node, 4, ARM64::LD4i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16)
        return SelectLoadLane(Node, 4, ARM64::LD4i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectLoadLane(Node, 4, ARM64::LD4i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectLoadLane(Node, 4, ARM64::LD4i64);
      break;
    }
    break;
  }
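
  // Chainless intrinsics: TBL/TBX table lookups and the long multiplies,
  // which may be able to use the lane-indexed SMULL/UMULL forms.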
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
    switch (IntNo) {
    default:
      break;
    case Intrinsic::arm64_neon_tbl2:
      return SelectTable(Node, 2, VT == MVT::v8i8 ? ARM64::TBLv8i8Two
                                                  : ARM64::TBLv16i8Two,
                         false);
    case Intrinsic::arm64_neon_tbl3:
      return SelectTable(Node, 3, VT == MVT::v8i8 ? ARM64::TBLv8i8Three
                                                  : ARM64::TBLv16i8Three,
                         false);
    case Intrinsic::arm64_neon_tbl4:
      return SelectTable(Node, 4, VT == MVT::v8i8 ? ARM64::TBLv8i8Four
                                                  : ARM64::TBLv16i8Four,
                         false);
    case Intrinsic::arm64_neon_tbx2:
      return SelectTable(Node, 2, VT == MVT::v8i8 ? ARM64::TBXv8i8Two
                                                  : ARM64::TBXv16i8Two,
                         true);
    case Intrinsic::arm64_neon_tbx3:
      return SelectTable(Node, 3, VT == MVT::v8i8 ? ARM64::TBXv8i8Three
                                                  : ARM64::TBXv16i8Three,
                         true);
    case Intrinsic::arm64_neon_tbx4:
      return SelectTable(Node, 4, VT == MVT::v8i8 ? ARM64::TBXv8i8Four
                                                  : ARM64::TBXv16i8Four,
                         true);
    case Intrinsic::arm64_neon_smull:
    case Intrinsic::arm64_neon_umull:
      if (SDNode *N = SelectMULLV64LaneV128(IntNo, Node))
        return N;
      break;
    }
    break;
  }
  case ISD::INTRINSIC_VOID: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    if (Node->getNumOperands() >= 3)
      VT = Node->getOperand(2)->getValueType(0);
    switch (IntNo) {
    default:
      break;
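    // NEON structured stores mirror the loads above. Note the v1i64 cases:
    // there is no ST2/ST3/ST4 for single-element vectors, so they use the
    // equivalent multi-register ST1 forms.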
    case Intrinsic::arm64_neon_st1x2: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 2, ARM64::ST1Twov8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 2, ARM64::ST1Twov16b);
      else if (VT == MVT::v4i16)
        return SelectStore(Node, 2, ARM64::ST1Twov4h);
      else if (VT == MVT::v8i16)
        return SelectStore(Node, 2, ARM64::ST1Twov8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 2, ARM64::ST1Twov2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 2, ARM64::ST1Twov4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 2, ARM64::ST1Twov2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 2, ARM64::ST1Twov1d);
      break;
    }
    case Intrinsic::arm64_neon_st1x3: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 3, ARM64::ST1Threev8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 3, ARM64::ST1Threev16b);
      else if (VT == MVT::v4i16)
        return SelectStore(Node, 3, ARM64::ST1Threev4h);
      else if (VT == MVT::v8i16)
        return SelectStore(Node, 3, ARM64::ST1Threev8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 3, ARM64::ST1Threev2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 3, ARM64::ST1Threev4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 3, ARM64::ST1Threev2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 3, ARM64::ST1Threev1d);
      break;
    }
    case Intrinsic::arm64_neon_st1x4: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 4, ARM64::ST1Fourv8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 4, ARM64::ST1Fourv16b);
      else if (VT == MVT::v4i16)
        return SelectStore(Node, 4, ARM64::ST1Fourv4h);
      else if (VT == MVT::v8i16)
        return SelectStore(Node, 4, ARM64::ST1Fourv8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 4, ARM64::ST1Fourv2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 4, ARM64::ST1Fourv4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 4, ARM64::ST1Fourv2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 4, ARM64::ST1Fourv1d);
      break;
    }
    case Intrinsic::arm64_neon_st2: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 2, ARM64::ST2Twov8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 2, ARM64::ST2Twov16b);
      else if (VT == MVT::v4i16)
        return SelectStore(Node, 2, ARM64::ST2Twov4h);
      else if (VT == MVT::v8i16)
        return SelectStore(Node, 2, ARM64::ST2Twov8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 2, ARM64::ST2Twov2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 2, ARM64::ST2Twov4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 2, ARM64::ST2Twov2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 2, ARM64::ST1Twov1d);
      break;
    }
    case Intrinsic::arm64_neon_st3: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 3, ARM64::ST3Threev8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 3, ARM64::ST3Threev16b);
      else if (VT == MVT::v4i16)
        return SelectStore(Node, 3, ARM64::ST3Threev4h);
      else if (VT == MVT::v8i16)
        return SelectStore(Node, 3, ARM64::ST3Threev8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 3, ARM64::ST3Threev2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 3, ARM64::ST3Threev4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 3, ARM64::ST3Threev2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 3, ARM64::ST1Threev1d);
      break;
    }
    case Intrinsic::arm64_neon_st4: {
      if (VT == MVT::v8i8)
        return SelectStore(Node, 4, ARM64::ST4Fourv8b);
      else if (VT == MVT::v16i8)
        return SelectStore(Node, 4, ARM64::ST4Fourv16b);
      else if (VT == MVT::v4i16)
        return SelectStore(Node, 4, ARM64::ST4Fourv4h);
      else if (VT == MVT::v8i16)
        return SelectStore(Node, 4, ARM64::ST4Fourv8h);
      else if (VT == MVT::v2i32 || VT == MVT::v2f32)
        return SelectStore(Node, 4, ARM64::ST4Fourv2s);
      else if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return SelectStore(Node, 4, ARM64::ST4Fourv4s);
      else if (VT == MVT::v2i64 || VT == MVT::v2f64)
        return SelectStore(Node, 4, ARM64::ST4Fourv2d);
      else if (VT == MVT::v1i64 || VT == MVT::v1f64)
        return SelectStore(Node, 4, ARM64::ST1Fourv1d);
      break;
    }
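    // Lane-indexed stores write out a single structure's worth of lanes
    // from each source vector.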
    case Intrinsic::arm64_neon_st2lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectStoreLane(Node, 2, ARM64::ST2i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16)
        return SelectStoreLane(Node, 2, ARM64::ST2i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectStoreLane(Node, 2, ARM64::ST2i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectStoreLane(Node, 2, ARM64::ST2i64);
      break;
    }
    case Intrinsic::arm64_neon_st3lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectStoreLane(Node, 3, ARM64::ST3i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16)
        return SelectStoreLane(Node, 3, ARM64::ST3i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectStoreLane(Node, 3, ARM64::ST3i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectStoreLane(Node, 3, ARM64::ST3i64);
      break;
    }
    case Intrinsic::arm64_neon_st4lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8)
        return SelectStoreLane(Node, 4, ARM64::ST4i8);
      else if (VT == MVT::v8i16 || VT == MVT::v4i16)
        return SelectStoreLane(Node, 4, ARM64::ST4i16);
      else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32)
        return SelectStoreLane(Node, 4, ARM64::ST4i32);
      else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64)
        return SelectStoreLane(Node, 4, ARM64::ST4i64);
      break;
    }
    }
    break;
  }

  case ISD::FCEIL:
  case ISD::FFLOOR:
  case ISD::FTRUNC:
  case ISD::FROUND:
    if (SDNode *I = SelectLIBM(Node))
      return I;
    break;
  }

  // Select the default instruction
  ResNode = SelectCode(Node);

  DEBUG(errs() << "=> ");
  if (ResNode == NULL || ResNode == Node)
    DEBUG(Node->dump(CurDAG));
  else
    DEBUG(ResNode->dump(CurDAG));
  DEBUG(errs() << "\n");

  return ResNode;
}

/// createARM64ISelDag - This pass converts a legalized DAG into a
/// ARM64-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createARM64ISelDag(ARM64TargetMachine &TM,
                                       CodeGenOpt::Level OptLevel) {
  return new ARM64DAGToDAGISel(TM, OptLevel);
}