//===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the AArch64 target.
//
//===----------------------------------------------------------------------===//
14 #include "AArch64TargetMachine.h"
15 #include "MCTargetDesc/AArch64AddressingModes.h"
16 #include "llvm/ADT/APSInt.h"
17 #include "llvm/CodeGen/SelectionDAGISel.h"
18 #include "llvm/IR/Function.h" // To access function attributes.
19 #include "llvm/IR/GlobalValue.h"
20 #include "llvm/IR/Intrinsics.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Support/ErrorHandling.h"
23 #include "llvm/Support/MathExtras.h"
24 #include "llvm/Support/raw_ostream.h"
28 #define DEBUG_TYPE "aarch64-isel"
30 //===--------------------------------------------------------------------===//
31 /// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine
32 /// instructions for SelectionDAG operations.
36 class AArch64DAGToDAGISel : public SelectionDAGISel {
38 /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
39 /// make the right decision when generating code for different targets.
40 const AArch64Subtarget *Subtarget;
45 explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
46 CodeGenOpt::Level OptLevel)
47 : SelectionDAGISel(tm, OptLevel), Subtarget(nullptr),
50 const char *getPassName() const override {
51 return "AArch64 Instruction Selection";
54 bool runOnMachineFunction(MachineFunction &MF) override {
55 ForCodeSize = MF.getFunction()->optForSize();
56 Subtarget = &MF.getSubtarget<AArch64Subtarget>();
57 return SelectionDAGISel::runOnMachineFunction(MF);
60 SDNode *Select(SDNode *Node) override;
62 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
63 /// inline asm expressions.
64 bool SelectInlineAsmMemoryOperand(const SDValue &Op,
65 unsigned ConstraintID,
66 std::vector<SDValue> &OutOps) override;
68 SDNode *SelectMLAV64LaneV128(SDNode *N);
69 SDNode *SelectMULLV64LaneV128(unsigned IntNo, SDNode *N);
70 bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
71 bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
72 bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
73 bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
74 return SelectShiftedRegister(N, false, Reg, Shift);
76 bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
77 return SelectShiftedRegister(N, true, Reg, Shift);
79 bool SelectAddrModeIndexed7S8(SDValue N, SDValue &Base, SDValue &OffImm) {
80 return SelectAddrModeIndexed7S(N, 1, Base, OffImm);
82 bool SelectAddrModeIndexed7S16(SDValue N, SDValue &Base, SDValue &OffImm) {
83 return SelectAddrModeIndexed7S(N, 2, Base, OffImm);
85 bool SelectAddrModeIndexed7S32(SDValue N, SDValue &Base, SDValue &OffImm) {
86 return SelectAddrModeIndexed7S(N, 4, Base, OffImm);
88 bool SelectAddrModeIndexed7S64(SDValue N, SDValue &Base, SDValue &OffImm) {
89 return SelectAddrModeIndexed7S(N, 8, Base, OffImm);
91 bool SelectAddrModeIndexed7S128(SDValue N, SDValue &Base, SDValue &OffImm) {
92 return SelectAddrModeIndexed7S(N, 16, Base, OffImm);
94 bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
95 return SelectAddrModeIndexed(N, 1, Base, OffImm);
97 bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
98 return SelectAddrModeIndexed(N, 2, Base, OffImm);
100 bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
101 return SelectAddrModeIndexed(N, 4, Base, OffImm);
103 bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
104 return SelectAddrModeIndexed(N, 8, Base, OffImm);
106 bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
107 return SelectAddrModeIndexed(N, 16, Base, OffImm);
109 bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
110 return SelectAddrModeUnscaled(N, 1, Base, OffImm);
112 bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
113 return SelectAddrModeUnscaled(N, 2, Base, OffImm);
115 bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
116 return SelectAddrModeUnscaled(N, 4, Base, OffImm);
118 bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
119 return SelectAddrModeUnscaled(N, 8, Base, OffImm);
121 bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
122 return SelectAddrModeUnscaled(N, 16, Base, OffImm);
126 bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
127 SDValue &SignExtend, SDValue &DoShift) {
128 return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
132 bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
133 SDValue &SignExtend, SDValue &DoShift) {
134 return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
138 /// Form sequences of consecutive 64/128-bit registers for use in NEON
139 /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
/// between 1 and 4 elements. If it contains a single element, that element is
/// returned unchanged; otherwise a REG_SEQUENCE value is returned.
142 SDValue createDTuple(ArrayRef<SDValue> Vecs);
143 SDValue createQTuple(ArrayRef<SDValue> Vecs);
145 /// Generic helper for the createDTuple/createQTuple
146 /// functions. Those should almost always be called instead.
147 SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
148 const unsigned SubRegs[]);
150 SDNode *SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);
152 SDNode *SelectIndexedLoad(SDNode *N, bool &Done);
154 SDNode *SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
156 SDNode *SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
158 SDNode *SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
159 SDNode *SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
161 SDNode *SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
162 SDNode *SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
163 SDNode *SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
164 SDNode *SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
166 SDNode *SelectBitfieldExtractOp(SDNode *N);
167 SDNode *SelectBitfieldInsertOp(SDNode *N);
168 SDNode *SelectBitfieldInsertInZeroOp(SDNode *N);
170 SDNode *SelectReadRegister(SDNode *N);
171 SDNode *SelectWriteRegister(SDNode *N);
173 // Include the pieces autogenerated from the target description.
174 #include "AArch64GenDAGISel.inc"
177 bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
179 bool SelectAddrModeIndexed7S(SDValue N, unsigned Size, SDValue &Base,
181 bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
183 bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
185 bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
186 SDValue &Offset, SDValue &SignExtend,
188 bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
189 SDValue &Offset, SDValue &SignExtend,
191 bool isWorthFolding(SDValue V) const;
192 bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
193 SDValue &Offset, SDValue &SignExtend);
195 template<unsigned RegWidth>
196 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
197 return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
200 bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);
202 } // end anonymous namespace
/// isIntImmediate - This method tests to see if the node is a constant
/// operand. If so, Imm will receive the value.
206 static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
207 if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
208 Imm = C->getZExtValue();
// isIntImmediate - This method tests to see if the operand is a constant.
// If so, Imm will receive the value.
216 static bool isIntImmediate(SDValue N, uint64_t &Imm) {
217 return isIntImmediate(N.getNode(), Imm);
// isOpcWithIntImmediate - This method tests to see if the node has a specific
// opcode and an immediate integer right operand.
// If so, Imm will receive the value.
223 static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
225 return N->getOpcode() == Opc &&
226 isIntImmediate(N->getOperand(1).getNode(), Imm);
229 bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
230 const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
231 switch(ConstraintID) {
233 llvm_unreachable("Unexpected asm memory constraint");
234 case InlineAsm::Constraint_i:
235 case InlineAsm::Constraint_m:
236 case InlineAsm::Constraint_Q:
237 // Require the address to be in a register. That is safe for all AArch64
238 // variants and it is hard to do anything much smarter without knowing
239 // how the operand is used.
240 OutOps.push_back(Op);
246 /// SelectArithImmed - Select an immediate value that can be represented as
247 /// a 12-bit value shifted left by either 0 or 12. If so, return true with
248 /// Val set to the 12-bit value and Shift set to the shifter operand.
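// Illustrative example (not from the original source): 0x123 is selected as
// Val = 0x123 with "LSL #0", and 0x123000 as Val = 0x123 with "LSL #12";
// 0x123001 cannot be represented this way and is rejected.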
249 bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
// This function is called from the addsub_shifted_imm ComplexPattern,
// which lists [imm] as the list of opcodes it's interested in; however,
// we still need to check whether the operand is actually an immediate
// here because the ComplexPattern opcode list is only used in
// root-level opcode matching.
256 if (!isa<ConstantSDNode>(N.getNode()))
259 uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
262 if (Immed >> 12 == 0) {
264 } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
270 unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
272 Val = CurDAG->getTargetConstant(Immed, dl, MVT::i32);
273 Shift = CurDAG->getTargetConstant(ShVal, dl, MVT::i32);
277 /// SelectNegArithImmed - As above, but negates the value before trying to
279 bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
// This function is called from the addsub_shifted_imm ComplexPattern,
// which lists [imm] as the list of opcodes it's interested in; however,
// we still need to check whether the operand is actually an immediate
// here because the ComplexPattern opcode list is only used in
// root-level opcode matching.
286 if (!isa<ConstantSDNode>(N.getNode()))
289 // The immediate operand must be a 24-bit zero-extended immediate.
290 uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
292 // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
293 // have the opposite effect on the C flag, so this pattern mustn't match under
294 // those circumstances.
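// Illustrative example (not from the original source): "cmp w0, #0" (a SUBS)
// always sets the C flag, whereas "cmn w0, #0" (an ADDS of the negated
// immediate) always clears it, so a compare against zero must not be
// rewritten into its negated form.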
298 if (N.getValueType() == MVT::i32)
299 Immed = ~((uint32_t)Immed) + 1;
301 Immed = ~Immed + 1ULL;
302 if (Immed & 0xFFFFFFFFFF000000ULL)
305 Immed &= 0xFFFFFFULL;
306 return SelectArithImmed(CurDAG->getConstant(Immed, SDLoc(N), MVT::i32), Val,
/// getShiftTypeForNode - Translate a shift node to the corresponding
/// ShiftType value.
312 static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
313 switch (N.getOpcode()) {
315 return AArch64_AM::InvalidShiftExtend;
317 return AArch64_AM::LSL;
319 return AArch64_AM::LSR;
321 return AArch64_AM::ASR;
323 return AArch64_AM::ROR;
/// \brief Determine whether it is worth folding V into an extended register.
328 bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
// Folding hurts if the value is used at least twice, unless we are optimizing
// for code size.
331 if (ForCodeSize || V.hasOneUse())
336 /// SelectShiftedRegister - Select a "shifted register" operand. If the value
/// is not shifted, set the Shift operand to the default of "LSL 0". The
/// logical instructions allow the shifted register to be rotated, but the
/// arithmetic instructions do not. The AllowROR parameter specifies whether
/// ROR is allowed.
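/// Illustrative example (not from the original source): (and x0, (shl x1, 3))
/// can be selected as "and x0, x0, x1, lsl #3", while (add x0, (rotr x1, 5))
/// cannot fold the rotate, because the arithmetic instructions reject ROR.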
341 bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
342 SDValue &Reg, SDValue &Shift) {
343 AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
344 if (ShType == AArch64_AM::InvalidShiftExtend)
346 if (!AllowROR && ShType == AArch64_AM::ROR)
349 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
350 unsigned BitSize = N.getValueType().getSizeInBits();
351 unsigned Val = RHS->getZExtValue() & (BitSize - 1);
352 unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);
354 Reg = N.getOperand(0);
355 Shift = CurDAG->getTargetConstant(ShVal, SDLoc(N), MVT::i32);
356 return isWorthFolding(N);
362 /// getExtendTypeForNode - Translate an extend node to the corresponding
363 /// ExtendType value.
364 static AArch64_AM::ShiftExtendType
365 getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
366 if (N.getOpcode() == ISD::SIGN_EXTEND ||
367 N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
369 if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
370 SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
372 SrcVT = N.getOperand(0).getValueType();
374 if (!IsLoadStore && SrcVT == MVT::i8)
375 return AArch64_AM::SXTB;
376 else if (!IsLoadStore && SrcVT == MVT::i16)
377 return AArch64_AM::SXTH;
378 else if (SrcVT == MVT::i32)
379 return AArch64_AM::SXTW;
380 assert(SrcVT != MVT::i64 && "extend from 64-bits?");
382 return AArch64_AM::InvalidShiftExtend;
383 } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
384 N.getOpcode() == ISD::ANY_EXTEND) {
385 EVT SrcVT = N.getOperand(0).getValueType();
386 if (!IsLoadStore && SrcVT == MVT::i8)
387 return AArch64_AM::UXTB;
388 else if (!IsLoadStore && SrcVT == MVT::i16)
389 return AArch64_AM::UXTH;
390 else if (SrcVT == MVT::i32)
391 return AArch64_AM::UXTW;
392 assert(SrcVT != MVT::i64 && "extend from 64-bits?");
394 return AArch64_AM::InvalidShiftExtend;
395 } else if (N.getOpcode() == ISD::AND) {
396 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
398 return AArch64_AM::InvalidShiftExtend;
399 uint64_t AndMask = CSD->getZExtValue();
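// Only masks that behave exactly like a zero-extend are accepted here
// (illustrative): 0xFF acts as UXTB, 0xFFFF as UXTH and 0xFFFFFFFF as UXTW;
// any other mask is rejected.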
403 return AArch64_AM::InvalidShiftExtend;
405 return !IsLoadStore ? AArch64_AM::UXTB : AArch64_AM::InvalidShiftExtend;
407 return !IsLoadStore ? AArch64_AM::UXTH : AArch64_AM::InvalidShiftExtend;
409 return AArch64_AM::UXTW;
413 return AArch64_AM::InvalidShiftExtend;
416 // Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
417 static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
418 if (DL->getOpcode() != AArch64ISD::DUPLANE16 &&
419 DL->getOpcode() != AArch64ISD::DUPLANE32)
422 SDValue SV = DL->getOperand(0);
423 if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
426 SDValue EV = SV.getOperand(1);
427 if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
430 ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
431 ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
432 LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
433 LaneOp = EV.getOperand(0);
438 // Helper for SelectOpcV64LaneV128 - Recognize operations where one operand is a
439 // high lane extract.
440 static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
441 SDValue &LaneOp, int &LaneIdx) {
if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
  std::swap(Op0, Op1);
  if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx))
452 /// SelectMLAV64LaneV128 - AArch64 supports vector MLAs where one multiplicand
453 /// is a lane in the upper half of a 128-bit vector. Recognize and select this
454 /// so that we don't emit unnecessary lane extracts.
455 SDNode *AArch64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
457 SDValue Op0 = N->getOperand(0);
458 SDValue Op1 = N->getOperand(1);
459 SDValue MLAOp1; // Will hold ordinary multiplicand for MLA.
460 SDValue MLAOp2; // Will hold lane-accessed multiplicand for MLA.
461 int LaneIdx = -1; // Will hold the lane index.
463 if (Op1.getOpcode() != ISD::MUL ||
464 !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
467 if (Op1.getOpcode() != ISD::MUL ||
468 !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
473 SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);
475 SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };
477 unsigned MLAOpc = ~0U;
479 switch (N->getSimpleValueType(0).SimpleTy) {
481 llvm_unreachable("Unrecognized MLA.");
483 MLAOpc = AArch64::MLAv4i16_indexed;
486 MLAOpc = AArch64::MLAv8i16_indexed;
489 MLAOpc = AArch64::MLAv2i32_indexed;
492 MLAOpc = AArch64::MLAv4i32_indexed;
496 return CurDAG->getMachineNode(MLAOpc, dl, N->getValueType(0), Ops);
499 SDNode *AArch64DAGToDAGISel::SelectMULLV64LaneV128(unsigned IntNo, SDNode *N) {
505 if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
509 SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);
511 SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };
513 unsigned SMULLOpc = ~0U;
515 if (IntNo == Intrinsic::aarch64_neon_smull) {
516 switch (N->getSimpleValueType(0).SimpleTy) {
518 llvm_unreachable("Unrecognized SMULL.");
520 SMULLOpc = AArch64::SMULLv4i16_indexed;
523 SMULLOpc = AArch64::SMULLv2i32_indexed;
526 } else if (IntNo == Intrinsic::aarch64_neon_umull) {
527 switch (N->getSimpleValueType(0).SimpleTy) {
llvm_unreachable("Unrecognized UMULL.");
531 SMULLOpc = AArch64::UMULLv4i16_indexed;
534 SMULLOpc = AArch64::UMULLv2i32_indexed;
538 llvm_unreachable("Unrecognized intrinsic.");
540 return CurDAG->getMachineNode(SMULLOpc, dl, N->getValueType(0), Ops);
543 /// Instructions that accept extend modifiers like UXTW expect the register
544 /// being extended to be a GPR32, but the incoming DAG might be acting on a
545 /// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
546 /// this is the case.
547 static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
548 if (N.getValueType() == MVT::i32)
552 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
553 MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
554 dl, MVT::i32, N, SubReg);
555 return SDValue(Node, 0);
/// SelectArithExtendedRegister - Select an "extended register" operand. This
560 /// operand folds in an extend followed by an optional left shift.
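/// Illustrative example (not from the original source): for
/// (add x0, (shl (sext_inreg x1, i8), 2)) this selects the operand as
/// "w1, sxtb #2", folding both the extend and the left shift.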
561 bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
563 unsigned ShiftVal = 0;
564 AArch64_AM::ShiftExtendType Ext;
566 if (N.getOpcode() == ISD::SHL) {
567 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
570 ShiftVal = CSD->getZExtValue();
574 Ext = getExtendTypeForNode(N.getOperand(0));
575 if (Ext == AArch64_AM::InvalidShiftExtend)
578 Reg = N.getOperand(0).getOperand(0);
580 Ext = getExtendTypeForNode(N);
581 if (Ext == AArch64_AM::InvalidShiftExtend)
584 Reg = N.getOperand(0);
587 // AArch64 mandates that the RHS of the operation must use the smallest
588 // register class that could contain the size being extended from. Thus,
589 // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
590 // there might not be an actual 32-bit value in the program. We can
// (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
592 assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
593 Reg = narrowIfNeeded(CurDAG, Reg);
594 Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
596 return isWorthFolding(N);
599 /// If there's a use of this ADDlow that's not itself a load/store then we'll
600 /// need to create a real ADD instruction from it anyway and there's no point in
601 /// folding it into the mem op. Theoretically, it shouldn't matter, but there's
602 /// a single pseudo-instruction for an ADRP/ADD pair so over-aggressive folding
603 /// leads to duplicated ADRP instructions.
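// Illustrative example (not from the original source): if the ADDlow of a
// global feeds both a load and a non-memory use (say, a call argument),
// folding it into the load would still leave a separate ADRP/ADD pair for the
// other use, duplicating the ADRP.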
604 static bool isWorthFoldingADDlow(SDValue N) {
605 for (auto Use : N->uses()) {
606 if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
607 Use->getOpcode() != ISD::ATOMIC_LOAD &&
608 Use->getOpcode() != ISD::ATOMIC_STORE)
// ldar and stlr have much more restrictive addressing modes (just a
// register).
613 if (cast<MemSDNode>(Use)->getOrdering() > Monotonic)
620 /// SelectAddrModeIndexed7S - Select a "register plus scaled signed 7-bit
621 /// immediate" address. The "Size" argument is the size in bytes of the memory
622 /// reference, which determines the scale.
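/// For example (illustrative): with Size == 8 the offset must be a multiple
/// of 8 in the range [-512, 504], i.e. a signed 7-bit value scaled by the
/// access size; this is the form used by paired accesses such as LDP/STP.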
623 bool AArch64DAGToDAGISel::SelectAddrModeIndexed7S(SDValue N, unsigned Size,
627 const DataLayout &DL = CurDAG->getDataLayout();
628 const TargetLowering *TLI = getTargetLowering();
629 if (N.getOpcode() == ISD::FrameIndex) {
630 int FI = cast<FrameIndexSDNode>(N)->getIndex();
631 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
632 OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
// As opposed to the (12-bit) Indexed addressing mode below, the 7-bit signed
// addressing mode selected here doesn't support labels/immediates, only
// base+offset.
639 if (CurDAG->isBaseWithConstantOffset(N)) {
640 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
641 int64_t RHSC = RHS->getSExtValue();
642 unsigned Scale = Log2_32(Size);
643 if ((RHSC & (Size - 1)) == 0 && RHSC >= -(0x40 << Scale) &&
644 RHSC < (0x40 << Scale)) {
645 Base = N.getOperand(0);
646 if (Base.getOpcode() == ISD::FrameIndex) {
647 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
648 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
650 OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
656 // Base only. The address will be materialized into a register before
657 // the memory is accessed.
658 // add x0, Xbase, #offset
661 OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
665 /// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
666 /// immediate" address. The "Size" argument is the size in bytes of the memory
667 /// reference, which determines the scale.
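/// For example (illustrative): with Size == 4 the offset must be a multiple
/// of 4 in the range [0, 16380], matching the scaled unsigned form
/// "ldr w0, [x1, #imm]".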
668 bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
669 SDValue &Base, SDValue &OffImm) {
671 const DataLayout &DL = CurDAG->getDataLayout();
672 const TargetLowering *TLI = getTargetLowering();
673 if (N.getOpcode() == ISD::FrameIndex) {
674 int FI = cast<FrameIndexSDNode>(N)->getIndex();
675 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
676 OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
680 if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
681 GlobalAddressSDNode *GAN =
682 dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
683 Base = N.getOperand(0);
684 OffImm = N.getOperand(1);
688 const GlobalValue *GV = GAN->getGlobal();
689 unsigned Alignment = GV->getAlignment();
690 Type *Ty = GV->getType()->getElementType();
691 if (Alignment == 0 && Ty->isSized())
692 Alignment = DL.getABITypeAlignment(Ty);
694 if (Alignment >= Size)
698 if (CurDAG->isBaseWithConstantOffset(N)) {
699 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
700 int64_t RHSC = (int64_t)RHS->getZExtValue();
701 unsigned Scale = Log2_32(Size);
702 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
703 Base = N.getOperand(0);
704 if (Base.getOpcode() == ISD::FrameIndex) {
705 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
706 Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
708 OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
714 // Before falling back to our general case, check if the unscaled
715 // instructions can handle this. If so, that's preferable.
716 if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
719 // Base only. The address will be materialized into a register before
720 // the memory is accessed.
721 // add x0, Xbase, #offset
724 OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
728 /// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
729 /// immediate" address. This should only match when there is an offset that
730 /// is not valid for a scaled immediate addressing mode. The "Size" argument
731 /// is the size in bytes of the memory reference, which is needed here to know
732 /// what is valid for a scaled immediate.
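/// For example (illustrative): an offset of 3 on a 4-byte access cannot use
/// the scaled form, but fits the unscaled [-256, 255] range and is selected
/// as "ldur w0, [x1, #3]".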
733 bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
736 if (!CurDAG->isBaseWithConstantOffset(N))
738 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
739 int64_t RHSC = RHS->getSExtValue();
740 // If the offset is valid as a scaled immediate, don't match here.
741 if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
742 RHSC < (0x1000 << Log2_32(Size)))
744 if (RHSC >= -256 && RHSC < 256) {
745 Base = N.getOperand(0);
746 if (Base.getOpcode() == ISD::FrameIndex) {
747 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
748 const TargetLowering *TLI = getTargetLowering();
749 Base = CurDAG->getTargetFrameIndex(
750 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
752 OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i64);
759 static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
761 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
762 SDValue ImpDef = SDValue(
763 CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::i64), 0);
764 MachineSDNode *Node = CurDAG->getMachineNode(
765 TargetOpcode::INSERT_SUBREG, dl, MVT::i64, ImpDef, N, SubReg);
766 return SDValue(Node, 0);
/// \brief Check if the given SHL node (\p N) can be used to form an
770 /// extended register for an addressing mode.
771 bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
772 bool WantExtend, SDValue &Offset,
773 SDValue &SignExtend) {
774 assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
775 ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
776 if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
781 AArch64_AM::ShiftExtendType Ext =
782 getExtendTypeForNode(N.getOperand(0), true);
783 if (Ext == AArch64_AM::InvalidShiftExtend)
786 Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
787 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
790 Offset = N.getOperand(0);
791 SignExtend = CurDAG->getTargetConstant(0, dl, MVT::i32);
794 unsigned LegalShiftVal = Log2_32(Size);
795 unsigned ShiftVal = CSD->getZExtValue();
797 if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
800 if (isWorthFolding(N))
806 bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
807 SDValue &Base, SDValue &Offset,
810 if (N.getOpcode() != ISD::ADD)
812 SDValue LHS = N.getOperand(0);
813 SDValue RHS = N.getOperand(1);
816 // We don't want to match immediate adds here, because they are better lowered
817 // to the register-immediate addressing modes.
818 if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
821 // Check if this particular node is reused in any non-memory related
822 // operation. If yes, do not try to fold this node into the address
823 // computation, since the computation will be kept.
824 const SDNode *Node = N.getNode();
825 for (SDNode *UI : Node->uses()) {
826 if (!isa<MemSDNode>(*UI))
// Remember if it is worth folding N when it produces an extended register.
831 bool IsExtendedRegisterWorthFolding = isWorthFolding(N);
833 // Try to match a shifted extend on the RHS.
834 if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
835 SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
837 DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
841 // Try to match a shifted extend on the LHS.
842 if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
843 SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
845 DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
849 // There was no shift, whatever else we find.
850 DoShift = CurDAG->getTargetConstant(false, dl, MVT::i32);
852 AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend;
853 // Try to match an unshifted extend on the LHS.
854 if (IsExtendedRegisterWorthFolding &&
855 (Ext = getExtendTypeForNode(LHS, true)) !=
856 AArch64_AM::InvalidShiftExtend) {
858 Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
859 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
861 if (isWorthFolding(LHS))
865 // Try to match an unshifted extend on the RHS.
866 if (IsExtendedRegisterWorthFolding &&
867 (Ext = getExtendTypeForNode(RHS, true)) !=
868 AArch64_AM::InvalidShiftExtend) {
870 Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
871 SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
873 if (isWorthFolding(RHS))
880 // Check if the given immediate is preferred by ADD. If an immediate can be
// encoded in an ADD, or it can be encoded in an "ADD LSL #12" and cannot be
// encoded by a single MOVZ, return true.
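// Illustrative examples (not from the original source): 0xabc is encodable
// directly in an ADD; 0x123000 needs "ADD ..., LSL #12" but no single MOVZ
// can produce it, so it is still preferred; 0xab0000 is rejected because
// "MOVZ #0xab, LSL #16" materializes it in one instruction.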
883 static bool isPreferredADD(int64_t ImmOff) {
// A constant in [0x0, 0xfff] can be encoded in ADD.
885 if ((ImmOff & 0xfffffffffffff000LL) == 0x0LL)
887 // Check if it can be encoded in an "ADD LSL #12".
888 if ((ImmOff & 0xffffffffff000fffLL) == 0x0LL)
// As a single MOVZ is faster than an "ADD LSL #12", ignore such a constant.
890 return (ImmOff & 0xffffffffff00ffffLL) != 0x0LL &&
891 (ImmOff & 0xffffffffffff0fffLL) != 0x0LL;
895 bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
896 SDValue &Base, SDValue &Offset,
899 if (N.getOpcode() != ISD::ADD)
901 SDValue LHS = N.getOperand(0);
902 SDValue RHS = N.getOperand(1);
905 // Check if this particular node is reused in any non-memory related
906 // operation. If yes, do not try to fold this node into the address
907 // computation, since the computation will be kept.
908 const SDNode *Node = N.getNode();
909 for (SDNode *UI : Node->uses()) {
910 if (!isa<MemSDNode>(*UI))
// Watch out if RHS is a wide immediate; it cannot be selected into the
// [BaseReg+Imm] addressing mode, and it may not be encodable in an ADD/SUB
// either. In that case the [BaseReg + 0] addressing mode is used and code
// like the following is generated:
//   MOV  X0, WideImmediate
//   ADD  X1, BaseReg, X0
//   LDR  X2, [X1, 0]
// For such a situation, using the [BaseReg, XReg] addressing mode saves one
// ADD/SUB:
//   MOV  X0, WideImmediate
//   LDR  X2, [BaseReg, X0]
925 if (isa<ConstantSDNode>(RHS)) {
926 int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
927 unsigned Scale = Log2_32(Size);
// Skip the immediate if it can be selected by the load/store addressing
// mode, or if it can be encoded by a single ADD (SUB is also checked by
// using -ImmOff).
931 if ((ImmOff % Size == 0 && ImmOff >= 0 && ImmOff < (0x1000 << Scale)) ||
932 isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
935 SDValue Ops[] = { RHS };
937 CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
938 SDValue MOVIV = SDValue(MOVI, 0);
// This ADD of two X registers will be selected into [Reg+Reg] mode.
940 N = CurDAG->getNode(ISD::ADD, DL, MVT::i64, LHS, MOVIV);
// Remember if it is worth folding N when it produces an extended register.
944 bool IsExtendedRegisterWorthFolding = isWorthFolding(N);
946 // Try to match a shifted extend on the RHS.
947 if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
948 SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
950 DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
954 // Try to match a shifted extend on the LHS.
955 if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
956 SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
958 DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
962 // Match any non-shifted, non-extend, non-immediate add expression.
965 SignExtend = CurDAG->getTargetConstant(false, DL, MVT::i32);
966 DoShift = CurDAG->getTargetConstant(false, DL, MVT::i32);
967 // Reg1 + Reg2 is free: no check needed.
971 SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
972 static const unsigned RegClassIDs[] = {
973 AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
974 static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
975 AArch64::dsub2, AArch64::dsub3};
977 return createTuple(Regs, RegClassIDs, SubRegs);
980 SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
981 static const unsigned RegClassIDs[] = {
982 AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
983 static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
984 AArch64::qsub2, AArch64::qsub3};
986 return createTuple(Regs, RegClassIDs, SubRegs);
989 SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
990 const unsigned RegClassIDs[],
991 const unsigned SubRegs[]) {
// There's no special register-class for a vector-list of 1 element: it's just
// the vector itself.
994 if (Regs.size() == 1)
997 assert(Regs.size() >= 2 && Regs.size() <= 4);
1001 SmallVector<SDValue, 4> Ops;
1003 // First operand of REG_SEQUENCE is the desired RegClass.
1005 CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], DL, MVT::i32));
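// E.g. (illustrative): for two Q registers this picks QQRegClassID, and the
// loop below appends (reg, qsub0) and (reg, qsub1) operand pairs.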
1007 // Then we get pairs of source & subregister-position for the components.
1008 for (unsigned i = 0; i < Regs.size(); ++i) {
1009 Ops.push_back(Regs[i]);
1010 Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], DL, MVT::i32));
1014 CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
1015 return SDValue(N, 0);
1018 SDNode *AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs,
1019 unsigned Opc, bool isExt) {
1021 EVT VT = N->getValueType(0);
1023 unsigned ExtOff = isExt;
1025 // Form a REG_SEQUENCE to force register allocation.
1026 unsigned Vec0Off = ExtOff + 1;
1027 SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
1028 N->op_begin() + Vec0Off + NumVecs);
1029 SDValue RegSeq = createQTuple(Regs);
1031 SmallVector<SDValue, 6> Ops;
1033 Ops.push_back(N->getOperand(1));
1034 Ops.push_back(RegSeq);
1035 Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
1036 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
1039 SDNode *AArch64DAGToDAGISel::SelectIndexedLoad(SDNode *N, bool &Done) {
1040 LoadSDNode *LD = cast<LoadSDNode>(N);
1041 if (LD->isUnindexed())
1043 EVT VT = LD->getMemoryVT();
1044 EVT DstVT = N->getValueType(0);
1045 ISD::MemIndexedMode AM = LD->getAddressingMode();
1046 bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
1048 // We're not doing validity checking here. That was done when checking
1049 // if we should mark the load as indexed or not. We're just selecting
1050 // the right instruction.
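// Illustrative example (not from the original source): a post-incremented
// non-extending i32 load with offset 4 becomes LDRWpost ("ldr w0, [x1], #4"),
// producing both the loaded value and the updated base address.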
1051 unsigned Opcode = 0;
1053 ISD::LoadExtType ExtType = LD->getExtensionType();
1054 bool InsertTo64 = false;
1056 Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost;
1057 else if (VT == MVT::i32) {
1058 if (ExtType == ISD::NON_EXTLOAD)
1059 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
1060 else if (ExtType == ISD::SEXTLOAD)
1061 Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost;
1063 Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
1065 // The result of the load is only i32. It's the subreg_to_reg that makes
1069 } else if (VT == MVT::i16) {
1070 if (ExtType == ISD::SEXTLOAD) {
1071 if (DstVT == MVT::i64)
1072 Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost;
1074 Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost;
1076 Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost;
1077 InsertTo64 = DstVT == MVT::i64;
1078 // The result of the load is only i32. It's the subreg_to_reg that makes
1082 } else if (VT == MVT::i8) {
1083 if (ExtType == ISD::SEXTLOAD) {
1084 if (DstVT == MVT::i64)
1085 Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost;
1087 Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost;
1089 Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost;
1090 InsertTo64 = DstVT == MVT::i64;
1091 // The result of the load is only i32. It's the subreg_to_reg that makes
1095 } else if (VT == MVT::f16) {
1096 Opcode = IsPre ? AArch64::LDRHpre : AArch64::LDRHpost;
1097 } else if (VT == MVT::f32) {
1098 Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost;
1099 } else if (VT == MVT::f64 || VT.is64BitVector()) {
1100 Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost;
1101 } else if (VT.is128BitVector()) {
1102 Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
1105 SDValue Chain = LD->getChain();
1106 SDValue Base = LD->getBasePtr();
1107 ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
1108 int OffsetVal = (int)OffsetOp->getZExtValue();
1110 SDValue Offset = CurDAG->getTargetConstant(OffsetVal, dl, MVT::i64);
1111 SDValue Ops[] = { Base, Offset, Chain };
1112 SDNode *Res = CurDAG->getMachineNode(Opcode, dl, MVT::i64, DstVT,
1114 // Either way, we're replacing the node, so tell the caller that.
1116 SDValue LoadedVal = SDValue(Res, 1);
1118 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
1120 SDValue(CurDAG->getMachineNode(
1121 AArch64::SUBREG_TO_REG, dl, MVT::i64,
1122 CurDAG->getTargetConstant(0, dl, MVT::i64), LoadedVal,
1127 ReplaceUses(SDValue(N, 0), LoadedVal);
1128 ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
1129 ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
1134 SDNode *AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs,
1135 unsigned Opc, unsigned SubRegIdx) {
1137 EVT VT = N->getValueType(0);
1138 SDValue Chain = N->getOperand(0);
1140 SDValue Ops[] = {N->getOperand(2), // Mem operand;
1143 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
1145 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1146 SDValue SuperReg = SDValue(Ld, 0);
1147 for (unsigned i = 0; i < NumVecs; ++i)
1148 ReplaceUses(SDValue(N, i),
1149 CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
1151 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
1155 SDNode *AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
1156 unsigned Opc, unsigned SubRegIdx) {
1158 EVT VT = N->getValueType(0);
1159 SDValue Chain = N->getOperand(0);
1161 SDValue Ops[] = {N->getOperand(1), // Mem operand
1162 N->getOperand(2), // Incremental
1165 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1166 MVT::Untyped, MVT::Other};
1168 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1170 // Update uses of write back register
1171 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
1173 // Update uses of vector list
1174 SDValue SuperReg = SDValue(Ld, 1);
1176 ReplaceUses(SDValue(N, 0), SuperReg);
1178 for (unsigned i = 0; i < NumVecs; ++i)
1179 ReplaceUses(SDValue(N, i),
1180 CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));
1183 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
1187 SDNode *AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
1190 EVT VT = N->getOperand(2)->getValueType(0);
1192 // Form a REG_SEQUENCE to force register allocation.
1193 bool Is128Bit = VT.getSizeInBits() == 128;
1194 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1195 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
1197 SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)};
1198 SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);
1203 SDNode *AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
1206 EVT VT = N->getOperand(2)->getValueType(0);
1207 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1208 MVT::Other}; // Type for the Chain
1210 // Form a REG_SEQUENCE to force register allocation.
1211 bool Is128Bit = VT.getSizeInBits() == 128;
1212 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1213 SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);
1215 SDValue Ops[] = {RegSeq,
1216 N->getOperand(NumVecs + 1), // base register
1217 N->getOperand(NumVecs + 2), // Incremental
1218 N->getOperand(0)}; // Chain
1219 SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1225 /// WidenVector - Given a value in the V64 register class, produce the
1226 /// equivalent value in the V128 register class.
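/// For example (illustrative), a v2i32 value in a D register becomes the low
/// (dsub) half of an otherwise undefined v4i32 Q register.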
1231 WidenVector(SelectionDAG &DAG) : DAG(DAG) {}
1233 SDValue operator()(SDValue V64Reg) {
1234 EVT VT = V64Reg.getValueType();
1235 unsigned NarrowSize = VT.getVectorNumElements();
1236 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1237 MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
1241 SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
1242 return DAG.getTargetInsertSubreg(AArch64::dsub, DL, WideTy, Undef, V64Reg);
1247 /// NarrowVector - Given a value in the V128 register class, produce the
1248 /// equivalent value in the V64 register class.
1249 static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
1250 EVT VT = V128Reg.getValueType();
1251 unsigned WideSize = VT.getVectorNumElements();
1252 MVT EltTy = VT.getVectorElementType().getSimpleVT();
1253 MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);
1255 return DAG.getTargetExtractSubreg(AArch64::dsub, SDLoc(V128Reg), NarrowTy,
1259 SDNode *AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
1262 EVT VT = N->getValueType(0);
1263 bool Narrow = VT.getSizeInBits() == 64;
1265 // Form a REG_SEQUENCE to force register allocation.
1266 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1269 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1270 WidenVector(*CurDAG));
1272 SDValue RegSeq = createQTuple(Regs);
1274 const EVT ResTys[] = {MVT::Untyped, MVT::Other};
1277 cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
1279 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
1280 N->getOperand(NumVecs + 3), N->getOperand(0)};
1281 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1282 SDValue SuperReg = SDValue(Ld, 0);
1284 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
1285 static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
1286 AArch64::qsub2, AArch64::qsub3 };
1287 for (unsigned i = 0; i < NumVecs; ++i) {
1288 SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
1290 NV = NarrowVector(NV, *CurDAG);
1291 ReplaceUses(SDValue(N, i), NV);
1294 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
1299 SDNode *AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
1302 EVT VT = N->getValueType(0);
1303 bool Narrow = VT.getSizeInBits() == 64;
1305 // Form a REG_SEQUENCE to force register allocation.
1306 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1309 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1310 WidenVector(*CurDAG));
1312 SDValue RegSeq = createQTuple(Regs);
1314 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1315 RegSeq->getValueType(0), MVT::Other};
1318 cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
1320 SDValue Ops[] = {RegSeq,
1321 CurDAG->getTargetConstant(LaneNo, dl,
1322 MVT::i64), // Lane Number
1323 N->getOperand(NumVecs + 2), // Base register
1324 N->getOperand(NumVecs + 3), // Incremental
1326 SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1328 // Update uses of the write back register
1329 ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));
1331 // Update uses of the vector list
1332 SDValue SuperReg = SDValue(Ld, 1);
1334 ReplaceUses(SDValue(N, 0),
1335 Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg);
1337 EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
1338 static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
1339 AArch64::qsub2, AArch64::qsub3 };
1340 for (unsigned i = 0; i < NumVecs; ++i) {
1341 SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
1344 NV = NarrowVector(NV, *CurDAG);
1345 ReplaceUses(SDValue(N, i), NV);
1350 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
1355 SDNode *AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
1358 EVT VT = N->getOperand(2)->getValueType(0);
1359 bool Narrow = VT.getSizeInBits() == 64;
1361 // Form a REG_SEQUENCE to force register allocation.
1362 SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
1365 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1366 WidenVector(*CurDAG));
1368 SDValue RegSeq = createQTuple(Regs);
1371 cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
1373 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
1374 N->getOperand(NumVecs + 3), N->getOperand(0)};
1375 SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
1377 // Transfer memoperands.
1378 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1379 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1380 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
1385 SDNode *AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
1388 EVT VT = N->getOperand(2)->getValueType(0);
1389 bool Narrow = VT.getSizeInBits() == 64;
1391 // Form a REG_SEQUENCE to force register allocation.
1392 SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
1395 std::transform(Regs.begin(), Regs.end(), Regs.begin(),
1396 WidenVector(*CurDAG));
1398 SDValue RegSeq = createQTuple(Regs);
1400 const EVT ResTys[] = {MVT::i64, // Type of the write back register
1404 cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
1406 SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
1407 N->getOperand(NumVecs + 2), // Base Register
1408 N->getOperand(NumVecs + 3), // Incremental
1410 SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1412 // Transfer memoperands.
1413 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1414 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1415 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
1420 static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
1421 unsigned &Opc, SDValue &Opd0,
1422 unsigned &LSB, unsigned &MSB,
1423 unsigned NumberOfIgnoredLowBits,
1424 bool BiggerPattern) {
1425 assert(N->getOpcode() == ISD::AND &&
"N must be an AND operation to call this function");
1428 EVT VT = N->getValueType(0);
// We could test the type of VT here and return false when the type does not
// match, but since that check is already done prior to this call in the
// current context, we turn it into an assert to avoid redundant code.
1433 assert((VT == MVT::i32 || VT == MVT::i64) &&
1434 "Type checking must have been done before calling this function");
1436 // FIXME: simplify-demanded-bits in DAGCombine will probably have
1437 // changed the AND node to a 32-bit mask operation. We'll have to
1438 // undo that as part of the transform here if we want to catch all
1439 // the opportunities.
// Currently the NumberOfIgnoredLowBits argument helps to recover
// from these situations when matching a bigger pattern (bitfield insert).
1443 // For unsigned extracts, check for a shift right and mask
1444 uint64_t And_imm = 0;
1445 if (!isOpcWithIntImmediate(N, ISD::AND, And_imm))
1448 const SDNode *Op0 = N->getOperand(0).getNode();
1450 // Because of simplify-demanded-bits in DAGCombine, the mask may have been
1451 // simplified. Try to undo that
1452 And_imm |= (1 << NumberOfIgnoredLowBits) - 1;
1454 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
1455 if (And_imm & (And_imm + 1))
1458 bool ClampMSB = false;
1459 uint64_t Srl_imm = 0;
1460 // Handle the SRL + ANY_EXTEND case.
1461 if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
1462 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, Srl_imm)) {
1463 // Extend the incoming operand of the SRL to 64-bit.
1464 Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
1465 // Make sure to clamp the MSB so that we preserve the semantics of the
1466 // original operations.
1468 } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
1469 isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
1471 // If the shift result was truncated, we can still combine them.
1472 Opd0 = Op0->getOperand(0).getOperand(0);
// Use the type of the SRL node.
1475 VT = Opd0->getValueType(0);
1476 } else if (isOpcWithIntImmediate(Op0, ISD::SRL, Srl_imm)) {
1477 Opd0 = Op0->getOperand(0);
1478 } else if (BiggerPattern) {
1479 // Let's pretend a 0 shift right has been performed.
// The resulting code will be at least as good as the original one,
// and it may expose more opportunities for the bitfield insert pattern.
1482 // FIXME: Currently we limit this to the bigger pattern, because
1483 // some optimizations expect AND and not UBFM.
1484 Opd0 = N->getOperand(0);
1488 // Bail out on large immediates. This happens when no proper
1489 // combining/constant folding was performed.
1490 if (!BiggerPattern && (Srl_imm <= 0 || Srl_imm >= VT.getSizeInBits())) {
1492 << ": Found large shift immediate, this should not happen\n"));
1497 MSB = Srl_imm + (VT == MVT::i32 ? countTrailingOnes<uint32_t>(And_imm)
1498 : countTrailingOnes<uint64_t>(And_imm)) -
1501 // Since we're moving the extend before the right shift operation, we need
1502 // to clamp the MSB to make sure we don't shift in undefined bits instead of
// the zeros which would get shifted in with the original right shift
// operation.
1505 MSB = MSB > 31 ? 31 : MSB;
1507 Opc = VT == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
1511 static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
1512 SDValue &Opd0, unsigned &LSB,
// We are looking for the following pattern, which basically extracts several
// contiguous bits from the source value and places them at the LSB of the
// destination value; all other bits of the destination value are set to zero:
1518 // Value2 = AND Value, MaskImm
1519 // SRL Value2, ShiftImm
// where MaskImm >> ShiftImm determines the width of the extracted bit field.
1523 // This gets selected into a single UBFM:
// UBFM Value, ShiftImm, BitWide + Srl_imm - 1
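// Illustrative example (not from the original source): for
// (srl (and x, 0xff0), 4) the shifted mask is 0xff, so BitWide == 8 and the
// result is "UBFM x, 4, 11", extracting bits [11:4] of x.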
1528 if (N->getOpcode() != ISD::SRL)
1531 uint64_t And_mask = 0;
1532 if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, And_mask))
1535 Opd0 = N->getOperand(0).getOperand(0);
1537 uint64_t Srl_imm = 0;
1538 if (!isIntImmediate(N->getOperand(1), Srl_imm))
// Check whether we really have a multi-bit extract here.
1542 unsigned BitWide = 64 - countLeadingOnes(~(And_mask >> Srl_imm));
1543 if (BitWide && isMask_64(And_mask >> Srl_imm)) {
1544 if (N->getValueType(0) == MVT::i32)
1545 Opc = AArch64::UBFMWri;
1547 Opc = AArch64::UBFMXri;
1550 MSB = BitWide + Srl_imm - 1;
1557 static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
1558 unsigned &Immr, unsigned &Imms,
1559 bool BiggerPattern) {
1560 assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
1561 "N must be a SHR/SRA operation to call this function");
1563 EVT VT = N->getValueType(0);
// We could test the type of VT here and return false when the type does not
// match, but since that check is already done prior to this call in the
// current context, we turn it into an assert to avoid redundant code.
1568 assert((VT == MVT::i32 || VT == MVT::i64) &&
1569 "Type checking must have been done before calling this function");
1571 // Check for AND + SRL doing several bits extract.
1572 if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, Immr, Imms))
// We're looking for a shift of a shift.
1576 uint64_t Shl_imm = 0;
1577 uint64_t Trunc_bits = 0;
1578 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
1579 Opd0 = N->getOperand(0).getOperand(0);
1580 } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
1581 N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
// We are looking for a shift of a truncate. Truncating from i64 to i32 can
// be considered as setting the high 32 bits to zero. Our strategy here is to
// always generate a 64-bit UBFM. This consistency will help the CSE pass
// later find more redundancy.
1586 Opd0 = N->getOperand(0).getOperand(0);
1587 Trunc_bits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
1588 VT = Opd0->getValueType(0);
1589 assert(VT == MVT::i64 && "the promoted type should be i64");
1590 } else if (BiggerPattern) {
1591 // Let's pretend a 0 shift left has been performed.
1592 // FIXME: Currently we limit this to the bigger pattern case,
1593 // because some optimizations expect AND and not UBFM
1594 Opd0 = N->getOperand(0);
// Missing combines/constant folding may have left us with strange constants.
1600 if (Shl_imm >= VT.getSizeInBits()) {
1602 << ": Found large shift immediate, this should not happen\n"));
1606 uint64_t Srl_imm = 0;
1607 if (!isIntImmediate(N->getOperand(1), Srl_imm))
1610 assert(Srl_imm > 0 && Srl_imm < VT.getSizeInBits() &&
1611 "bad amount in shift node!");
1612 int immr = Srl_imm - Shl_imm;
1613 Immr = immr < 0 ? immr + VT.getSizeInBits() : immr;
1614 Imms = VT.getSizeInBits() - Shl_imm - Trunc_bits - 1;
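// Illustrative example (not from the original source): for the i32 pattern
// (srl (shl x, 8), 12) this gives Immr = 12 - 8 = 4 and
// Imms = 32 - 8 - 0 - 1 = 23, i.e. "UBFM x, 4, 23", which extracts
// bits [23:4] of x.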
1615 // SRA requires a signed extraction
1617 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri;
1619 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri;
1623 static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
1624 SDValue &Opd0, unsigned &Immr, unsigned &Imms,
1625 unsigned NumberOfIgnoredLowBits = 0,
1626 bool BiggerPattern = false) {
1627 if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64)
1630 switch (N->getOpcode()) {
1632 if (!N->isMachineOpcode())
1636 return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, Immr, Imms,
1637 NumberOfIgnoredLowBits, BiggerPattern);
1640 return isBitfieldExtractOpFromShr(N, Opc, Opd0, Immr, Imms, BiggerPattern);
1643 unsigned NOpc = N->getMachineOpcode();
1647 case AArch64::SBFMWri:
1648 case AArch64::UBFMWri:
1649 case AArch64::SBFMXri:
1650 case AArch64::UBFMXri:
1652 Opd0 = N->getOperand(0);
1653 Immr = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
1654 Imms = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
1661 SDNode *AArch64DAGToDAGISel::SelectBitfieldExtractOp(SDNode *N) {
1662 unsigned Opc, Immr, Imms;
1664 if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, Immr, Imms))
1667 EVT VT = N->getValueType(0);
// If the bit extract operation is 64-bit but the original type is 32-bit, we
// need to add an EXTRACT_SUBREG.
1672 if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
1673 SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, MVT::i64),
1674 CurDAG->getTargetConstant(Imms, dl, MVT::i64)};
1676 SDNode *BFM = CurDAG->getMachineNode(Opc, dl, MVT::i64, Ops64);
1677 SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
1678 MachineSDNode *Node =
1679 CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, MVT::i32,
1680 SDValue(BFM, 0), SubReg);
1684 SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
1685 CurDAG->getTargetConstant(Imms, dl, VT)};
1686 return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
1689 /// Does DstMask form a complementary pair with the mask provided by
/// BitsToBeInserted, suitable for use in a BFI instruction? Roughly speaking,
/// this asks whether DstMask zeroes precisely those bits that will be set by
/// the other half.
1693 static bool isBitfieldDstMask(uint64_t DstMask, APInt BitsToBeInserted,
1694 unsigned NumberOfIgnoredHighBits, EVT VT) {
1695 assert((VT == MVT::i32 || VT == MVT::i64) &&
1696 "i32 or i64 mask type expected!");
1697 unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;
1699 APInt SignificantDstMask = APInt(BitWidth, DstMask);
1700 APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);
1702 return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
1703 (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue();
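// Illustrative example (not from the original source): for a 32-bit BFI,
// DstMask == 0xffff000f and inserted bits == 0x0000fff0 are complementary
// (they do not overlap and together cover all 32 bits), so the AND can be
// folded away.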
// Look for bits that will be useful for later uses.
// A bit is considered useless as soon as it is dropped and never used
// again before it has been dropped.
// E.g., looking for the useful bits of x:
//   1. y = x & 0x7
//   2. z = y >> 2
// After #1, the useful bits of x are 0x7; they live on through y.
// After #2, the useful bits of x are 0x4.
// However, if x is used by an unpredictable instruction, then all its bits
// are useful.
1721 static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);
1723 static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
1726 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
1727 Imm = AArch64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth());
1728 UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm);
1729 getUsefulBits(Op, UsefulBits, Depth + 1);
1732 static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
1733 uint64_t Imm, uint64_t MSB,
1735 // inherit the bitwidth value
1736 APInt OpUsefulBits(UsefulBits);
1740 OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
1742 // The interesting part will be in the lower part of the result
1743 getUsefulBits(Op, OpUsefulBits, Depth + 1);
1744 // The interesting part was starting at Imm in the argument
1745 OpUsefulBits = OpUsefulBits.shl(Imm);
1747 OpUsefulBits = OpUsefulBits.shl(MSB + 1);
1749 // The interesting part will be shifted in the result
1750 OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
1751 getUsefulBits(Op, OpUsefulBits, Depth + 1);
1752 // The interesting part was at zero in the argument
1753 OpUsefulBits = OpUsefulBits.lshr(OpUsefulBits.getBitWidth() - Imm);
1756 UsefulBits &= OpUsefulBits;
1759 static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
1762 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue();
1764 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1766 getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
1769 static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
1771 uint64_t ShiftTypeAndValue =
1772 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1773 APInt Mask(UsefulBits);
1774 Mask.clearAllBits();
1777 if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) {
1779 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
1780 Mask = Mask.shl(ShiftAmt);
1781 getUsefulBits(Op, Mask, Depth + 1);
1782 Mask = Mask.lshr(ShiftAmt);
1783 } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
1785 // We do not handle AArch64_AM::ASR, because the sign will change the
1786 // number of useful bits
1787 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
1788 Mask = Mask.lshr(ShiftAmt);
1789 getUsefulBits(Op, Mask, Depth + 1);
1790 Mask = Mask.shl(ShiftAmt);
1797 static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
1800 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
1802 cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue();
1804 if (Op.getOperand(1) == Orig)
1805 return getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth);
1807 APInt OpUsefulBits(UsefulBits);
1811 OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1);
1813 UsefulBits &= ~OpUsefulBits;
1814 getUsefulBits(Op, UsefulBits, Depth + 1);
1816 OpUsefulBits = OpUsefulBits.shl(MSB + 1);
1818 UsefulBits = ~(OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm));
1819 getUsefulBits(Op, UsefulBits, Depth + 1);
1823 static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
1824 SDValue Orig, unsigned Depth) {
1826 // Users of this node should have already been instruction selected
1827 // FIXME: Can we turn that into an assert?
1828 if (!UserNode->isMachineOpcode())
1831 switch (UserNode->getMachineOpcode()) {
1834 case AArch64::ANDSWri:
1835 case AArch64::ANDSXri:
1836 case AArch64::ANDWri:
1837 case AArch64::ANDXri:
1838 // We increment Depth only when we call getUsefulBits.
1839 return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
1841 case AArch64::UBFMWri:
1842 case AArch64::UBFMXri:
1843 return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);
1845 case AArch64::ORRWrs:
1846 case AArch64::ORRXrs:
1847 if (UserNode->getOperand(1) != Orig)
1849 return getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
1851 case AArch64::BFMWri:
1852 case AArch64::BFMXri:
1853 return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);
1857 static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
1860 // Initialize UsefulBits
1862 unsigned Bitwidth = Op.getValueType().getScalarType().getSizeInBits();
1863 // At the beginning, assume every produced bit is useful.
1864 UsefulBits = APInt(Bitwidth, 0);
1865 UsefulBits.flipAllBits();
1867 APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);
1869 for (SDNode *Node : Op.getNode()->uses()) {
1870 // A use cannot produce useful bits
1871 APInt UsefulBitsForUse = APInt(UsefulBits);
1872 getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
1873 UsersUsefulBits |= UsefulBitsForUse;
1875 // UsefulBits contains the produced bits that are meaningful for the
1876 // current definition, thus a user cannot make a bit meaningful at
1877 // this point.
1878 UsefulBits &= UsersUsefulBits;
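// Worked example (illustrative only), matching the getUsefulBits comment above:
// if the only user of x is "y = x & 0x7" and the only user of y is "z = y >> 2",
// then the bits of x still useful after both uses are the bits kept by the AND
// intersected with the bits not dropped by the shift, i.e. 0x4.
static_assert((0x7u & ~0x3u) == 0x4u,
              "bit 2 is the only bit of x still useful after both uses");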
1881 /// Create a machine node performing a notional SHL of Op by ShlAmount. If
1882 /// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
1883 /// 0, return Op unchanged.
1884 static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
1888 EVT VT = Op.getValueType();
1890 unsigned BitWidth = VT.getSizeInBits();
1891 unsigned UBFMOpc = BitWidth == 32 ? AArch64::UBFMWri : AArch64::UBFMXri;
1894 if (ShlAmount > 0) {
1895 // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt
1896 ShiftNode = CurDAG->getMachineNode(
1897 UBFMOpc, dl, VT, Op,
1898 CurDAG->getTargetConstant(BitWidth - ShlAmount, dl, VT),
1899 CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, dl, VT));
1901 // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1
1902 assert(ShlAmount < 0 && "expected right shift");
1903 int ShrAmount = -ShlAmount;
1904 ShiftNode = CurDAG->getMachineNode(
1905 UBFMOpc, dl, VT, Op, CurDAG->getTargetConstant(ShrAmount, dl, VT),
1906 CurDAG->getTargetConstant(BitWidth - 1, dl, VT));
1909 return SDValue(ShiftNode, 0);
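// Worked example (illustrative only, mirroring the identity used above): for a
// 32-bit left shift by 3, "lsl wD, wN, #3" is encoded as "ubfm wD, wN, #29, #28",
// since immr == BitWidth - Amt and imms == BitWidth - 1 - Amt.
static_assert((32 - 3) == 29 && (32 - 1 - 3) == 28,
              "LSL-as-UBFM immediate computation for a 32-bit shift by 3");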
1912 /// Does this tree qualify as an attempt to move a bitfield into position,
1913 /// essentially "(and (shl VAL, N), Mask)".
1914 static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
1916 SDValue &Src, int &ShiftAmount,
1918 EVT VT = Op.getValueType();
1919 unsigned BitWidth = VT.getSizeInBits();
1921 assert(BitWidth == 32 || BitWidth == 64);
1923 APInt KnownZero, KnownOne;
1924 CurDAG->computeKnownBits(Op, KnownZero, KnownOne);
1926 // Non-zero in the sense that they're not provably zero, which is the key
1927 // point if we want to use this value
1928 uint64_t NonZeroBits = (~KnownZero).getZExtValue();
1930 // Discard a constant AND mask if present. It's safe because the node will
1931 // already have been factored into the computeKnownBits calculation above.
1933 if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
1934 assert((~APInt(BitWidth, AndImm) & ~KnownZero) == 0);
1935 Op = Op.getOperand(0);
1938 // Don't match if the SHL has more than one use, since then we'll end up
1939 // generating SHL+UBFIZ instead of just keeping SHL+AND.
1940 if (!BiggerPattern && !Op.hasOneUse())
1944 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm))
1946 Op = Op.getOperand(0);
1948 if (!isShiftedMask_64(NonZeroBits))
1951 ShiftAmount = countTrailingZeros(NonZeroBits);
1952 MaskWidth = countTrailingOnes(NonZeroBits >> ShiftAmount);
1954 // BFI encompasses sufficiently many nodes that it's worth inserting an extra
1955 // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL
1956 // amount. BiggerPattern is true when this pattern is being matched for BFI,
1957 // BiggerPattern is false when this pattern is being matched for UBFIZ, in
1958 // which case it is not profitable to insert an extra shift.
1959 if (ShlImm - ShiftAmount != 0 && !BiggerPattern)
1961 Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount);
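// Worked example (illustrative only): for "(x << 3) & 0xf8" the provably
// non-zero bits form the shifted mask 0xf8, so the positioning logic above
// recovers ShiftAmount == 3 (trailing zeros of 0xf8) and MaskWidth == 5
// (trailing ones of 0xf8 >> 3 == 0x1f).
static_assert((0xf8u >> 3) == 0x1fu && 0x1fu == (1u << 5) - 1,
              "0xf8 is a 5-bit mask positioned 3 bits up");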
1966 // Given an OR operation, check if we have the following pattern:
1967 // ubfm c, b, imm, imm2 (or something that does the same job, see
1968 // isBitfieldExtractOp)
1969 // d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
1970 // countTrailingZeros(mask2) == imm2 - imm + 1
1971 // f = d | c
1972 // If yes, the given reference arguments will be updated so that one can replace
1973 // the OR instruction with:
1974 // f = Opc Opd0, Opd1, LSB, MSB ; where Opc is a BFM, LSB = imm, and MSB = imm2
1975 static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Dst,
1976 SDValue &Src, unsigned &ImmR,
1977 unsigned &ImmS, SelectionDAG *CurDAG) {
1978 assert(N->getOpcode() == ISD::OR && "Expect an OR operation");
1981 EVT VT = N->getValueType(0);
1983 Opc = AArch64::BFMWri;
1984 else if (VT == MVT::i64)
1985 Opc = AArch64::BFMXri;
1989 // Because of simplify-demanded-bits in DAGCombine, involved masks may not
1990 // have the expected shape. Try to undo that.
1992 getUsefulBits(SDValue(N, 0), UsefulBits);
1994 unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
1995 unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();
1997 // OR is commutative, check all combinations of operand order and values of
1998 // BiggerPattern, i.e.
1999 // Opd0, Opd1, BiggerPattern=false
2000 // Opd1, Opd0, BiggerPattern=false
2001 // Opd0, Opd1, BiggerPattern=true
2002 // Opd1, Opd0, BiggerPattern=true
2003 // Several of these combinations may match, so check with BiggerPattern=false
2004 // first since that will produce better results by matching more instructions
2005 // and/or inserting fewer extra instructions.
2006 for (int I = 0; I < 4; ++I) {
2008 bool BiggerPattern = I / 2;
2009 SDNode *OrOpd0 = N->getOperand(I % 2).getNode();
2010 SDValue OrOpd1Val = N->getOperand((I + 1) % 2);
2011 SDNode *OrOpd1 = OrOpd1Val.getNode();
2015 if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
2016 NumberOfIgnoredLowBits, BiggerPattern)) {
2017 // Check that the returned opcode is compatible with the pattern,
2018 // i.e., same type and zero extended (U and not S)
2019 if ((BFXOpc != AArch64::UBFMXri && VT == MVT::i64) ||
2020 (BFXOpc != AArch64::UBFMWri && VT == MVT::i32))
2023 // Compute the width of the bitfield insertion
2025 Width = ImmS - ImmR + 1;
2026 // FIXME: This constraint is to catch bitfield insertions; we may
2027 // want to widen the pattern if we want to grab general bitfield
2028 // insertion.
2032 // If the mask on the insertee is correct, we have a BFXIL operation. We
2033 // can share the ImmR and ImmS values from the already-computed UBFM.
2034 } else if (isBitfieldPositioningOp(CurDAG, SDValue(OrOpd0, 0),
2036 Src, DstLSB, Width)) {
2037 ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
2042 // Check the second part of the pattern
2043 EVT VT = OrOpd1->getValueType(0);
2044 assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");
2046 // Compute the Known Zero for the candidate of the first operand.
2047 // This allows us to catch more general cases than just looking for
2048 // AND with imm. Indeed, simplify-demanded-bits may have removed
2049 // the AND instruction because it proved it was useless.
2050 APInt KnownZero, KnownOne;
2051 CurDAG->computeKnownBits(OrOpd1Val, KnownZero, KnownOne);
2053 // Check if there is enough room for the second operand to appear
2054 // in the first one.
2055 APInt BitsToBeInserted =
2056 APInt::getBitsSet(KnownZero.getBitWidth(), DstLSB, DstLSB + Width);
2058 if ((BitsToBeInserted & ~KnownZero) != 0)
2061 // Set the first operand
2063 if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
2064 isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
2065 // In that case, we can eliminate the AND
2066 Dst = OrOpd1->getOperand(0);
2068 // Maybe the AND has been removed by simplify-demanded-bits
2069 // or is useful because it discards more bits
2079 SDNode *AArch64DAGToDAGISel::SelectBitfieldInsertOp(SDNode *N) {
2080 if (N->getOpcode() != ISD::OR)
2087 if (!isBitfieldInsertOpFromOr(N, Opc, Opd0, Opd1, LSB, MSB, CurDAG))
2090 EVT VT = N->getValueType(0);
2092 SDValue Ops[] = { Opd0,
2094 CurDAG->getTargetConstant(LSB, dl, VT),
2095 CurDAG->getTargetConstant(MSB, dl, VT) };
2096 return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2099 /// SelectBitfieldInsertInZeroOp - Match a UBFIZ instruction that is the
2100 /// equivalent of a left shift by a constant amount followed by an and masking
2101 /// out a contiguous set of bits.
2102 SDNode *AArch64DAGToDAGISel::SelectBitfieldInsertInZeroOp(SDNode *N) {
2103 if (N->getOpcode() != ISD::AND)
2106 EVT VT = N->getValueType(0);
2109 Opc = AArch64::UBFMWri;
2110 else if (VT == MVT::i64)
2111 Opc = AArch64::UBFMXri;
2117 if (!isBitfieldPositioningOp(CurDAG, SDValue(N, 0), /*BiggerPattern=*/false,
2118 Op0, DstLSB, Width))
2121 // ImmR is the rotate right amount.
2122 unsigned ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
2123 // ImmS is the most significant bit of the source to be moved.
2124 unsigned ImmS = Width - 1;
2127 SDValue Ops[] = {Op0, CurDAG->getTargetConstant(ImmR, DL, VT),
2128 CurDAG->getTargetConstant(ImmS, DL, VT)};
2129 return CurDAG->SelectNodeTo(N, Opc, VT, Ops);
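// Worked example (illustrative only): for "(x << 4) & 0xff0" on i32 the
// positioning check yields DstLSB == 4 and Width == 8, so the UBFM immediates
// computed above are ImmR == (32 - 4) % 32 == 28 and ImmS == 8 - 1 == 7,
// i.e. the node becomes "ubfiz w, w, #4, #8".
static_assert(((32 - 4) % 32) == 28 && (8 - 1) == 7,
              "UBFIZ immediates for DstLSB == 4 and Width == 8");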
2133 AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
2134 unsigned RegWidth) {
2136 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
2137 FVal = CN->getValueAPF();
2138 else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
2139 // Some otherwise illegal constants are allowed in this case.
2140 if (LN->getOperand(1).getOpcode() != AArch64ISD::ADDlow ||
2141 !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
2144 ConstantPoolSDNode *CN =
2145 dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
2146 FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
2150 // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
2151 // is between 1 and 32 for a destination w-register, or 1 and 64 for an
2152 // x-register.
2154 // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
2155 // want THIS_NODE to be 2^fbits. This is much easier to deal with using
2156 // integers.
2158 bool IsExact;
2159 // fbits is between 1 and 64 in the worst-case, which means the fmul
2160 // could have 2^64 as an actual operand. Need 65 bits of precision.
2161 APSInt IntVal(65, true);
2162 FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);
2164 // N.b. isPowerOf2 also checks for > 0.
2165 if (!IsExact || !IntVal.isPowerOf2()) return false;
2166 unsigned FBits = IntVal.logBase2();
2168 // Checks above should have guaranteed that we haven't lost information in
2169 // finding FBits, but it must still be in range.
2170 if (FBits == 0 || FBits > RegWidth) return false;
2172 FixedPos = CurDAG->getTargetConstant(FBits, SDLoc(N), MVT::i32);
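// Worked example (illustrative only): by this point the pattern is
// (fp_to_[su]int (fmul Val, C)); the code above only accepts C when it is an
// exact power of two, and its exponent becomes the fbits operand of FCVTZ[SU].
// For instance, C == 65536.0 gives fbits == 16.
static_assert(65536 == (1 << 16),
              "a multiplier of 2^16 corresponds to 16 fractional bits");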
2176 // Inspects a register string of the form o0:op1:CRn:CRm:op2, gets the fields
2177 // of the string, obtains the integer values from them, and combines these
2178 // into a single value to be used in the MRS/MSR instruction.
2179 static int getIntOperandFromRegisterString(StringRef RegString) {
2180 SmallVector<StringRef, 5> Fields;
2181 RegString.split(Fields, ':');
2183 if (Fields.size() == 1)
2186 assert(Fields.size() == 5
2187 && "Invalid number of fields in read register string");
2189 SmallVector<int, 5> Ops;
2190 bool AllIntFields = true;
2192 for (StringRef Field : Fields) {
2194 AllIntFields &= !Field.getAsInteger(10, IntField);
2195 Ops.push_back(IntField);
2198 assert(AllIntFields &&
2199 "Unexpected non-integer value in special register string.");
2201 // Need to combine the integer fields of the string into a single value
2202 // based on the bit encoding of the MRS/MSR instruction.
2203 return (Ops[0] << 14) | (Ops[1] << 11) | (Ops[2] << 7) |
2204 (Ops[3] << 3) | (Ops[4]);
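// Illustrative check (added sketch, not part of the lowering): for a register
// string such as "3:3:13:0:2" the packing above produces
// (3 << 14) | (3 << 11) | (13 << 7) | (0 << 3) | 2 == 0xde82, i.e. each field
// simply occupies its own slice of the MRS/MSR system-register immediate.
static_assert(((3 << 14) | (3 << 11) | (13 << 7) | (0 << 3) | 2) == 0xde82,
              "o0:op1:CRn:CRm:op2 fields pack into a single immediate");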
2207 // Lower the read_register intrinsic to an MRS instruction node if the special
2208 // register string argument is either of the form detailed in the ACLE (the
2209 // form described in getIntOperandFromRegisterString) or is a named register
2210 // known by the MRS SysReg mapper.
2211 SDNode *AArch64DAGToDAGISel::SelectReadRegister(SDNode *N) {
2212 const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
2213 const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
2216 int Reg = getIntOperandFromRegisterString(RegString->getString());
2218 return CurDAG->getMachineNode(AArch64::MRS, DL, N->getSimpleValueType(0),
2220 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2223 // Use the sysreg mapper to map the remaining possible strings to the
2224 // value for the register to be used for the instruction operand.
2225 AArch64SysReg::MRSMapper mapper;
2226 bool IsValidSpecialReg;
2227 Reg = mapper.fromString(RegString->getString(),
2228 Subtarget->getFeatureBits(),
2230 if (IsValidSpecialReg)
2231 return CurDAG->getMachineNode(AArch64::MRS, DL, N->getSimpleValueType(0),
2233 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2239 // Lower the write_register intrinsic to an MSR instruction node if the special
2240 // register string argument is either of the form detailed in the ACLE (the
2241 // form described in getIntOperandFromRegisterString) or is a named register
2242 // known by the MSR SysReg mapper.
2243 SDNode *AArch64DAGToDAGISel::SelectWriteRegister(SDNode *N) {
2244 const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
2245 const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
2248 int Reg = getIntOperandFromRegisterString(RegString->getString());
2250 return CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
2251 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2252 N->getOperand(2), N->getOperand(0));
2254 // Check if the register was one of those allowed as the pstatefield value in
2255 // the MSR (immediate) instruction. To accept the values allowed in the
2256 // pstatefield for the MSR (immediate) instruction, we also require that an
2257 // immediate value has been provided as an argument; we know this is the case,
2258 // as it has been ensured by semantic checking.
2259 AArch64PState::PStateMapper PMapper;
2260 bool IsValidSpecialReg;
2261 Reg = PMapper.fromString(RegString->getString(),
2262 Subtarget->getFeatureBits(),
2264 if (IsValidSpecialReg) {
2265 assert (isa<ConstantSDNode>(N->getOperand(2))
2266 && "Expected a constant integer expression.");
2267 uint64_t Immed = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
2269 if (Reg == AArch64PState::PAN || Reg == AArch64PState::UAO) {
2270 assert(Immed < 2 && "Bad imm");
2271 State = AArch64::MSRpstateImm1;
2273 assert(Immed < 16 && "Bad imm");
2274 State = AArch64::MSRpstateImm4;
2276 return CurDAG->getMachineNode(State, DL, MVT::Other,
2277 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2278 CurDAG->getTargetConstant(Immed, DL, MVT::i16),
2282 // Use the sysreg mapper to attempt to map the remaining possible strings
2283 // to the value for the register to be used for the MSR (register)
2284 // instruction operand.
2285 AArch64SysReg::MSRMapper Mapper;
2286 Reg = Mapper.fromString(RegString->getString(),
2287 Subtarget->getFeatureBits(),
2290 if (IsValidSpecialReg)
2291 return CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
2292 CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2293 N->getOperand(2), N->getOperand(0));
2298 SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
2299 // Dump information about the Node being selected
2300 DEBUG(errs() << "Selecting: ");
2301 DEBUG(Node->dump(CurDAG));
2302 DEBUG(errs() << "\n");
2304 // If we have a custom node, we already have selected!
2305 if (Node->isMachineOpcode()) {
2306 DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
2307 Node->setNodeId(-1);
2311 // A few custom selection cases.
2312 SDNode *ResNode = nullptr;
2313 EVT VT = Node->getValueType(0);
2315 switch (Node->getOpcode()) {
2319 case ISD::READ_REGISTER:
2320 if (SDNode *Res = SelectReadRegister(Node))
2324 case ISD::WRITE_REGISTER:
2325 if (SDNode *Res = SelectWriteRegister(Node))
2330 if (SDNode *I = SelectMLAV64LaneV128(Node))
2335 // Try to select as an indexed load. Fall through to normal processing
2336 // if we can't.
2337 bool Done;
2338 SDNode *I = SelectIndexedLoad(Node, Done);
2347 if (SDNode *I = SelectBitfieldExtractOp(Node))
2349 if (SDNode *I = SelectBitfieldInsertInZeroOp(Node))
2354 if (SDNode *I = SelectBitfieldInsertOp(Node))
2358 case ISD::EXTRACT_VECTOR_ELT: {
2359 // Extracting lane zero is a special case where we can just use a plain
2360 // EXTRACT_SUBREG instruction, which will become FMOV. This is easier for
2361 // the rest of the compiler, especially the register allocator and copy
2362 // propagation, to reason about, so is preferred when it's possible to
2363 // use it.
2364 ConstantSDNode *LaneNode = cast<ConstantSDNode>(Node->getOperand(1));
2365 // Bail and use the default Select() for non-zero lanes.
2366 if (LaneNode->getZExtValue() != 0)
2368 // If the element type is not the same as the result type, likewise
2369 // bail and use the default Select(), as there's more to do than just
2370 // a cross-class COPY. This catches extracts of i8 and i16 elements
2371 // since they will need an explicit zext.
2372 if (VT != Node->getOperand(0).getValueType().getVectorElementType())
2375 switch (Node->getOperand(0)
2377 .getVectorElementType()
2380 llvm_unreachable("Unexpected vector element type!");
2382 SubReg = AArch64::dsub;
2385 SubReg = AArch64::ssub;
2388 SubReg = AArch64::hsub;
2391 llvm_unreachable("unexpected zext-requiring extract element!");
2393 SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg, SDLoc(Node), VT,
2394 Node->getOperand(0));
2395 DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
2396 DEBUG(Extract->dumpr(CurDAG));
2397 DEBUG(dbgs() << "\n");
2398 return Extract.getNode();
2400 case ISD::Constant: {
2401 // Materialize zero constants as copies from WZR/XZR. This allows
2402 // the coalescer to propagate these into other instructions.
2403 ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
2404 if (ConstNode->isNullValue()) {
2406 return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
2407 AArch64::WZR, MVT::i32).getNode();
2408 else if (VT == MVT::i64)
2409 return CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
2410 AArch64::XZR, MVT::i64).getNode();
2415 case ISD::FrameIndex: {
2416 // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
2417 int FI = cast<FrameIndexSDNode>(Node)->getIndex();
2418 unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
2419 const TargetLowering *TLI = getTargetLowering();
2420 SDValue TFI = CurDAG->getTargetFrameIndex(
2421 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
2423 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, DL, MVT::i32),
2424 CurDAG->getTargetConstant(Shifter, DL, MVT::i32) };
2425 return CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops);
2427 case ISD::INTRINSIC_W_CHAIN: {
2428 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
2432 case Intrinsic::aarch64_ldaxp:
2433 case Intrinsic::aarch64_ldxp: {
2435 IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX;
2436 SDValue MemAddr = Node->getOperand(2);
2438 SDValue Chain = Node->getOperand(0);
2440 SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64,
2441 MVT::Other, MemAddr, Chain);
2443 // Transfer memoperands.
2444 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2445 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
2446 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
2449 case Intrinsic::aarch64_stlxp:
2450 case Intrinsic::aarch64_stxp: {
2452 IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX;
2454 SDValue Chain = Node->getOperand(0);
2455 SDValue ValLo = Node->getOperand(2);
2456 SDValue ValHi = Node->getOperand(3);
2457 SDValue MemAddr = Node->getOperand(4);
2459 // Place arguments in the right order.
2460 SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain};
2462 SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops);
2463 // Transfer memoperands.
2464 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2465 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand();
2466 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
2470 case Intrinsic::aarch64_neon_ld1x2:
2471 if (VT == MVT::v8i8)
2472 return SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0);
2473 else if (VT == MVT::v16i8)
2474 return SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0);
2475 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2476 return SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0);
2477 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2478 return SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0);
2479 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2480 return SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0);
2481 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2482 return SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0);
2483 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2484 return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
2485 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2486 return SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0);
2488 case Intrinsic::aarch64_neon_ld1x3:
2489 if (VT == MVT::v8i8)
2490 return SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0);
2491 else if (VT == MVT::v16i8)
2492 return SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0);
2493 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2494 return SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0);
2495 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2496 return SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0);
2497 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2498 return SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0);
2499 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2500 return SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0);
2501 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2502 return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
2503 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2504 return SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0);
2506 case Intrinsic::aarch64_neon_ld1x4:
2507 if (VT == MVT::v8i8)
2508 return SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0);
2509 else if (VT == MVT::v16i8)
2510 return SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0);
2511 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2512 return SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0);
2513 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2514 return SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0);
2515 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2516 return SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0);
2517 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2518 return SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0);
2519 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2520 return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
2521 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2522 return SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0);
2524 case Intrinsic::aarch64_neon_ld2:
2525 if (VT == MVT::v8i8)
2526 return SelectLoad(Node, 2, AArch64::LD2Twov8b, AArch64::dsub0);
2527 else if (VT == MVT::v16i8)
2528 return SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0);
2529 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2530 return SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0);
2531 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2532 return SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0);
2533 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2534 return SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0);
2535 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2536 return SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0);
2537 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2538 return SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0);
2539 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2540 return SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0);
2542 case Intrinsic::aarch64_neon_ld3:
2543 if (VT == MVT::v8i8)
2544 return SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0);
2545 else if (VT == MVT::v16i8)
2546 return SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0);
2547 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2548 return SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0);
2549 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2550 return SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0);
2551 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2552 return SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0);
2553 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2554 return SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0);
2555 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2556 return SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0);
2557 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2558 return SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0);
2560 case Intrinsic::aarch64_neon_ld4:
2561 if (VT == MVT::v8i8)
2562 return SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0);
2563 else if (VT == MVT::v16i8)
2564 return SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0);
2565 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2566 return SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0);
2567 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2568 return SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0);
2569 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2570 return SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0);
2571 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2572 return SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0);
2573 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2574 return SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0);
2575 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2576 return SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0);
2578 case Intrinsic::aarch64_neon_ld2r:
2579 if (VT == MVT::v8i8)
2580 return SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0);
2581 else if (VT == MVT::v16i8)
2582 return SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0);
2583 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2584 return SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0);
2585 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2586 return SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0);
2587 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2588 return SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0);
2589 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2590 return SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0);
2591 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2592 return SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0);
2593 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2594 return SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0);
2596 case Intrinsic::aarch64_neon_ld3r:
2597 if (VT == MVT::v8i8)
2598 return SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0);
2599 else if (VT == MVT::v16i8)
2600 return SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0);
2601 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2602 return SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0);
2603 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2604 return SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0);
2605 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2606 return SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0);
2607 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2608 return SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0);
2609 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2610 return SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0);
2611 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2612 return SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0);
2614 case Intrinsic::aarch64_neon_ld4r:
2615 if (VT == MVT::v8i8)
2616 return SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0);
2617 else if (VT == MVT::v16i8)
2618 return SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0);
2619 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2620 return SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0);
2621 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2622 return SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0);
2623 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2624 return SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0);
2625 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2626 return SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0);
2627 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2628 return SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0);
2629 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2630 return SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0);
2632 case Intrinsic::aarch64_neon_ld2lane:
2633 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2634 return SelectLoadLane(Node, 2, AArch64::LD2i8);
2635 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2637 return SelectLoadLane(Node, 2, AArch64::LD2i16);
2638 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2640 return SelectLoadLane(Node, 2, AArch64::LD2i32);
2641 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2643 return SelectLoadLane(Node, 2, AArch64::LD2i64);
2645 case Intrinsic::aarch64_neon_ld3lane:
2646 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2647 return SelectLoadLane(Node, 3, AArch64::LD3i8);
2648 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2650 return SelectLoadLane(Node, 3, AArch64::LD3i16);
2651 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2653 return SelectLoadLane(Node, 3, AArch64::LD3i32);
2654 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2656 return SelectLoadLane(Node, 3, AArch64::LD3i64);
2658 case Intrinsic::aarch64_neon_ld4lane:
2659 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2660 return SelectLoadLane(Node, 4, AArch64::LD4i8);
2661 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2663 return SelectLoadLane(Node, 4, AArch64::LD4i16);
2664 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2666 return SelectLoadLane(Node, 4, AArch64::LD4i32);
2667 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2669 return SelectLoadLane(Node, 4, AArch64::LD4i64);
2673 case ISD::INTRINSIC_WO_CHAIN: {
2674 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
2678 case Intrinsic::aarch64_neon_tbl2:
2679 return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBLv8i8Two
2680 : AArch64::TBLv16i8Two,
2682 case Intrinsic::aarch64_neon_tbl3:
2683 return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
2684 : AArch64::TBLv16i8Three,
2686 case Intrinsic::aarch64_neon_tbl4:
2687 return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
2688 : AArch64::TBLv16i8Four,
2690 case Intrinsic::aarch64_neon_tbx2:
2691 return SelectTable(Node, 2, VT == MVT::v8i8 ? AArch64::TBXv8i8Two
2692 : AArch64::TBXv16i8Two,
2694 case Intrinsic::aarch64_neon_tbx3:
2695 return SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
2696 : AArch64::TBXv16i8Three,
2698 case Intrinsic::aarch64_neon_tbx4:
2699 return SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
2700 : AArch64::TBXv16i8Four,
2702 case Intrinsic::aarch64_neon_smull:
2703 case Intrinsic::aarch64_neon_umull:
2704 if (SDNode *N = SelectMULLV64LaneV128(IntNo, Node))
2710 case ISD::INTRINSIC_VOID: {
2711 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
2712 if (Node->getNumOperands() >= 3)
2713 VT = Node->getOperand(2)->getValueType(0);
2717 case Intrinsic::aarch64_neon_st1x2: {
2718 if (VT == MVT::v8i8)
2719 return SelectStore(Node, 2, AArch64::ST1Twov8b);
2720 else if (VT == MVT::v16i8)
2721 return SelectStore(Node, 2, AArch64::ST1Twov16b);
2722 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2723 return SelectStore(Node, 2, AArch64::ST1Twov4h);
2724 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2725 return SelectStore(Node, 2, AArch64::ST1Twov8h);
2726 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2727 return SelectStore(Node, 2, AArch64::ST1Twov2s);
2728 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2729 return SelectStore(Node, 2, AArch64::ST1Twov4s);
2730 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2731 return SelectStore(Node, 2, AArch64::ST1Twov2d);
2732 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2733 return SelectStore(Node, 2, AArch64::ST1Twov1d);
2736 case Intrinsic::aarch64_neon_st1x3: {
2737 if (VT == MVT::v8i8)
2738 return SelectStore(Node, 3, AArch64::ST1Threev8b);
2739 else if (VT == MVT::v16i8)
2740 return SelectStore(Node, 3, AArch64::ST1Threev16b);
2741 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2742 return SelectStore(Node, 3, AArch64::ST1Threev4h);
2743 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2744 return SelectStore(Node, 3, AArch64::ST1Threev8h);
2745 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2746 return SelectStore(Node, 3, AArch64::ST1Threev2s);
2747 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2748 return SelectStore(Node, 3, AArch64::ST1Threev4s);
2749 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2750 return SelectStore(Node, 3, AArch64::ST1Threev2d);
2751 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2752 return SelectStore(Node, 3, AArch64::ST1Threev1d);
2755 case Intrinsic::aarch64_neon_st1x4: {
2756 if (VT == MVT::v8i8)
2757 return SelectStore(Node, 4, AArch64::ST1Fourv8b);
2758 else if (VT == MVT::v16i8)
2759 return SelectStore(Node, 4, AArch64::ST1Fourv16b);
2760 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2761 return SelectStore(Node, 4, AArch64::ST1Fourv4h);
2762 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2763 return SelectStore(Node, 4, AArch64::ST1Fourv8h);
2764 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2765 return SelectStore(Node, 4, AArch64::ST1Fourv2s);
2766 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2767 return SelectStore(Node, 4, AArch64::ST1Fourv4s);
2768 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2769 return SelectStore(Node, 4, AArch64::ST1Fourv2d);
2770 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2771 return SelectStore(Node, 4, AArch64::ST1Fourv1d);
2774 case Intrinsic::aarch64_neon_st2: {
2775 if (VT == MVT::v8i8)
2776 return SelectStore(Node, 2, AArch64::ST2Twov8b);
2777 else if (VT == MVT::v16i8)
2778 return SelectStore(Node, 2, AArch64::ST2Twov16b);
2779 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2780 return SelectStore(Node, 2, AArch64::ST2Twov4h);
2781 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2782 return SelectStore(Node, 2, AArch64::ST2Twov8h);
2783 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2784 return SelectStore(Node, 2, AArch64::ST2Twov2s);
2785 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2786 return SelectStore(Node, 2, AArch64::ST2Twov4s);
2787 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2788 return SelectStore(Node, 2, AArch64::ST2Twov2d);
2789 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2790 return SelectStore(Node, 2, AArch64::ST1Twov1d);
2793 case Intrinsic::aarch64_neon_st3: {
2794 if (VT == MVT::v8i8)
2795 return SelectStore(Node, 3, AArch64::ST3Threev8b);
2796 else if (VT == MVT::v16i8)
2797 return SelectStore(Node, 3, AArch64::ST3Threev16b);
2798 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2799 return SelectStore(Node, 3, AArch64::ST3Threev4h);
2800 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2801 return SelectStore(Node, 3, AArch64::ST3Threev8h);
2802 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2803 return SelectStore(Node, 3, AArch64::ST3Threev2s);
2804 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2805 return SelectStore(Node, 3, AArch64::ST3Threev4s);
2806 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2807 return SelectStore(Node, 3, AArch64::ST3Threev2d);
2808 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2809 return SelectStore(Node, 3, AArch64::ST1Threev1d);
2812 case Intrinsic::aarch64_neon_st4: {
2813 if (VT == MVT::v8i8)
2814 return SelectStore(Node, 4, AArch64::ST4Fourv8b);
2815 else if (VT == MVT::v16i8)
2816 return SelectStore(Node, 4, AArch64::ST4Fourv16b);
2817 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2818 return SelectStore(Node, 4, AArch64::ST4Fourv4h);
2819 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2820 return SelectStore(Node, 4, AArch64::ST4Fourv8h);
2821 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2822 return SelectStore(Node, 4, AArch64::ST4Fourv2s);
2823 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2824 return SelectStore(Node, 4, AArch64::ST4Fourv4s);
2825 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2826 return SelectStore(Node, 4, AArch64::ST4Fourv2d);
2827 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2828 return SelectStore(Node, 4, AArch64::ST1Fourv1d);
2831 case Intrinsic::aarch64_neon_st2lane: {
2832 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2833 return SelectStoreLane(Node, 2, AArch64::ST2i8);
2834 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2836 return SelectStoreLane(Node, 2, AArch64::ST2i16);
2837 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2839 return SelectStoreLane(Node, 2, AArch64::ST2i32);
2840 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2842 return SelectStoreLane(Node, 2, AArch64::ST2i64);
2845 case Intrinsic::aarch64_neon_st3lane: {
2846 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2847 return SelectStoreLane(Node, 3, AArch64::ST3i8);
2848 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2850 return SelectStoreLane(Node, 3, AArch64::ST3i16);
2851 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2853 return SelectStoreLane(Node, 3, AArch64::ST3i32);
2854 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2856 return SelectStoreLane(Node, 3, AArch64::ST3i64);
2859 case Intrinsic::aarch64_neon_st4lane: {
2860 if (VT == MVT::v16i8 || VT == MVT::v8i8)
2861 return SelectStoreLane(Node, 4, AArch64::ST4i8);
2862 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
2864 return SelectStoreLane(Node, 4, AArch64::ST4i16);
2865 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
2867 return SelectStoreLane(Node, 4, AArch64::ST4i32);
2868 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
2870 return SelectStoreLane(Node, 4, AArch64::ST4i64);
2876 case AArch64ISD::LD2post: {
2877 if (VT == MVT::v8i8)
2878 return SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
2879 else if (VT == MVT::v16i8)
2880 return SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
2881 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2882 return SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
2883 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2884 return SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
2885 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2886 return SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
2887 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2888 return SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0);
2889 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2890 return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
2891 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2892 return SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0);
2895 case AArch64ISD::LD3post: {
2896 if (VT == MVT::v8i8)
2897 return SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
2898 else if (VT == MVT::v16i8)
2899 return SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
2900 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2901 return SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
2902 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2903 return SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
2904 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2905 return SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
2906 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2907 return SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0);
2908 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2909 return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
2910 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2911 return SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0);
2914 case AArch64ISD::LD4post: {
2915 if (VT == MVT::v8i8)
2916 return SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
2917 else if (VT == MVT::v16i8)
2918 return SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
2919 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2920 return SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
2921 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2922 return SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
2923 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2924 return SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
2925 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2926 return SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0);
2927 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2928 return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
2929 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2930 return SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0);
2933 case AArch64ISD::LD1x2post: {
2934 if (VT == MVT::v8i8)
2935 return SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
2936 else if (VT == MVT::v16i8)
2937 return SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
2938 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2939 return SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
2940 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2941 return SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
2942 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2943 return SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
2944 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2945 return SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0);
2946 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2947 return SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
2948 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2949 return SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0);
2952 case AArch64ISD::LD1x3post: {
2953 if (VT == MVT::v8i8)
2954 return SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
2955 else if (VT == MVT::v16i8)
2956 return SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
2957 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2958 return SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
2959 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2960 return SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
2961 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2962 return SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
2963 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2964 return SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0);
2965 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2966 return SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
2967 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2968 return SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0);
2971 case AArch64ISD::LD1x4post: {
2972 if (VT == MVT::v8i8)
2973 return SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
2974 else if (VT == MVT::v16i8)
2975 return SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
2976 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2977 return SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
2978 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2979 return SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
2980 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
2981 return SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
2982 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
2983 return SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0);
2984 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
2985 return SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
2986 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
2987 return SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0);
2990 case AArch64ISD::LD1DUPpost: {
2991 if (VT == MVT::v8i8)
2992 return SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
2993 else if (VT == MVT::v16i8)
2994 return SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
2995 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
2996 return SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
2997 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
2998 return SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
2999 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3000 return SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
3001 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3002 return SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0);
3003 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3004 return SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0);
3005 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3006 return SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0);
3009 case AArch64ISD::LD2DUPpost: {
3010 if (VT == MVT::v8i8)
3011 return SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
3012 else if (VT == MVT::v16i8)
3013 return SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
3014 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3015 return SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
3016 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3017 return SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
3018 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3019 return SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
3020 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3021 return SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0);
3022 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3023 return SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0);
3024 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3025 return SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0);
3028 case AArch64ISD::LD3DUPpost: {
3029 if (VT == MVT::v8i8)
3030 return SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
3031 else if (VT == MVT::v16i8)
3032 return SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
3033 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3034 return SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
3035 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3036 return SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
3037 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3038 return SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
3039 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3040 return SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0);
3041 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3042 return SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0);
3043 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3044 return SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0);
3047 case AArch64ISD::LD4DUPpost: {
3048 if (VT == MVT::v8i8)
3049 return SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
3050 else if (VT == MVT::v16i8)
3051 return SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
3052 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3053 return SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
3054 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3055 return SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
3056 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3057 return SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
3058 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3059 return SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0);
3060 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3061 return SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0);
3062 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3063 return SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0);
3066 case AArch64ISD::LD1LANEpost: {
3067 if (VT == MVT::v16i8 || VT == MVT::v8i8)
3068 return SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
3069 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3071 return SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
3072 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3074 return SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
3075 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3077 return SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
3080 case AArch64ISD::LD2LANEpost: {
3081 if (VT == MVT::v16i8 || VT == MVT::v8i8)
3082 return SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
3083 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3085 return SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
3086 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3088 return SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
3089 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3091 return SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
3094 case AArch64ISD::LD3LANEpost: {
3095 if (VT == MVT::v16i8 || VT == MVT::v8i8)
3096 return SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
3097 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3099 return SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
3100 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3102 return SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
3103 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3105 return SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
3108 case AArch64ISD::LD4LANEpost: {
3109 if (VT == MVT::v16i8 || VT == MVT::v8i8)
3110 return SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
3111 else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
3113 return SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
3114 else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
3116 return SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
3117 else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
3119 return SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
3122 case AArch64ISD::ST2post: {
3123 VT = Node->getOperand(1).getValueType();
3124 if (VT == MVT::v8i8)
3125 return SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
3126 else if (VT == MVT::v16i8)
3127 return SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
3128 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3129 return SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
3130 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3131 return SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
3132 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3133 return SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
3134 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3135 return SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
3136 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3137 return SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
3138 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3139 return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
3142 case AArch64ISD::ST3post: {
3143 VT = Node->getOperand(1).getValueType();
3144 if (VT == MVT::v8i8)
3145 return SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
3146 else if (VT == MVT::v16i8)
3147 return SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
3148 else if (VT == MVT::v4i16 || VT == MVT::v4f16)
3149 return SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
3150 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3151 return SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
3152 else if (VT == MVT::v2i32 || VT == MVT::v2f32)
3153 return SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
3154 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3155 return SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
3156 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3157 return SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
3158 else if (VT == MVT::v1i64 || VT == MVT::v1f64)
3159 return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
  case AArch64ISD::ST4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
    break;
  }
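  // ST1x2/ST1x3/ST1x4 post-indexed stores (typically produced for the
  // st1x2/st1x3/st1x4 intrinsics) write consecutive registers without
  // interleaving, so every arrangement, including 1D, selects a
  // multi-register ST1 form.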
  case AArch64ISD::ST1x2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
    break;
  }
  case AArch64ISD::ST1x3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
    break;
  }
  case AArch64ISD::ST1x4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
    else if (VT == MVT::v16i8)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
    else if (VT == MVT::v4i16 || VT == MVT::v4f16)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v8f16)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
    else if (VT == MVT::v2i32 || VT == MVT::v2f32)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v4f32)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
    else if (VT == MVT::v1i64 || VT == MVT::v1f64)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v2f64)
      return SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
    break;
  }
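  // Single-lane post-indexed stores are dispatched purely on element size;
  // the lane number and any handling of 64-bit source vectors live in
  // SelectPostStoreLane, so several vector types share one instruction per
  // element width.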
  case AArch64ISD::ST2LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
    break;
  }
  case AArch64ISD::ST3LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
    break;
  }
  case AArch64ISD::ST4LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
    else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
             VT == MVT::v8f16)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
    else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
             VT == MVT::v2f32)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
    else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
             VT == MVT::v1f64)
      return SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
    break;
  }
  }
  // Select the default instruction using the TableGen-generated matcher.
  ResNode = SelectCode(Node);

  DEBUG(errs() << "=> ");
  if (ResNode == nullptr || ResNode == Node)
    DEBUG(Node->dump(CurDAG));
  else
    DEBUG(ResNode->dump(CurDAG));
  DEBUG(errs() << "\n");

  return ResNode;
}
/// createAArch64ISelDag - This pass converts a legalized DAG into an
/// AArch64-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
                                         CodeGenOpt::Level OptLevel) {
  return new AArch64DAGToDAGISel(TM, OptLevel);
}