//===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the ARM target.
//
//===----------------------------------------------------------------------===//
15 #include "ARMBaseInstrInfo.h"
16 #include "ARMTargetMachine.h"
17 #include "MCTargetDesc/ARMAddressingModes.h"
18 #include "llvm/CodeGen/MachineFrameInfo.h"
19 #include "llvm/CodeGen/MachineFunction.h"
20 #include "llvm/CodeGen/MachineInstrBuilder.h"
21 #include "llvm/CodeGen/MachineRegisterInfo.h"
22 #include "llvm/CodeGen/SelectionDAG.h"
23 #include "llvm/CodeGen/SelectionDAGISel.h"
24 #include "llvm/IR/CallingConv.h"
25 #include "llvm/IR/Constants.h"
26 #include "llvm/IR/DerivedTypes.h"
27 #include "llvm/IR/Function.h"
28 #include "llvm/IR/Intrinsics.h"
29 #include "llvm/IR/LLVMContext.h"
30 #include "llvm/Support/CommandLine.h"
31 #include "llvm/Support/Compiler.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Target/TargetLowering.h"
35 #include "llvm/Target/TargetOptions.h"
#define DEBUG_TYPE "arm-isel"

static cl::opt<bool>
DisableShifterOp("disable-shifter-op", cl::Hidden,
  cl::desc("Disable isel of shifter-op"),
  cl::init(false));

static cl::opt<bool>
CheckVMLxHazard("check-vmlx-hazard", cl::Hidden,
  cl::desc("Check fp vmla / vmls hazard at isel time"),
  cl::init(true));
//===--------------------------------------------------------------------===//
/// ARMDAGToDAGISel - ARM specific code to select ARM machine
/// instructions for SelectionDAG operations.
///
namespace {

enum AddrMode2Type {
  AM2_BASE, // Simple AM2 (+-imm12)
  AM2_SHOP  // Shifter-op AM2
};
62 class ARMDAGToDAGISel : public SelectionDAGISel {
63 ARMBaseTargetMachine &TM;
65 /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
66 /// make the right decision when generating code for different targets.
67 const ARMSubtarget *Subtarget;
70 explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm,
71 CodeGenOpt::Level OptLevel)
72 : SelectionDAGISel(tm, OptLevel), TM(tm),
73 Subtarget(&TM.getSubtarget<ARMSubtarget>()) {
  bool runOnMachineFunction(MachineFunction &MF) override {
    // Reset the subtarget each time through.
    Subtarget = &TM.getSubtarget<ARMSubtarget>();
    SelectionDAGISel::runOnMachineFunction(MF);
    return true;
  }
83 const char *getPassName() const override {
84 return "ARM Instruction Selection";
87 void PreprocessISelDAG() override;
  /// getI32Imm - Return a target constant of type i32 with the specified
  /// value.
  inline SDValue getI32Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i32);
  }
95 SDNode *Select(SDNode *N) override;
98 bool hasNoVMLxHazardUse(SDNode *N) const;
99 bool isShifterOpProfitable(const SDValue &Shift,
100 ARM_AM::ShiftOpc ShOpcVal, unsigned ShAmt);
101 bool SelectRegShifterOperand(SDValue N, SDValue &A,
102 SDValue &B, SDValue &C,
103 bool CheckProfitability = true);
104 bool SelectImmShifterOperand(SDValue N, SDValue &A,
105 SDValue &B, bool CheckProfitability = true);
106 bool SelectShiftRegShifterOperand(SDValue N, SDValue &A,
107 SDValue &B, SDValue &C) {
108 // Don't apply the profitability check
109 return SelectRegShifterOperand(N, A, B, C, false);
111 bool SelectShiftImmShifterOperand(SDValue N, SDValue &A,
113 // Don't apply the profitability check
114 return SelectImmShifterOperand(N, A, B, false);
117 bool SelectAddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
118 bool SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset, SDValue &Opc);
120 AddrMode2Type SelectAddrMode2Worker(SDValue N, SDValue &Base,
121 SDValue &Offset, SDValue &Opc);
122 bool SelectAddrMode2Base(SDValue N, SDValue &Base, SDValue &Offset,
124 return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_BASE;
127 bool SelectAddrMode2ShOp(SDValue N, SDValue &Base, SDValue &Offset,
129 return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_SHOP;
  bool SelectAddrMode2(SDValue N, SDValue &Base, SDValue &Offset,
                       SDValue &Opc) {
    SelectAddrMode2Worker(N, Base, Offset, Opc);
    // return SelectAddrMode2ShOp(N, Base, Offset, Opc);
    // This always matches one way or another.
    return true;
  }
  bool SelectCMOVPred(SDValue N, SDValue &Pred, SDValue &Reg) {
    const ConstantSDNode *CN = cast<ConstantSDNode>(N);
    Pred = CurDAG->getTargetConstant(CN->getZExtValue(), MVT::i32);
    Reg = CurDAG->getRegister(ARM::CPSR, MVT::i32);
    return true;
  }
147 bool SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
148 SDValue &Offset, SDValue &Opc);
149 bool SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
150 SDValue &Offset, SDValue &Opc);
151 bool SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
152 SDValue &Offset, SDValue &Opc);
153 bool SelectAddrOffsetNone(SDValue N, SDValue &Base);
154 bool SelectAddrMode3(SDValue N, SDValue &Base,
155 SDValue &Offset, SDValue &Opc);
156 bool SelectAddrMode3Offset(SDNode *Op, SDValue N,
157 SDValue &Offset, SDValue &Opc);
158 bool SelectAddrMode5(SDValue N, SDValue &Base,
160 bool SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,SDValue &Align);
161 bool SelectAddrMode6Offset(SDNode *Op, SDValue N, SDValue &Offset);
163 bool SelectAddrModePC(SDValue N, SDValue &Offset, SDValue &Label);
165 // Thumb Addressing Modes:
166 bool SelectThumbAddrModeRR(SDValue N, SDValue &Base, SDValue &Offset);
167 bool SelectThumbAddrModeRI(SDValue N, SDValue &Base, SDValue &Offset,
169 bool SelectThumbAddrModeRI5S1(SDValue N, SDValue &Base, SDValue &Offset);
170 bool SelectThumbAddrModeRI5S2(SDValue N, SDValue &Base, SDValue &Offset);
171 bool SelectThumbAddrModeRI5S4(SDValue N, SDValue &Base, SDValue &Offset);
172 bool SelectThumbAddrModeImm5S(SDValue N, unsigned Scale, SDValue &Base,
174 bool SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
176 bool SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
178 bool SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
180 bool SelectThumbAddrModeSP(SDValue N, SDValue &Base, SDValue &OffImm);
182 // Thumb 2 Addressing Modes:
183 bool SelectT2ShifterOperandReg(SDValue N,
184 SDValue &BaseReg, SDValue &Opc);
185 bool SelectT2AddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
186 bool SelectT2AddrModeImm8(SDValue N, SDValue &Base,
188 bool SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
190 bool SelectT2AddrModeSoReg(SDValue N, SDValue &Base,
191 SDValue &OffReg, SDValue &ShImm);
192 bool SelectT2AddrModeExclusive(SDValue N, SDValue &Base, SDValue &OffImm);
194 inline bool is_so_imm(unsigned Imm) const {
195 return ARM_AM::getSOImmVal(Imm) != -1;
198 inline bool is_so_imm_not(unsigned Imm) const {
199 return ARM_AM::getSOImmVal(~Imm) != -1;
202 inline bool is_t2_so_imm(unsigned Imm) const {
203 return ARM_AM::getT2SOImmVal(Imm) != -1;
206 inline bool is_t2_so_imm_not(unsigned Imm) const {
207 return ARM_AM::getT2SOImmVal(~Imm) != -1;
210 // Include the pieces autogenerated from the target description.
211 #include "ARMGenDAGISel.inc"
  /// SelectARMIndexedLoad - Indexed (pre/post inc/dec) load matching code for
  /// ARM.
216 SDNode *SelectARMIndexedLoad(SDNode *N);
217 SDNode *SelectT2IndexedLoad(SDNode *N);
219 /// SelectVLD - Select NEON load intrinsics. NumVecs should be
220 /// 1, 2, 3 or 4. The opcode arrays specify the instructions used for
221 /// loads of D registers and even subregs and odd subregs of Q registers.
222 /// For NumVecs <= 2, QOpcodes1 is not used.
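  /// For example, a quad-register VLD3 or VLD4 is emitted as two instructions:
  /// QOpcodes0 loads the even D subregs and QOpcodes1 loads the odd D subregs.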
223 SDNode *SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
224 const uint16_t *DOpcodes,
225 const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);
227 /// SelectVST - Select NEON store intrinsics. NumVecs should
228 /// be 1, 2, 3 or 4. The opcode arrays specify the instructions used for
229 /// stores of D registers and even subregs and odd subregs of Q registers.
230 /// For NumVecs <= 2, QOpcodes1 is not used.
231 SDNode *SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
232 const uint16_t *DOpcodes,
233 const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);
235 /// SelectVLDSTLane - Select NEON load/store lane intrinsics. NumVecs should
236 /// be 2, 3 or 4. The opcode arrays specify the instructions used for
237 /// load/store of D registers and Q registers.
238 SDNode *SelectVLDSTLane(SDNode *N, bool IsLoad,
239 bool isUpdating, unsigned NumVecs,
240 const uint16_t *DOpcodes, const uint16_t *QOpcodes);
242 /// SelectVLDDup - Select NEON load-duplicate intrinsics. NumVecs
243 /// should be 2, 3 or 4. The opcode array specifies the instructions used
244 /// for loading D registers. (Q registers are not supported.)
245 SDNode *SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs,
246 const uint16_t *Opcodes);
248 /// SelectVTBL - Select NEON VTBL and VTBX intrinsics. NumVecs should be 2,
249 /// 3 or 4. These are custom-selected so that a REG_SEQUENCE can be
250 /// generated to force the table registers to be consecutive.
251 SDNode *SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, unsigned Opc);
253 /// SelectV6T2BitfieldExtractOp - Select SBFX/UBFX instructions for ARM.
254 SDNode *SelectV6T2BitfieldExtractOp(SDNode *N, bool isSigned);
  // Select special operations if the node forms an integer ABS pattern.
257 SDNode *SelectABSOp(SDNode *N);
259 SDNode *SelectInlineAsm(SDNode *N);
261 SDNode *SelectConcatVector(SDNode *N);
263 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
264 /// inline asm expressions.
265 bool SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
266 std::vector<SDValue> &OutOps) override;
268 // Form pairs of consecutive R, S, D, or Q registers.
269 SDNode *createGPRPairNode(EVT VT, SDValue V0, SDValue V1);
270 SDNode *createSRegPairNode(EVT VT, SDValue V0, SDValue V1);
271 SDNode *createDRegPairNode(EVT VT, SDValue V0, SDValue V1);
272 SDNode *createQRegPairNode(EVT VT, SDValue V0, SDValue V1);
274 // Form sequences of 4 consecutive S, D, or Q registers.
275 SDNode *createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
276 SDNode *createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
277 SDNode *createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
279 // Get the alignment operand for a NEON VLD or VST instruction.
280 SDValue GetVLDSTAlign(SDValue Align, unsigned NumVecs, bool is64BitVector);
284 /// isInt32Immediate - This method tests to see if the node is a 32-bit constant
285 /// operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
  if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
    Imm = cast<ConstantSDNode>(N)->getZExtValue();
    return true;
  }
  return false;
}
// isInt32Immediate - This method tests to see if the operand is a 32-bit
// constant. If so, Imm will receive the 32-bit value.
296 static bool isInt32Immediate(SDValue N, unsigned &Imm) {
297 return isInt32Immediate(N.getNode(), Imm);
// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so, Imm will receive the 32-bit value.
303 static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
304 return N->getOpcode() == Opc &&
305 isInt32Immediate(N->getOperand(1).getNode(), Imm);
/// \brief Check whether a particular node is a constant value representable as
/// (N * Scale) where N is in [\p RangeMin, \p RangeMax).
311 /// \param ScaledConstant [out] - On success, the pre-scaled constant value.
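/// For example, with \p Scale = 4 and the range [0, 256), a constant node of
/// 1020 gives \p ScaledConstant = 255 and the check succeeds.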
312 static bool isScaledConstantInRange(SDValue Node, int Scale,
313 int RangeMin, int RangeMax,
314 int &ScaledConstant) {
315 assert(Scale > 0 && "Invalid scale!");
  // Check that this is a constant.
  const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Node);
  if (!C)
    return false;

  ScaledConstant = (int) C->getZExtValue();
  if ((ScaledConstant % Scale) != 0)
    return false;

  ScaledConstant /= Scale;
  return ScaledConstant >= RangeMin && ScaledConstant < RangeMax;
}
330 void ARMDAGToDAGISel::PreprocessISelDAG() {
  if (!Subtarget->hasV6T2Ops())
    return;
334 bool isThumb2 = Subtarget->isThumb();
335 for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
336 E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Advance the iterator now to avoid invalidation issues.

    if (N->getOpcode() != ISD::ADD)
      continue;
    // Look for (add X1, (and (srl X2, c1), c2)) where c2 is constant with
    // leading zeros, followed by consecutive set bits, followed by 1 or 2
    // trailing zeros, e.g. 1020.
    // Transform the expression to
    // (add X1, (shl (and (srl X2, c1), (c2>>tz)), tz)) where tz is the number
    // of trailing zeros of c2. The left shift would be folded as a shifter
    // operand of 'add' and the 'and' and 'srl' would become a bits extraction
    // node (UBFX).
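    // For example, with c1 = 14 and c2 = 1020 (so tz = 2):
    //   (add X1, (and (srl X2, 14), 1020))
    // becomes
    //   (add X1, (shl (and (srl X2, 16), 255), 2))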
351 SDValue N0 = N->getOperand(0);
352 SDValue N1 = N->getOperand(1);
353 unsigned And_imm = 0;
    if (!isOpcWithIntImmediate(N1.getNode(), ISD::AND, And_imm)) {
      if (!isOpcWithIntImmediate(N0.getNode(), ISD::AND, And_imm))
        continue;
      std::swap(N0, N1);
    }
361 // Check if the AND mask is an immediate of the form: 000.....1111111100
362 unsigned TZ = countTrailingZeros(And_imm);
    if (TZ != 1 && TZ != 2)
      // Be conservative here. Shifter operands aren't always free. e.g. on
      // Swift, a left shift by 1 or 2 is free but other amounts are not. e.g.
      //   ubfx  r3, r1, #16, #8
      //   ldr.w r3, [r0, r3, lsl #2]
      // vs.
      //   mov.w r9, #1020
      //   and.w r2, r9, r1, lsr #14
      //   ldr   r2, [r0, r2]
      continue;
    And_imm >>= TZ;
    if (And_imm & (And_imm + 1))
      continue;
378 // Look for (and (srl X, c1), c2).
379 SDValue Srl = N1.getOperand(0);
380 unsigned Srl_imm = 0;
    if (!isOpcWithIntImmediate(Srl.getNode(), ISD::SRL, Srl_imm) ||
        Srl_imm <= 2)
      continue;
    // Make sure the first operand is not a shifter operand, which would
    // prevent folding of the left shift.
    SDValue CPTmp0, CPTmp1, CPTmp2;
    if (isThumb2) {
      if (SelectT2ShifterOperandReg(N0, CPTmp0, CPTmp1))
        continue;
    } else if (SelectImmShifterOperand(N0, CPTmp0, CPTmp1) ||
               SelectRegShifterOperand(N0, CPTmp0, CPTmp1, CPTmp2))
      continue;
    // Now make the transformation.
    Srl = CurDAG->getNode(ISD::SRL, SDLoc(Srl), MVT::i32,
                          Srl.getOperand(0),
                          CurDAG->getConstant(Srl_imm + TZ, MVT::i32));
    N1 = CurDAG->getNode(ISD::AND, SDLoc(N1), MVT::i32,
                         Srl, CurDAG->getConstant(And_imm, MVT::i32));
    N1 = CurDAG->getNode(ISD::SHL, SDLoc(N1), MVT::i32,
                         N1, CurDAG->getConstant(TZ, MVT::i32));
    CurDAG->UpdateNodeOperands(N, N0, N1);
  }
}
/// hasNoVMLxHazardUse - Return true if it's desirable to select an FP MLA / MLS
/// node. VFP / NEON fp VMLA / VMLS instructions have special RAW hazards (at
/// least on current ARM implementations) which should be avoided.
414 bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
  if (OptLevel == CodeGenOpt::None)
    return true;
  if (!CheckVMLxHazard)
    return true;
  if (!Subtarget->isCortexA7() && !Subtarget->isCortexA8() &&
      !Subtarget->isCortexA9() && !Subtarget->isSwift())
    return true;
428 SDNode *Use = *N->use_begin();
  if (Use->getOpcode() == ISD::CopyToReg)
    return true;
431 if (Use->isMachineOpcode()) {
432 const ARMBaseInstrInfo *TII =
433 static_cast<const ARMBaseInstrInfo*>(TM.getInstrInfo());
435 const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode());
438 unsigned Opcode = MCID.getOpcode();
439 if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
    // vmlx feeding into another vmlx. We actually want to unfold
    // the use later in the MLxExpansion pass, e.g.
    //   vmla
    //   vmla (stall 8 cycles)
    // which adds up to about 18 - 19 cycles, vs.
    //   vmla
    //   vmul (stall 4 cycles)
    //   vadd
    // which adds up to about 14 cycles.
    return TII->isFpMLxInstruction(Opcode);
  }

  return false;
}
bool ARMDAGToDAGISel::isShifterOpProfitable(const SDValue &Shift,
                                            ARM_AM::ShiftOpc ShOpcVal,
                                            unsigned ShAmt) {
  if (!Subtarget->isLikeA9() && !Subtarget->isSwift())
    return true;
  if (Shift.hasOneUse())
    return true;
  return ShOpcVal == ARM_AM::lsl &&
         (ShAmt == 2 || (Subtarget->isSwift() && ShAmt == 1));
}
472 bool ARMDAGToDAGISel::SelectImmShifterOperand(SDValue N,
475 bool CheckProfitability) {
476 if (DisableShifterOp)
479 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
481 // Don't match base register only case. That is matched to a separate
482 // lower complexity pattern with explicit register operand.
483 if (ShOpcVal == ARM_AM::no_shift) return false;
485 BaseReg = N.getOperand(0);
486 unsigned ShImmVal = 0;
487 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
488 if (!RHS) return false;
489 ShImmVal = RHS->getZExtValue() & 31;
490 Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
495 bool ARMDAGToDAGISel::SelectRegShifterOperand(SDValue N,
499 bool CheckProfitability) {
500 if (DisableShifterOp)
503 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
505 // Don't match base register only case. That is matched to a separate
506 // lower complexity pattern with explicit register operand.
507 if (ShOpcVal == ARM_AM::no_shift) return false;
509 BaseReg = N.getOperand(0);
510 unsigned ShImmVal = 0;
511 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
512 if (RHS) return false;
514 ShReg = N.getOperand(1);
515 if (CheckProfitability && !isShifterOpProfitable(N, ShOpcVal, ShImmVal))
517 Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
                                          SDValue &Base,
                                          SDValue &OffImm) {
  // Match simple R + imm12 operands.
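  // For example, (add rN, #imm) with 0 <= imm < 4096 is matched as base rN
  // with offset #imm; a bare frame index is matched with a zero offset.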
529 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
530 !CurDAG->isBaseWithConstantOffset(N)) {
531 if (N.getOpcode() == ISD::FrameIndex) {
532 // Match frame index.
533 int FI = cast<FrameIndexSDNode>(N)->getIndex();
534 Base = CurDAG->getTargetFrameIndex(FI,
535 getTargetLowering()->getPointerTy());
536 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
540 if (N.getOpcode() == ARMISD::Wrapper &&
541 N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
542 Base = N.getOperand(0);
545 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
549 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
550 int RHSC = (int)RHS->getZExtValue();
551 if (N.getOpcode() == ISD::SUB)
554 if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
555 Base = N.getOperand(0);
556 if (Base.getOpcode() == ISD::FrameIndex) {
557 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
558 Base = CurDAG->getTargetFrameIndex(FI,
559 getTargetLowering()->getPointerTy());
561 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
568 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
574 bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
576 if (N.getOpcode() == ISD::MUL &&
577 ((!Subtarget->isLikeA9() && !Subtarget->isSwift()) || N.hasOneUse())) {
578 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
579 // X * [3,5,9] -> X + X * [2,4,8] etc.
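      // For example, X * 9 is selected as base X with offset (X << 3),
      // i.e. the [X, X, lsl #3] addressing form.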
580 int RHSC = (int)RHS->getZExtValue();
583 ARM_AM::AddrOpc AddSub = ARM_AM::add;
585 AddSub = ARM_AM::sub;
588 if (isPowerOf2_32(RHSC)) {
589 unsigned ShAmt = Log2_32(RHSC);
590 Base = Offset = N.getOperand(0);
591 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
600 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
601 // ISD::OR that is equivalent to an ISD::ADD.
602 !CurDAG->isBaseWithConstantOffset(N))
605 // Leave simple R +/- imm12 operands for LDRi12
606 if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::OR) {
608 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
609 -0x1000+1, 0x1000, RHSC)) // 12 bits.
613 // Otherwise this is R +/- [possibly shifted] R.
614 ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::SUB ? ARM_AM::sub:ARM_AM::add;
615 ARM_AM::ShiftOpc ShOpcVal =
616 ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
619 Base = N.getOperand(0);
620 Offset = N.getOperand(1);
622 if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant; if not, we can't
    // fold it.
625 if (ConstantSDNode *Sh =
626 dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
627 ShAmt = Sh->getZExtValue();
628 if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
629 Offset = N.getOperand(1).getOperand(0);
632 ShOpcVal = ARM_AM::no_shift;
635 ShOpcVal = ARM_AM::no_shift;
639 // Try matching (R shl C) + (R).
640 if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
641 !(Subtarget->isLikeA9() || Subtarget->isSwift() ||
642 N.getOperand(0).hasOneUse())) {
643 ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
644 if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant; if not, we can't
      // fold it.
647 if (ConstantSDNode *Sh =
648 dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
649 ShAmt = Sh->getZExtValue();
650 if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
651 Offset = N.getOperand(0).getOperand(0);
652 Base = N.getOperand(1);
655 ShOpcVal = ARM_AM::no_shift;
658 ShOpcVal = ARM_AM::no_shift;
663 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
671 AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
675 if (N.getOpcode() == ISD::MUL &&
676 (!(Subtarget->isLikeA9() || Subtarget->isSwift()) || N.hasOneUse())) {
677 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
678 // X * [3,5,9] -> X + X * [2,4,8] etc.
679 int RHSC = (int)RHS->getZExtValue();
682 ARM_AM::AddrOpc AddSub = ARM_AM::add;
684 AddSub = ARM_AM::sub;
687 if (isPowerOf2_32(RHSC)) {
688 unsigned ShAmt = Log2_32(RHSC);
689 Base = Offset = N.getOperand(0);
690 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
699 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
700 // ISD::OR that is equivalent to an ADD.
701 !CurDAG->isBaseWithConstantOffset(N)) {
703 if (N.getOpcode() == ISD::FrameIndex) {
704 int FI = cast<FrameIndexSDNode>(N)->getIndex();
705 Base = CurDAG->getTargetFrameIndex(FI,
706 getTargetLowering()->getPointerTy());
707 } else if (N.getOpcode() == ARMISD::Wrapper &&
708 N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
709 Base = N.getOperand(0);
711 Offset = CurDAG->getRegister(0, MVT::i32);
712 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
718 // Match simple R +/- imm12 operands.
719 if (N.getOpcode() != ISD::SUB) {
721 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
722 -0x1000+1, 0x1000, RHSC)) { // 12 bits.
723 Base = N.getOperand(0);
724 if (Base.getOpcode() == ISD::FrameIndex) {
725 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
726 Base = CurDAG->getTargetFrameIndex(FI,
727 getTargetLowering()->getPointerTy());
729 Offset = CurDAG->getRegister(0, MVT::i32);
731 ARM_AM::AddrOpc AddSub = ARM_AM::add;
733 AddSub = ARM_AM::sub;
736 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,
743 if ((Subtarget->isLikeA9() || Subtarget->isSwift()) && !N.hasOneUse()) {
744 // Compute R +/- (R << N) and reuse it.
746 Offset = CurDAG->getRegister(0, MVT::i32);
747 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
753 // Otherwise this is R +/- [possibly shifted] R.
754 ARM_AM::AddrOpc AddSub = N.getOpcode() != ISD::SUB ? ARM_AM::add:ARM_AM::sub;
755 ARM_AM::ShiftOpc ShOpcVal =
756 ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
759 Base = N.getOperand(0);
760 Offset = N.getOperand(1);
762 if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant; if not, we can't
    // fold it.
765 if (ConstantSDNode *Sh =
766 dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
767 ShAmt = Sh->getZExtValue();
768 if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
769 Offset = N.getOperand(1).getOperand(0);
772 ShOpcVal = ARM_AM::no_shift;
775 ShOpcVal = ARM_AM::no_shift;
779 // Try matching (R shl C) + (R).
780 if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
781 !(Subtarget->isLikeA9() || Subtarget->isSwift() ||
782 N.getOperand(0).hasOneUse())) {
783 ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
784 if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant; if not, we can't
      // fold it.
787 if (ConstantSDNode *Sh =
788 dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
789 ShAmt = Sh->getZExtValue();
790 if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
791 Offset = N.getOperand(0).getOperand(0);
792 Base = N.getOperand(1);
795 ShOpcVal = ARM_AM::no_shift;
798 ShOpcVal = ARM_AM::no_shift;
803 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
808 bool ARMDAGToDAGISel::SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
809 SDValue &Offset, SDValue &Opc) {
810 unsigned Opcode = Op->getOpcode();
811 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
812 ? cast<LoadSDNode>(Op)->getAddressingMode()
813 : cast<StoreSDNode>(Op)->getAddressingMode();
814 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
815 ? ARM_AM::add : ARM_AM::sub;
817 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val))
821 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
823 if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant; if not, we can't
    // fold it.
826 if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
827 ShAmt = Sh->getZExtValue();
828 if (isShifterOpProfitable(N, ShOpcVal, ShAmt))
829 Offset = N.getOperand(0);
832 ShOpcVal = ARM_AM::no_shift;
835 ShOpcVal = ARM_AM::no_shift;
839 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
844 bool ARMDAGToDAGISel::SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
845 SDValue &Offset, SDValue &Opc) {
846 unsigned Opcode = Op->getOpcode();
847 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
848 ? cast<LoadSDNode>(Op)->getAddressingMode()
849 : cast<StoreSDNode>(Op)->getAddressingMode();
850 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
851 ? ARM_AM::add : ARM_AM::sub;
853 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
854 if (AddSub == ARM_AM::sub) Val *= -1;
855 Offset = CurDAG->getRegister(0, MVT::i32);
856 Opc = CurDAG->getTargetConstant(Val, MVT::i32);
864 bool ARMDAGToDAGISel::SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
865 SDValue &Offset, SDValue &Opc) {
866 unsigned Opcode = Op->getOpcode();
867 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
868 ? cast<LoadSDNode>(Op)->getAddressingMode()
869 : cast<StoreSDNode>(Op)->getAddressingMode();
870 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
871 ? ARM_AM::add : ARM_AM::sub;
873 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
874 Offset = CurDAG->getRegister(0, MVT::i32);
875 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
884 bool ARMDAGToDAGISel::SelectAddrOffsetNone(SDValue N, SDValue &Base) {
889 bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
890 SDValue &Base, SDValue &Offset,
892 if (N.getOpcode() == ISD::SUB) {
    // X - C is canonicalized to X + -C; no need to handle it here.
894 Base = N.getOperand(0);
895 Offset = N.getOperand(1);
896 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),MVT::i32);
900 if (!CurDAG->isBaseWithConstantOffset(N)) {
902 if (N.getOpcode() == ISD::FrameIndex) {
903 int FI = cast<FrameIndexSDNode>(N)->getIndex();
904 Base = CurDAG->getTargetFrameIndex(FI,
905 getTargetLowering()->getPointerTy());
907 Offset = CurDAG->getRegister(0, MVT::i32);
908 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32);
912 // If the RHS is +/- imm8, fold into addr mode.
914 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
915 -256 + 1, 256, RHSC)) { // 8 bits.
916 Base = N.getOperand(0);
917 if (Base.getOpcode() == ISD::FrameIndex) {
918 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
919 Base = CurDAG->getTargetFrameIndex(FI,
920 getTargetLowering()->getPointerTy());
922 Offset = CurDAG->getRegister(0, MVT::i32);
924 ARM_AM::AddrOpc AddSub = ARM_AM::add;
926 AddSub = ARM_AM::sub;
929 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32);
933 Base = N.getOperand(0);
934 Offset = N.getOperand(1);
935 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), MVT::i32);
939 bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDNode *Op, SDValue N,
940 SDValue &Offset, SDValue &Opc) {
941 unsigned Opcode = Op->getOpcode();
942 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
943 ? cast<LoadSDNode>(Op)->getAddressingMode()
944 : cast<StoreSDNode>(Op)->getAddressingMode();
945 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
946 ? ARM_AM::add : ARM_AM::sub;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 256, Val)) { // 8 bits.
949 Offset = CurDAG->getRegister(0, MVT::i32);
950 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);
955 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), MVT::i32);
959 bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
960 SDValue &Base, SDValue &Offset) {
961 if (!CurDAG->isBaseWithConstantOffset(N)) {
963 if (N.getOpcode() == ISD::FrameIndex) {
964 int FI = cast<FrameIndexSDNode>(N)->getIndex();
965 Base = CurDAG->getTargetFrameIndex(FI,
966 getTargetLowering()->getPointerTy());
967 } else if (N.getOpcode() == ARMISD::Wrapper &&
968 N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
969 Base = N.getOperand(0);
971 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
976 // If the RHS is +/- imm8, fold into addr mode.
978 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4,
979 -256 + 1, 256, RHSC)) {
980 Base = N.getOperand(0);
981 if (Base.getOpcode() == ISD::FrameIndex) {
982 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
983 Base = CurDAG->getTargetFrameIndex(FI,
984 getTargetLowering()->getPointerTy());
987 ARM_AM::AddrOpc AddSub = ARM_AM::add;
989 AddSub = ARM_AM::sub;
992 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
998 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
1003 bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
1007 unsigned Alignment = 0;
1008 if (LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(Parent)) {
1009 // This case occurs only for VLD1-lane/dup and VST1-lane instructions.
1010 // The maximum alignment is equal to the memory size being referenced.
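    // For example, a VLD1-lane of a 16-bit element records at most a 2-byte
    // alignment here, even if the underlying access is more aligned.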
1011 unsigned LSNAlign = LSN->getAlignment();
1012 unsigned MemSize = LSN->getMemoryVT().getSizeInBits() / 8;
1013 if (LSNAlign >= MemSize && MemSize > 1)
1014 Alignment = MemSize;
1016 // All other uses of addrmode6 are for intrinsics. For now just record
1017 // the raw alignment value; it will be refined later based on the legal
1018 // alignment operands for the intrinsic.
1019 Alignment = cast<MemIntrinsicSDNode>(Parent)->getAlignment();
1022 Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
1026 bool ARMDAGToDAGISel::SelectAddrMode6Offset(SDNode *Op, SDValue N,
1028 LSBaseSDNode *LdSt = cast<LSBaseSDNode>(Op);
1029 ISD::MemIndexedMode AM = LdSt->getAddressingMode();
1030 if (AM != ISD::POST_INC)
1033 if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N)) {
1034 if (NC->getZExtValue() * 8 == LdSt->getMemoryVT().getSizeInBits())
1035 Offset = CurDAG->getRegister(0, MVT::i32);
1040 bool ARMDAGToDAGISel::SelectAddrModePC(SDValue N,
1041 SDValue &Offset, SDValue &Label) {
1042 if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
1043 Offset = N.getOperand(0);
1044 SDValue N1 = N.getOperand(1);
1045 Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
1054 //===----------------------------------------------------------------------===//
1055 // Thumb Addressing Modes
1056 //===----------------------------------------------------------------------===//
1058 bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue N,
1059 SDValue &Base, SDValue &Offset){
1060 if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N)) {
1061 ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N);
1062 if (!NC || !NC->isNullValue())
1069 Base = N.getOperand(0);
1070 Offset = N.getOperand(1);
1075 ARMDAGToDAGISel::SelectThumbAddrModeRI(SDValue N, SDValue &Base,
1076 SDValue &Offset, unsigned Scale) {
1078 SDValue TmpBase, TmpOffImm;
1079 if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
1080 return false; // We want to select tLDRspi / tSTRspi instead.
1082 if (N.getOpcode() == ARMISD::Wrapper &&
1083 N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
1084 return false; // We want to select tLDRpci instead.
1087 if (!CurDAG->isBaseWithConstantOffset(N))
1090 // Thumb does not have [sp, r] address mode.
1091 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
1092 RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
1093 if ((LHSR && LHSR->getReg() == ARM::SP) ||
1094 (RHSR && RHSR->getReg() == ARM::SP))
  // FIXME: Why do we explicitly check for a match here and then return false?
  // Presumably to allow something else to match, but shouldn't this be
  // documented?
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC))
    return false;
1104 Base = N.getOperand(0);
1105 Offset = N.getOperand(1);
1110 ARMDAGToDAGISel::SelectThumbAddrModeRI5S1(SDValue N,
1113 return SelectThumbAddrModeRI(N, Base, Offset, 1);
1117 ARMDAGToDAGISel::SelectThumbAddrModeRI5S2(SDValue N,
1120 return SelectThumbAddrModeRI(N, Base, Offset, 2);
1124 ARMDAGToDAGISel::SelectThumbAddrModeRI5S4(SDValue N,
1127 return SelectThumbAddrModeRI(N, Base, Offset, 4);
1131 ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale,
1132 SDValue &Base, SDValue &OffImm) {
1134 SDValue TmpBase, TmpOffImm;
1135 if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
1136 return false; // We want to select tLDRspi / tSTRspi instead.
1138 if (N.getOpcode() == ARMISD::Wrapper &&
1139 N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
1140 return false; // We want to select tLDRpci instead.
1143 if (!CurDAG->isBaseWithConstantOffset(N)) {
1144 if (N.getOpcode() == ARMISD::Wrapper &&
1145 N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
1146 Base = N.getOperand(0);
1151 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1155 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
1156 RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
1157 if ((LHSR && LHSR->getReg() == ARM::SP) ||
1158 (RHSR && RHSR->getReg() == ARM::SP)) {
1159 ConstantSDNode *LHS = dyn_cast<ConstantSDNode>(N.getOperand(0));
1160 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
1161 unsigned LHSC = LHS ? LHS->getZExtValue() : 0;
1162 unsigned RHSC = RHS ? RHS->getZExtValue() : 0;
1164 // Thumb does not have [sp, #imm5] address mode for non-zero imm5.
1165 if (LHSC != 0 || RHSC != 0) return false;
1168 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1172 // If the RHS is + imm5 * scale, fold into addr mode.
1174 if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC)) {
1175 Base = N.getOperand(0);
1176 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
1180 Base = N.getOperand(0);
1181 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1186 ARMDAGToDAGISel::SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
1188 return SelectThumbAddrModeImm5S(N, 4, Base, OffImm);
1192 ARMDAGToDAGISel::SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
1194 return SelectThumbAddrModeImm5S(N, 2, Base, OffImm);
1198 ARMDAGToDAGISel::SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
1200 return SelectThumbAddrModeImm5S(N, 1, Base, OffImm);
1203 bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
1204 SDValue &Base, SDValue &OffImm) {
1205 if (N.getOpcode() == ISD::FrameIndex) {
1206 int FI = cast<FrameIndexSDNode>(N)->getIndex();
1207 Base = CurDAG->getTargetFrameIndex(FI,
1208 getTargetLowering()->getPointerTy());
1209 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1213 if (!CurDAG->isBaseWithConstantOffset(N))
1216 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
1217 if (N.getOperand(0).getOpcode() == ISD::FrameIndex ||
1218 (LHSR && LHSR->getReg() == ARM::SP)) {
1219 // If the RHS is + imm8 * scale, fold into addr mode.
1221 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4, 0, 256, RHSC)) {
1222 Base = N.getOperand(0);
1223 if (Base.getOpcode() == ISD::FrameIndex) {
1224 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1225 Base = CurDAG->getTargetFrameIndex(FI,
1226 getTargetLowering()->getPointerTy());
1228 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
1237 //===----------------------------------------------------------------------===//
1238 // Thumb 2 Addressing Modes
1239 //===----------------------------------------------------------------------===//
1242 bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDValue N, SDValue &BaseReg,
1244 if (DisableShifterOp)
1247 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
1249 // Don't match base register only case. That is matched to a separate
1250 // lower complexity pattern with explicit register operand.
1251 if (ShOpcVal == ARM_AM::no_shift) return false;
1253 BaseReg = N.getOperand(0);
1254 unsigned ShImmVal = 0;
1255 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1256 ShImmVal = RHS->getZExtValue() & 31;
1257 Opc = getI32Imm(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal));
1264 bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
1265 SDValue &Base, SDValue &OffImm) {
1266 // Match simple R + imm12 operands.
1269 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
1270 !CurDAG->isBaseWithConstantOffset(N)) {
1271 if (N.getOpcode() == ISD::FrameIndex) {
1272 // Match frame index.
1273 int FI = cast<FrameIndexSDNode>(N)->getIndex();
1274 Base = CurDAG->getTargetFrameIndex(FI,
1275 getTargetLowering()->getPointerTy());
1276 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1280 if (N.getOpcode() == ARMISD::Wrapper &&
1281 N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
1282 Base = N.getOperand(0);
1283 if (Base.getOpcode() == ISD::TargetConstantPool)
1284 return false; // We want to select t2LDRpci instead.
1287 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1291 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1292 if (SelectT2AddrModeImm8(N, Base, OffImm))
1293 // Let t2LDRi8 handle (R - imm8).
1296 int RHSC = (int)RHS->getZExtValue();
1297 if (N.getOpcode() == ISD::SUB)
1300 if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
1301 Base = N.getOperand(0);
1302 if (Base.getOpcode() == ISD::FrameIndex) {
1303 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1304 Base = CurDAG->getTargetFrameIndex(FI,
1305 getTargetLowering()->getPointerTy());
1307 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
1314 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1318 bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N,
1319 SDValue &Base, SDValue &OffImm) {
1320 // Match simple R - imm8 operands.
1321 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
1322 !CurDAG->isBaseWithConstantOffset(N))
1325 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1326 int RHSC = (int)RHS->getSExtValue();
1327 if (N.getOpcode() == ISD::SUB)
1330 if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative)
1331 Base = N.getOperand(0);
1332 if (Base.getOpcode() == ISD::FrameIndex) {
1333 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1334 Base = CurDAG->getTargetFrameIndex(FI,
1335 getTargetLowering()->getPointerTy());
1337 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
1345 bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
1347 unsigned Opcode = Op->getOpcode();
1348 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
1349 ? cast<LoadSDNode>(Op)->getAddressingMode()
1350 : cast<StoreSDNode>(Op)->getAddressingMode();
1352 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x100, RHSC)) { // 8 bits.
1353 OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
1354 ? CurDAG->getTargetConstant(RHSC, MVT::i32)
1355 : CurDAG->getTargetConstant(-RHSC, MVT::i32);
1362 bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
1364 SDValue &OffReg, SDValue &ShImm) {
1365 // (R - imm8) should be handled by t2LDRi8. The rest are handled by t2LDRi12.
1366 if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N))
1369 // Leave (R + imm12) for t2LDRi12, (R - imm8) for t2LDRi8.
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC >= 0 && RHSC < 0x1000) // 12 bits (unsigned)
      return false;
    else if (RHSC < 0 && RHSC >= -255) // 8 bits
      return false;
  }
1378 // Look for (R + R) or (R + (R << [1,2,3])).
1380 Base = N.getOperand(0);
1381 OffReg = N.getOperand(1);
1383 // Swap if it is ((R << c) + R).
1384 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg.getOpcode());
1385 if (ShOpcVal != ARM_AM::lsl) {
1386 ShOpcVal = ARM_AM::getShiftOpcForNode(Base.getOpcode());
1387 if (ShOpcVal == ARM_AM::lsl)
1388 std::swap(Base, OffReg);
1391 if (ShOpcVal == ARM_AM::lsl) {
    // Check to see if the RHS of the shift is a constant; if not, we can't
    // fold it.
1394 if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) {
1395 ShAmt = Sh->getZExtValue();
1396 if (ShAmt < 4 && isShifterOpProfitable(OffReg, ShOpcVal, ShAmt))
1397 OffReg = OffReg.getOperand(0);
1400 ShOpcVal = ARM_AM::no_shift;
1403 ShOpcVal = ARM_AM::no_shift;
1407 ShImm = CurDAG->getTargetConstant(ShAmt, MVT::i32);
1412 bool ARMDAGToDAGISel::SelectT2AddrModeExclusive(SDValue N, SDValue &Base,
  // This *must* succeed since it's used for the irreplaceable ldrex and strex
  // instructions.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
  if (N.getOpcode() != ISD::ADD || !CurDAG->isBaseWithConstantOffset(N))
    return true;
1422 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
1426 uint32_t RHSC = (int)RHS->getZExtValue();
1427 if (RHSC > 1020 || RHSC % 4 != 0)
1430 Base = N.getOperand(0);
1431 if (Base.getOpcode() == ISD::FrameIndex) {
1432 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1433 Base = CurDAG->getTargetFrameIndex(FI, getTargetLowering()->getPointerTy());
1436 OffImm = CurDAG->getTargetConstant(RHSC / 4, MVT::i32);
1440 //===--------------------------------------------------------------------===//
/// getAL - Returns an ARMCC::AL immediate node.
1443 static inline SDValue getAL(SelectionDAG *CurDAG) {
1444 return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32);
1447 SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
1448 LoadSDNode *LD = cast<LoadSDNode>(N);
1449 ISD::MemIndexedMode AM = LD->getAddressingMode();
1450 if (AM == ISD::UNINDEXED)
1453 EVT LoadedVT = LD->getMemoryVT();
1454 SDValue Offset, AMOpc;
1455 bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
1456 unsigned Opcode = 0;
1458 if (LoadedVT == MVT::i32 && isPre &&
1459 SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
1460 Opcode = ARM::LDR_PRE_IMM;
1462 } else if (LoadedVT == MVT::i32 && !isPre &&
1463 SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
1464 Opcode = ARM::LDR_POST_IMM;
1466 } else if (LoadedVT == MVT::i32 &&
1467 SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
1468 Opcode = isPre ? ARM::LDR_PRE_REG : ARM::LDR_POST_REG;
1471 } else if (LoadedVT == MVT::i16 &&
1472 SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
1474 Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
1475 ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
1476 : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
1477 } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
1478 if (LD->getExtensionType() == ISD::SEXTLOAD) {
1479 if (SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
1481 Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST;
1485 SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
1487 Opcode = ARM::LDRB_PRE_IMM;
1488 } else if (!isPre &&
1489 SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
1491 Opcode = ARM::LDRB_POST_IMM;
1492 } else if (SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
1494 Opcode = isPre ? ARM::LDRB_PRE_REG : ARM::LDRB_POST_REG;
1500 if (Opcode == ARM::LDR_PRE_IMM || Opcode == ARM::LDRB_PRE_IMM) {
1501 SDValue Chain = LD->getChain();
1502 SDValue Base = LD->getBasePtr();
1503 SDValue Ops[]= { Base, AMOpc, getAL(CurDAG),
1504 CurDAG->getRegister(0, MVT::i32), Chain };
1505 return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
1506 MVT::i32, MVT::Other, Ops);
1508 SDValue Chain = LD->getChain();
1509 SDValue Base = LD->getBasePtr();
1510 SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG),
1511 CurDAG->getRegister(0, MVT::i32), Chain };
1512 return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
1513 MVT::i32, MVT::Other, Ops);
1520 SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
1521 LoadSDNode *LD = cast<LoadSDNode>(N);
1522 ISD::MemIndexedMode AM = LD->getAddressingMode();
1523 if (AM == ISD::UNINDEXED)
1526 EVT LoadedVT = LD->getMemoryVT();
1527 bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
1529 bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
1530 unsigned Opcode = 0;
1532 if (SelectT2AddrModeImm8Offset(N, LD->getOffset(), Offset)) {
1533 switch (LoadedVT.getSimpleVT().SimpleTy) {
1535 Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST;
1539 Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST;
1541 Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST;
1546 Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST;
1548 Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST;
1557 SDValue Chain = LD->getChain();
1558 SDValue Base = LD->getBasePtr();
1559 SDValue Ops[]= { Base, Offset, getAL(CurDAG),
1560 CurDAG->getRegister(0, MVT::i32), Chain };
1561 return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32,
1568 /// \brief Form a GPRPair pseudo register from a pair of GPR regs.
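/// Used, for example, where a 64-bit value must occupy two consecutive GPRs,
/// such as the paired operands of ldrexd / strexd.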
1569 SDNode *ARMDAGToDAGISel::createGPRPairNode(EVT VT, SDValue V0, SDValue V1) {
1570 SDLoc dl(V0.getNode());
1572 CurDAG->getTargetConstant(ARM::GPRPairRegClassID, MVT::i32);
1573 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::gsub_0, MVT::i32);
1574 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::gsub_1, MVT::i32);
1575 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1576 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1579 /// \brief Form a D register from a pair of S registers.
1580 SDNode *ARMDAGToDAGISel::createSRegPairNode(EVT VT, SDValue V0, SDValue V1) {
1581 SDLoc dl(V0.getNode());
1583 CurDAG->getTargetConstant(ARM::DPR_VFP2RegClassID, MVT::i32);
1584 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
1585 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
1586 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1587 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1590 /// \brief Form a quad register from a pair of D registers.
1591 SDNode *ARMDAGToDAGISel::createDRegPairNode(EVT VT, SDValue V0, SDValue V1) {
1592 SDLoc dl(V0.getNode());
1593 SDValue RegClass = CurDAG->getTargetConstant(ARM::QPRRegClassID, MVT::i32);
1594 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
1595 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
1596 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1597 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1600 /// \brief Form 4 consecutive D registers from a pair of Q registers.
1601 SDNode *ARMDAGToDAGISel::createQRegPairNode(EVT VT, SDValue V0, SDValue V1) {
1602 SDLoc dl(V0.getNode());
1603 SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
1604 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
1605 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
1606 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1607 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1610 /// \brief Form 4 consecutive S registers.
1611 SDNode *ARMDAGToDAGISel::createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1,
1612 SDValue V2, SDValue V3) {
1613 SDLoc dl(V0.getNode());
1615 CurDAG->getTargetConstant(ARM::QPR_VFP2RegClassID, MVT::i32);
1616 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
1617 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
1618 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::ssub_2, MVT::i32);
1619 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::ssub_3, MVT::i32);
1620 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1621 V2, SubReg2, V3, SubReg3 };
1622 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1625 /// \brief Form 4 consecutive D registers.
1626 SDNode *ARMDAGToDAGISel::createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1,
1627 SDValue V2, SDValue V3) {
1628 SDLoc dl(V0.getNode());
1629 SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
1630 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
1631 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
1632 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::dsub_2, MVT::i32);
1633 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::dsub_3, MVT::i32);
1634 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1635 V2, SubReg2, V3, SubReg3 };
1636 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1639 /// \brief Form 4 consecutive Q registers.
1640 SDNode *ARMDAGToDAGISel::createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1,
1641 SDValue V2, SDValue V3) {
1642 SDLoc dl(V0.getNode());
1643 SDValue RegClass = CurDAG->getTargetConstant(ARM::QQQQPRRegClassID, MVT::i32);
1644 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
1645 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
1646 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::qsub_2, MVT::i32);
1647 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::qsub_3, MVT::i32);
1648 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1649 V2, SubReg2, V3, SubReg3 };
1650 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
1653 /// GetVLDSTAlign - Get the alignment (in bytes) for the alignment operand
1654 /// of a NEON VLD or VST instruction. The supported values depend on the
1655 /// number of registers being loaded.
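/// For example, a VLD2 of 64-bit vectors (two D registers) with a requested
/// 32-byte alignment is clamped to a 16-byte alignment operand.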
1656 SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, unsigned NumVecs,
1657 bool is64BitVector) {
  unsigned NumRegs = NumVecs;
  if (!is64BitVector && NumVecs < 3)
    NumRegs *= 2;
  unsigned Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
  if (Alignment >= 32 && NumRegs == 4)
    Alignment = 32;
  else if (Alignment >= 16 && (NumRegs == 2 || NumRegs == 4))
    Alignment = 16;
  else if (Alignment >= 8)
    Alignment = 8;
  else
    Alignment = 0;
  return CurDAG->getTargetConstant(Alignment, MVT::i32);
}
1675 static bool isVLDfixed(unsigned Opc)
1678 default: return false;
1679 case ARM::VLD1d8wb_fixed : return true;
1680 case ARM::VLD1d16wb_fixed : return true;
1681 case ARM::VLD1d64Qwb_fixed : return true;
1682 case ARM::VLD1d32wb_fixed : return true;
1683 case ARM::VLD1d64wb_fixed : return true;
1684 case ARM::VLD1d64TPseudoWB_fixed : return true;
1685 case ARM::VLD1d64QPseudoWB_fixed : return true;
1686 case ARM::VLD1q8wb_fixed : return true;
1687 case ARM::VLD1q16wb_fixed : return true;
1688 case ARM::VLD1q32wb_fixed : return true;
1689 case ARM::VLD1q64wb_fixed : return true;
1690 case ARM::VLD2d8wb_fixed : return true;
1691 case ARM::VLD2d16wb_fixed : return true;
1692 case ARM::VLD2d32wb_fixed : return true;
1693 case ARM::VLD2q8PseudoWB_fixed : return true;
1694 case ARM::VLD2q16PseudoWB_fixed : return true;
1695 case ARM::VLD2q32PseudoWB_fixed : return true;
1696 case ARM::VLD2DUPd8wb_fixed : return true;
1697 case ARM::VLD2DUPd16wb_fixed : return true;
1698 case ARM::VLD2DUPd32wb_fixed : return true;
1702 static bool isVSTfixed(unsigned Opc)
1705 default: return false;
1706 case ARM::VST1d8wb_fixed : return true;
1707 case ARM::VST1d16wb_fixed : return true;
1708 case ARM::VST1d32wb_fixed : return true;
1709 case ARM::VST1d64wb_fixed : return true;
1710 case ARM::VST1q8wb_fixed : return true;
1711 case ARM::VST1q16wb_fixed : return true;
1712 case ARM::VST1q32wb_fixed : return true;
1713 case ARM::VST1q64wb_fixed : return true;
1714 case ARM::VST1d64TPseudoWB_fixed : return true;
1715 case ARM::VST1d64QPseudoWB_fixed : return true;
1716 case ARM::VST2d8wb_fixed : return true;
1717 case ARM::VST2d16wb_fixed : return true;
1718 case ARM::VST2d32wb_fixed : return true;
1719 case ARM::VST2q8PseudoWB_fixed : return true;
1720 case ARM::VST2q16PseudoWB_fixed : return true;
1721 case ARM::VST2q32PseudoWB_fixed : return true;
1725 // Get the register stride update opcode of a VLD/VST instruction that
1726 // is otherwise equivalent to the given fixed stride updating instruction.
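// For example, ARM::VLD1d8wb_fixed maps to ARM::VLD1d8wb_register.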
1727 static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
1728 assert((isVLDfixed(Opc) || isVSTfixed(Opc))
1729 && "Incorrect fixed stride updating instruction.");
1732 case ARM::VLD1d8wb_fixed: return ARM::VLD1d8wb_register;
1733 case ARM::VLD1d16wb_fixed: return ARM::VLD1d16wb_register;
1734 case ARM::VLD1d32wb_fixed: return ARM::VLD1d32wb_register;
1735 case ARM::VLD1d64wb_fixed: return ARM::VLD1d64wb_register;
1736 case ARM::VLD1q8wb_fixed: return ARM::VLD1q8wb_register;
1737 case ARM::VLD1q16wb_fixed: return ARM::VLD1q16wb_register;
1738 case ARM::VLD1q32wb_fixed: return ARM::VLD1q32wb_register;
1739 case ARM::VLD1q64wb_fixed: return ARM::VLD1q64wb_register;
1740 case ARM::VLD1d64Twb_fixed: return ARM::VLD1d64Twb_register;
1741 case ARM::VLD1d64Qwb_fixed: return ARM::VLD1d64Qwb_register;
1742 case ARM::VLD1d64TPseudoWB_fixed: return ARM::VLD1d64TPseudoWB_register;
1743 case ARM::VLD1d64QPseudoWB_fixed: return ARM::VLD1d64QPseudoWB_register;
1745 case ARM::VST1d8wb_fixed: return ARM::VST1d8wb_register;
1746 case ARM::VST1d16wb_fixed: return ARM::VST1d16wb_register;
1747 case ARM::VST1d32wb_fixed: return ARM::VST1d32wb_register;
1748 case ARM::VST1d64wb_fixed: return ARM::VST1d64wb_register;
1749 case ARM::VST1q8wb_fixed: return ARM::VST1q8wb_register;
1750 case ARM::VST1q16wb_fixed: return ARM::VST1q16wb_register;
1751 case ARM::VST1q32wb_fixed: return ARM::VST1q32wb_register;
1752 case ARM::VST1q64wb_fixed: return ARM::VST1q64wb_register;
1753 case ARM::VST1d64TPseudoWB_fixed: return ARM::VST1d64TPseudoWB_register;
1754 case ARM::VST1d64QPseudoWB_fixed: return ARM::VST1d64QPseudoWB_register;
1756 case ARM::VLD2d8wb_fixed: return ARM::VLD2d8wb_register;
1757 case ARM::VLD2d16wb_fixed: return ARM::VLD2d16wb_register;
1758 case ARM::VLD2d32wb_fixed: return ARM::VLD2d32wb_register;
1759 case ARM::VLD2q8PseudoWB_fixed: return ARM::VLD2q8PseudoWB_register;
1760 case ARM::VLD2q16PseudoWB_fixed: return ARM::VLD2q16PseudoWB_register;
1761 case ARM::VLD2q32PseudoWB_fixed: return ARM::VLD2q32PseudoWB_register;
1763 case ARM::VST2d8wb_fixed: return ARM::VST2d8wb_register;
1764 case ARM::VST2d16wb_fixed: return ARM::VST2d16wb_register;
1765 case ARM::VST2d32wb_fixed: return ARM::VST2d32wb_register;
1766 case ARM::VST2q8PseudoWB_fixed: return ARM::VST2q8PseudoWB_register;
1767 case ARM::VST2q16PseudoWB_fixed: return ARM::VST2q16PseudoWB_register;
1768 case ARM::VST2q32PseudoWB_fixed: return ARM::VST2q32PseudoWB_register;
1770 case ARM::VLD2DUPd8wb_fixed: return ARM::VLD2DUPd8wb_register;
1771 case ARM::VLD2DUPd16wb_fixed: return ARM::VLD2DUPd16wb_register;
1772 case ARM::VLD2DUPd32wb_fixed: return ARM::VLD2DUPd32wb_register;
1774 return Opc; // If not one we handle, return it unchanged.
1777 SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
1778 const uint16_t *DOpcodes,
1779 const uint16_t *QOpcodes0,
1780 const uint16_t *QOpcodes1) {
1781 assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
1784 SDValue MemAddr, Align;
1785 unsigned AddrOpIdx = isUpdating ? 1 : 2;
1786 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
1789 SDValue Chain = N->getOperand(0);
1790 EVT VT = N->getValueType(0);
1791 bool is64BitVector = VT.is64BitVector();
1792 Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);
1794 unsigned OpcodeIndex;
1795 switch (VT.getSimpleVT().SimpleTy) {
1796 default: llvm_unreachable("unhandled vld type");
1797 // Double-register operations:
1798 case MVT::v8i8: OpcodeIndex = 0; break;
1799 case MVT::v4i16: OpcodeIndex = 1; break;
1801 case MVT::v2i32: OpcodeIndex = 2; break;
1802 case MVT::v1i64: OpcodeIndex = 3; break;
1803 // Quad-register operations:
1804 case MVT::v16i8: OpcodeIndex = 0; break;
1805 case MVT::v8i16: OpcodeIndex = 1; break;
1807 case MVT::v4i32: OpcodeIndex = 2; break;
1808 case MVT::v2i64: OpcodeIndex = 3;
1809 assert(NumVecs == 1 && "v2i64 type only supported for VLD1");
1817 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
1820 ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts);
1822 std::vector<EVT> ResTys;
1823 ResTys.push_back(ResTy);
1825 ResTys.push_back(MVT::i32);
1826 ResTys.push_back(MVT::Other);
1828 SDValue Pred = getAL(CurDAG);
1829 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
1831 SmallVector<SDValue, 7> Ops;
1833 // Double registers and VLD1/VLD2 quad registers are directly supported.
1834 if (is64BitVector || NumVecs <= 2) {
1835 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
1836 QOpcodes0[OpcodeIndex]);
1837 Ops.push_back(MemAddr);
1838 Ops.push_back(Align);
1840 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1841 // FIXME: VLD1/VLD2 fixed increment doesn't need Reg0. Remove the reg0
1842 // case entirely when the rest are updated to that form, too.
1843 if ((NumVecs <= 2) && !isa<ConstantSDNode>(Inc.getNode()))
1844 Opc = getVLDSTRegisterUpdateOpcode(Opc);
1845 // FIXME: We use a VLD1 for v1i64 even if the pseudo says vld2/3/4, so
1846 // check for that explicitly too. Horribly hacky, but temporary.
1847 if ((NumVecs > 2 && !isVLDfixed(Opc)) ||
1848 !isa<ConstantSDNode>(Inc.getNode()))
1849 Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
1851 Ops.push_back(Pred);
1852 Ops.push_back(Reg0);
1853 Ops.push_back(Chain);
1854 VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1857 // Otherwise, quad registers are loaded with two separate instructions,
1858 // where one loads the even registers and the other loads the odd registers.
1859 EVT AddrTy = MemAddr.getValueType();
1861 // Load the even subregs. This is always an updating load, so that it
1862 // provides the address to the second load for the odd subregs.
1864 SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, ResTy), 0);
1865 const SDValue OpsA[] = { MemAddr, Align, Reg0, ImplDef, Pred, Reg0, Chain };
1866 SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
1867 ResTy, AddrTy, MVT::Other, OpsA);
1868 Chain = SDValue(VLdA, 2);
1870 // Load the odd subregs.
1871 Ops.push_back(SDValue(VLdA, 1));
1872 Ops.push_back(Align);
1874 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1875 assert(isa<ConstantSDNode>(Inc.getNode()) &&
1876 "only constant post-increment update allowed for VLD3/4");
1878 Ops.push_back(Reg0);
1880 Ops.push_back(SDValue(VLdA, 0));
1881 Ops.push_back(Pred);
1882 Ops.push_back(Reg0);
1883 Ops.push_back(Chain);
1884 VLd = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys, Ops);
1887 // Transfer memoperands.
1888 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1889 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1890 cast<MachineSDNode>(VLd)->setMemRefs(MemOp, MemOp + 1);
1895 // Extract out the subregisters.
1896 SDValue SuperReg = SDValue(VLd, 0);
1897 assert(ARM::dsub_7 == ARM::dsub_0+7 &&
1898 ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
1899 unsigned Sub0 = (is64BitVector ? ARM::dsub_0 : ARM::qsub_0);
1900 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
1901 ReplaceUses(SDValue(N, Vec),
1902 CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
1903 ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
1905 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));
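// SelectVST - Select a NEON structure store (vst1-vst4), the mirror image of
// SelectVLD: the source vectors are packed into a REG_SEQUENCE, and a
// quad-register vst3/vst4 is emitted as two instructions covering the even
// and odd D registers.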
1909 SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
1910 const uint16_t *DOpcodes,
1911 const uint16_t *QOpcodes0,
1912 const uint16_t *QOpcodes1) {
1913 assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
1916 SDValue MemAddr, Align;
1917 unsigned AddrOpIdx = isUpdating ? 1 : 2;
1918 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
1919 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
1920 return nullptr;
1922 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1923 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1925 SDValue Chain = N->getOperand(0);
1926 EVT VT = N->getOperand(Vec0Idx).getValueType();
1927 bool is64BitVector = VT.is64BitVector();
1928 Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);
1930 unsigned OpcodeIndex;
1931 switch (VT.getSimpleVT().SimpleTy) {
1932 default: llvm_unreachable("unhandled vst type");
1933 // Double-register operations:
1934 case MVT::v8i8: OpcodeIndex = 0; break;
1935 case MVT::v4i16: OpcodeIndex = 1; break;
1937 case MVT::v2i32: OpcodeIndex = 2; break;
1938 case MVT::v1i64: OpcodeIndex = 3; break;
1939 // Quad-register operations:
1940 case MVT::v16i8: OpcodeIndex = 0; break;
1941 case MVT::v8i16: OpcodeIndex = 1; break;
1943 case MVT::v4i32: OpcodeIndex = 2; break;
1944 case MVT::v2i64: OpcodeIndex = 3;
1945 assert(NumVecs == 1 && "v2i64 type only supported for VST1");
1949 std::vector<EVT> ResTys;
1951 ResTys.push_back(MVT::i32);
1952 ResTys.push_back(MVT::Other);
1954 SDValue Pred = getAL(CurDAG);
1955 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
1956 SmallVector<SDValue, 7> Ops;
1958 // Double registers and VST1/VST2 quad registers are directly supported.
1959 if (is64BitVector || NumVecs <= 2) {
1962 SrcReg = N->getOperand(Vec0Idx);
1963 } else if (is64BitVector) {
1964 // Form a REG_SEQUENCE to force register allocation.
1965 SDValue V0 = N->getOperand(Vec0Idx + 0);
1966 SDValue V1 = N->getOperand(Vec0Idx + 1);
1968 SrcReg = SDValue(createDRegPairNode(MVT::v2i64, V0, V1), 0);
1970 SDValue V2 = N->getOperand(Vec0Idx + 2);
1971 // If it's a vst3, form a quad D-register and leave the last part as
1972 // an undef.
1973 SDValue V3 = (NumVecs == 3)
1974 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
1975 : N->getOperand(Vec0Idx + 3);
1976 SrcReg = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
1979 // Form a QQ register.
1980 SDValue Q0 = N->getOperand(Vec0Idx);
1981 SDValue Q1 = N->getOperand(Vec0Idx + 1);
1982 SrcReg = SDValue(createQRegPairNode(MVT::v4i64, Q0, Q1), 0);
1985 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
1986 QOpcodes0[OpcodeIndex]);
1987 Ops.push_back(MemAddr);
1988 Ops.push_back(Align);
1990 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1991 // FIXME: VST1/VST2 fixed increment doesn't need Reg0. Remove the reg0
1992 // case entirely when the rest are updated to that form, too.
1993 if (NumVecs <= 2 && !isa<ConstantSDNode>(Inc.getNode()))
1994 Opc = getVLDSTRegisterUpdateOpcode(Opc);
1995 // FIXME: We use a VST1 for v1i64 even if the pseudo says vst2/3/4, so
1996 // check for that explicitly too. Horribly hacky, but temporary.
1997 if (!isa<ConstantSDNode>(Inc.getNode()))
1998 Ops.push_back(Inc);
1999 else if (NumVecs > 2 && !isVSTfixed(Opc))
2000 Ops.push_back(Reg0);
2002 Ops.push_back(SrcReg);
2003 Ops.push_back(Pred);
2004 Ops.push_back(Reg0);
2005 Ops.push_back(Chain);
2006 SDNode *VSt = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
2008 // Transfer memoperands.
2009 cast<MachineSDNode>(VSt)->setMemRefs(MemOp, MemOp + 1);
2014 // Otherwise, quad registers are stored with two separate instructions,
2015 // where one stores the even registers and the other stores the odd registers.
2017 // Form the QQQQ REG_SEQUENCE.
2018 SDValue V0 = N->getOperand(Vec0Idx + 0);
2019 SDValue V1 = N->getOperand(Vec0Idx + 1);
2020 SDValue V2 = N->getOperand(Vec0Idx + 2);
2021 SDValue V3 = (NumVecs == 3)
2022 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
2023 : N->getOperand(Vec0Idx + 3);
2024 SDValue RegSeq = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0);
2026 // Store the even D registers. This is always an updating store, so that it
2027 // provides the address to the second store for the odd subregs.
2028 const SDValue OpsA[] = { MemAddr, Align, Reg0, RegSeq, Pred, Reg0, Chain };
2029 SDNode *VStA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
2030 MemAddr.getValueType(),
2032 cast<MachineSDNode>(VStA)->setMemRefs(MemOp, MemOp + 1);
2033 Chain = SDValue(VStA, 1);
2035 // Store the odd D registers.
2036 Ops.push_back(SDValue(VStA, 0));
2037 Ops.push_back(Align);
2039 SDValue Inc = N->getOperand(AddrOpIdx + 1);
2040 assert(isa<ConstantSDNode>(Inc.getNode()) &&
2041 "only constant post-increment update allowed for VST3/4");
2043 Ops.push_back(Reg0);
2045 Ops.push_back(RegSeq);
2046 Ops.push_back(Pred);
2047 Ops.push_back(Reg0);
2048 Ops.push_back(Chain);
2049 SDNode *VStB = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
2051 cast<MachineSDNode>(VStB)->setMemRefs(MemOp, MemOp + 1);
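// SelectVLDSTLane - Select a single-lane NEON load or store
// (vld2lane-vld4lane / vst2lane-vst4lane), optionally with address writeback.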
2055 SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
2056 bool isUpdating, unsigned NumVecs,
2057 const uint16_t *DOpcodes,
2058 const uint16_t *QOpcodes) {
2059 assert(NumVecs >= 2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
2062 SDValue MemAddr, Align;
2063 unsigned AddrOpIdx = isUpdating ? 1 : 2;
2064 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
2065 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
2066 return nullptr;
2068 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2069 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2071 SDValue Chain = N->getOperand(0);
2073 cast<ConstantSDNode>(N->getOperand(Vec0Idx + NumVecs))->getZExtValue();
2074 EVT VT = N->getOperand(Vec0Idx).getValueType();
2075 bool is64BitVector = VT.is64BitVector();
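// Clamp the alignment operand to the number of bytes actually accessed, and
// force it to a power of two.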
2077 unsigned Alignment = 0;
2079 Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
2080 unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
2081 if (Alignment > NumBytes)
2082 Alignment = NumBytes;
2083 if (Alignment < 8 && Alignment < NumBytes)
2085 // Alignment must be a power of two; make sure of that.
2086 Alignment = (Alignment & -Alignment);
2090 Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
2092 unsigned OpcodeIndex;
2093 switch (VT.getSimpleVT().SimpleTy) {
2094 default: llvm_unreachable("unhandled vld/vst lane type");
2095 // Double-register operations:
2096 case MVT::v8i8: OpcodeIndex = 0; break;
2097 case MVT::v4i16: OpcodeIndex = 1; break;
2099 case MVT::v2i32: OpcodeIndex = 2; break;
2100 // Quad-register operations:
2101 case MVT::v8i16: OpcodeIndex = 0; break;
2103 case MVT::v4i32: OpcodeIndex = 1; break;
2106 std::vector<EVT> ResTys;
2108 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
2111 ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(),
2112 MVT::i64, ResTyElts));
2115 ResTys.push_back(MVT::i32);
2116 ResTys.push_back(MVT::Other);
2118 SDValue Pred = getAL(CurDAG);
2119 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2121 SmallVector<SDValue, 8> Ops;
2122 Ops.push_back(MemAddr);
2123 Ops.push_back(Align);
2125 SDValue Inc = N->getOperand(AddrOpIdx + 1);
2126 Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
2130 SDValue V0 = N->getOperand(Vec0Idx + 0);
2131 SDValue V1 = N->getOperand(Vec0Idx + 1);
2134 SuperReg = SDValue(createDRegPairNode(MVT::v2i64, V0, V1), 0);
2136 SuperReg = SDValue(createQRegPairNode(MVT::v4i64, V0, V1), 0);
2138 SDValue V2 = N->getOperand(Vec0Idx + 2);
2139 SDValue V3 = (NumVecs == 3)
2140 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
2141 : N->getOperand(Vec0Idx + 3);
2143 SuperReg = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
2145 SuperReg = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0);
2147 Ops.push_back(SuperReg);
2148 Ops.push_back(getI32Imm(Lane));
2149 Ops.push_back(Pred);
2150 Ops.push_back(Reg0);
2151 Ops.push_back(Chain);
2153 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
2154 QOpcodes[OpcodeIndex]);
2155 SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
2156 cast<MachineSDNode>(VLdLn)->setMemRefs(MemOp, MemOp + 1);
2160 // Extract the subregisters.
2161 SuperReg = SDValue(VLdLn, 0);
2162 assert(ARM::dsub_7 == ARM::dsub_0+7 &&
2163 ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
2164 unsigned Sub0 = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
2165 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
2166 ReplaceUses(SDValue(N, Vec),
2167 CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
2168 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1));
2170 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2));
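// SelectVLDDup - Select a NEON load-and-duplicate (vld2dup-vld4dup), which
// loads one element per vector and replicates it across all lanes of that
// vector.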
2174 SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
2176 const uint16_t *Opcodes) {
2177 assert(NumVecs >= 2 && NumVecs <= 4 && "VLDDup NumVecs out-of-range");
2180 SDValue MemAddr, Align;
2181 if (!SelectAddrMode6(N, N->getOperand(1), MemAddr, Align))
2182 return nullptr;
2184 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2185 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2187 SDValue Chain = N->getOperand(0);
2188 EVT VT = N->getValueType(0);
2190 unsigned Alignment = 0;
2192 Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
2193 unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
2194 if (Alignment > NumBytes)
2195 Alignment = NumBytes;
2196 if (Alignment < 8 && Alignment < NumBytes)
2198 // Alignment must be a power of two; make sure of that.
2199 Alignment = (Alignment & -Alignment);
2203 Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
2205 unsigned OpcodeIndex;
2206 switch (VT.getSimpleVT().SimpleTy) {
2207 default: llvm_unreachable("unhandled vld-dup type");
2208 case MVT::v8i8: OpcodeIndex = 0; break;
2209 case MVT::v4i16: OpcodeIndex = 1; break;
2211 case MVT::v2i32: OpcodeIndex = 2; break;
2214 SDValue Pred = getAL(CurDAG);
2215 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2217 unsigned Opc = Opcodes[OpcodeIndex];
2218 SmallVector<SDValue, 6> Ops;
2219 Ops.push_back(MemAddr);
2220 Ops.push_back(Align);
2222 // fixed-stride update instructions don't have an explicit writeback
2223 // operand. It's implicit in the opcode itself.
2224 SDValue Inc = N->getOperand(2);
2225 if (!isa<ConstantSDNode>(Inc.getNode()))
2226 Ops.push_back(Inc);
2227 // FIXME: VLD3 and VLD4 haven't been updated to that form yet.
2228 else if (NumVecs > 2)
2229 Ops.push_back(Reg0);
2231 Ops.push_back(Pred);
2232 Ops.push_back(Reg0);
2233 Ops.push_back(Chain);
2235 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
2236 std::vector<EVT> ResTys;
2237 ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(), MVT::i64,ResTyElts));
2239 ResTys.push_back(MVT::i32);
2240 ResTys.push_back(MVT::Other);
2241 SDNode *VLdDup = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
2242 cast<MachineSDNode>(VLdDup)->setMemRefs(MemOp, MemOp + 1);
2243 SuperReg = SDValue(VLdDup, 0);
2245 // Extract the subregisters.
2246 assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
2247 unsigned SubIdx = ARM::dsub_0;
2248 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
2249 ReplaceUses(SDValue(N, Vec),
2250 CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, SuperReg));
2251 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1));
2253 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2));
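// SelectVTBL - Select a NEON table lookup (vtbl2-vtbl4) or table extension
// (vtbx2-vtbx4). The table registers are packed into a REG_SEQUENCE so the
// register allocator keeps them consecutive.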
2257 SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs,
2259 assert(NumVecs >= 2 && NumVecs <= 4 && "VTBL NumVecs out-of-range");
2261 EVT VT = N->getValueType(0);
2262 unsigned FirstTblReg = IsExt ? 2 : 1;
2264 // Form a REG_SEQUENCE to force register allocation.
2266 SDValue V0 = N->getOperand(FirstTblReg + 0);
2267 SDValue V1 = N->getOperand(FirstTblReg + 1);
2269 RegSeq = SDValue(createDRegPairNode(MVT::v16i8, V0, V1), 0);
2271 SDValue V2 = N->getOperand(FirstTblReg + 2);
2272 // If it's a vtbl3, form a quad D-register and leave the last part as
2273 // an undef.
2274 SDValue V3 = (NumVecs == 3)
2275 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
2276 : N->getOperand(FirstTblReg + 3);
2277 RegSeq = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
2280 SmallVector<SDValue, 6> Ops;
2282 Ops.push_back(N->getOperand(1));
2283 Ops.push_back(RegSeq);
2284 Ops.push_back(N->getOperand(FirstTblReg + NumVecs));
2285 Ops.push_back(getAL(CurDAG)); // predicate
2286 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // predicate register
2287 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
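// SelectV6T2BitfieldExtractOp - Try to match an and-of-shift or
// shift-of-shift pattern onto the v6T2 SBFX/UBFX bitfield-extract
// instructions, or onto a plain shift when the extracted field reaches the
// top bit.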
2290 SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
2292 if (!Subtarget->hasV6T2Ops())
2295 unsigned Opc = isSigned
2296 ? (Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX)
2297 : (Subtarget->isThumb() ? ARM::t2UBFX : ARM::UBFX);
2299 // For unsigned extracts, check for a shift right and mask
2300 unsigned And_imm = 0;
2301 if (N->getOpcode() == ISD::AND) {
2302 if (isOpcWithIntImmediate(N, ISD::AND, And_imm)) {
2304 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
2305 if (And_imm & (And_imm + 1))
2308 unsigned Srl_imm = 0;
2309 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL,
2311 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
2313 // Note: The width operand is encoded as width-1.
2314 unsigned Width = CountTrailingOnes_32(And_imm) - 1;
2315 unsigned LSB = Srl_imm;
2317 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2319 if ((LSB + Width + 1) == N->getValueType(0).getSizeInBits()) {
2320 // It's cheaper to use a right shift to extract the top bits.
2321 if (Subtarget->isThumb()) {
2322 Opc = isSigned ? ARM::t2ASRri : ARM::t2LSRri;
2323 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2324 CurDAG->getTargetConstant(LSB, MVT::i32),
2325 getAL(CurDAG), Reg0, Reg0 };
2326 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
2329 // ARM models shift instructions as MOVsi with shifter operand.
2330 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(ISD::SRL);
2332 CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, LSB),
2334 SDValue Ops[] = { N->getOperand(0).getOperand(0), ShOpc,
2335 getAL(CurDAG), Reg0, Reg0 };
2336 return CurDAG->SelectNodeTo(N, ARM::MOVsi, MVT::i32, Ops);
2339 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2340 CurDAG->getTargetConstant(LSB, MVT::i32),
2341 CurDAG->getTargetConstant(Width, MVT::i32),
2342 getAL(CurDAG), Reg0 };
2343 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
2349 // Otherwise, we're looking for a shift of a shift
2350 unsigned Shl_imm = 0;
2351 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
2352 assert(Shl_imm > 0 && Shl_imm < 32 && "bad amount in shift node!");
2353 unsigned Srl_imm = 0;
2354 if (isInt32Immediate(N->getOperand(1), Srl_imm)) {
2355 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
2356 // Note: The width operand is encoded as width-1.
2357 unsigned Width = 32 - Srl_imm - 1;
2358 int LSB = Srl_imm - Shl_imm;
2361 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2362 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2363 CurDAG->getTargetConstant(LSB, MVT::i32),
2364 CurDAG->getTargetConstant(Width, MVT::i32),
2365 getAL(CurDAG), Reg0 };
2366 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
2372 /// Target-specific DAG combining for ISD::XOR.
2373 /// Target-independent combining lowers SELECT_CC nodes of the form
2374 /// select_cc setg[ge] X, 0, X, -X
2375 /// select_cc setgt X, -1, X, -X
2376 /// select_cc setl[te] X, 0, -X, X
2377 /// select_cc setlt X, 1, -X, X
2378 /// which represent integer ABS, into:
2379 /// Y = sra (X, size(X)-1); xor (add (X, Y), Y)
2380 /// ARM instruction selection detects the latter and matches it to the
2381 /// ARM::ABS or ARM::t2ABS machine node.
2382 SDNode *ARMDAGToDAGISel::SelectABSOp(SDNode *N){
2383 SDValue XORSrc0 = N->getOperand(0);
2384 SDValue XORSrc1 = N->getOperand(1);
2385 EVT VT = N->getValueType(0);
2387 if (Subtarget->isThumb1Only())
2390 if (XORSrc0.getOpcode() != ISD::ADD || XORSrc1.getOpcode() != ISD::SRA)
2393 SDValue ADDSrc0 = XORSrc0.getOperand(0);
2394 SDValue ADDSrc1 = XORSrc0.getOperand(1);
2395 SDValue SRASrc0 = XORSrc1.getOperand(0);
2396 SDValue SRASrc1 = XORSrc1.getOperand(1);
2397 ConstantSDNode *SRAConstant = dyn_cast<ConstantSDNode>(SRASrc1);
2398 EVT XType = SRASrc0.getValueType();
2399 unsigned Size = XType.getSizeInBits() - 1;
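// Match xor (add X, Y), Y where Y == sra (X, size(X)-1): both xor operands
// must trace back to the same X, and the shift amount must be the sign-bit
// position.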
2401 if (ADDSrc1 == XORSrc1 && ADDSrc0 == SRASrc0 &&
2402 XType.isInteger() && SRAConstant != nullptr &&
2403 Size == SRAConstant->getZExtValue()) {
2404 unsigned Opcode = Subtarget->isThumb2() ? ARM::t2ABS : ARM::ABS;
2405 return CurDAG->SelectNodeTo(N, Opcode, VT, ADDSrc0);
2411 SDNode *ARMDAGToDAGISel::SelectConcatVector(SDNode *N) {
2412 // The only time a CONCAT_VECTORS operation can have legal types is when
2413 // two 64-bit vectors are concatenated to a 128-bit vector.
2414 EVT VT = N->getValueType(0);
2415 if (!VT.is128BitVector() || N->getNumOperands() != 2)
2416 llvm_unreachable("unexpected CONCAT_VECTORS");
2417 return createDRegPairNode(VT, N->getOperand(0), N->getOperand(1));
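// Select - Main instruction selection hook. The cases below need custom
// handling; everything else falls through to the TableGen'erated matcher via
// SelectCode.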
2420 SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
2423 if (N->isMachineOpcode()) {
2425 return nullptr; // Already selected.
2428 switch (N->getOpcode()) {
2430 case ISD::INLINEASM: {
2431 SDNode *ResNode = SelectInlineAsm(N);
2437 // Select special operations if XOR node forms integer ABS pattern
2438 SDNode *ResNode = SelectABSOp(N);
2441 // Other cases are autogenerated.
2444 case ISD::Constant: {
2445 unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
2447 if (Subtarget->useMovt())
2448 // Thumb2-aware targets have the MOVT instruction, so all immediates can
2449 // be done with MOV + MOVT, at worst.
2452 if (Subtarget->isThumb()) {
2453 UseCP = (Val > 255 && // MOV
2454 ~Val > 255 && // MOV + MVN
2455 !ARM_AM::isThumbImmShiftedVal(Val) && // MOV + LSL
2456 !(Subtarget->hasV6T2Ops() && Val <= 0xffff)); // MOVW
2458 UseCP = (ARM_AM::getSOImmVal(Val) == -1 && // MOV
2459 ARM_AM::getSOImmVal(~Val) == -1 && // MVN
2460 !ARM_AM::isSOImmTwoPartVal(Val) && // two instrs.
2461 !(Subtarget->hasV6T2Ops() && Val <= 0xffff)); // MOVW
2466 CurDAG->getTargetConstantPool(ConstantInt::get(
2467 Type::getInt32Ty(*CurDAG->getContext()), Val),
2468 getTargetLowering()->getPointerTy());
2471 if (Subtarget->isThumb()) {
2472 SDValue Pred = getAL(CurDAG);
2473 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2474 SDValue Ops[] = { CPIdx, Pred, PredReg, CurDAG->getEntryNode() };
2475 ResNode = CurDAG->getMachineNode(ARM::tLDRpci, dl, MVT::i32, MVT::Other,
2480 CurDAG->getTargetConstant(0, MVT::i32),
2482 CurDAG->getRegister(0, MVT::i32),
2483 CurDAG->getEntryNode()
2485 ResNode = CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
2488 ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0));
2492 // Other cases are autogenerated.
2495 case ISD::FrameIndex: {
2496 // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
2497 int FI = cast<FrameIndexSDNode>(N)->getIndex();
2498 SDValue TFI = CurDAG->getTargetFrameIndex(FI,
2499 getTargetLowering()->getPointerTy());
2500 if (Subtarget->isThumb1Only()) {
2501 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
2502 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2503 return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, Ops);
2505 unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
2506 ARM::t2ADDri : ARM::ADDri);
2507 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
2508 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2509 CurDAG->getRegister(0, MVT::i32) };
2510 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
2514 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
2518 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, true))
2522 if (Subtarget->isThumb1Only())
2524 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
2525 unsigned RHSV = C->getZExtValue();
2527 if (isPowerOf2_32(RHSV-1)) { // 2^n+1?
2528 unsigned ShImm = Log2_32(RHSV-1);
2531 SDValue V = N->getOperand(0);
2532 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
2533 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
2534 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2535 if (Subtarget->isThumb()) {
2536 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2537 return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops);
2539 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2540 return CurDAG->SelectNodeTo(N, ARM::ADDrsi, MVT::i32, Ops);
2543 if (isPowerOf2_32(RHSV+1)) { // 2^n-1?
2544 unsigned ShImm = Log2_32(RHSV+1);
2547 SDValue V = N->getOperand(0);
2548 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
2549 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
2550 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2551 if (Subtarget->isThumb()) {
2552 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2553 return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops);
2555 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2556 return CurDAG->SelectNodeTo(N, ARM::RSBrsi, MVT::i32, Ops);
2562 // Check for unsigned bitfield extract
2563 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
2566 // (and (or x, c2), c1) and top 16-bits of c1 and c2 match, lower 16-bits
2567 // of c1 are 0xffff, and lower 16-bits of c2 are 0. That is, the top 16-bits
2568 // are entirely contributed by c2 and lower 16-bits are entirely contributed
2569 // by x. That's equal to (or (and x, 0xffff), (and c1, 0xffff0000)).
2570 // Select it to: "movt x, ((c2 & 0xffff0000) >> 16)".
2571 EVT VT = N->getValueType(0);
2574 unsigned Opc = (Subtarget->isThumb() && Subtarget->hasThumb2())
2576 : (Subtarget->hasV6T2Ops() ? ARM::MOVTi16 : 0);
2579 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
2580 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2583 if (N0.getOpcode() == ISD::OR && N0.getNode()->hasOneUse()) {
2584 SDValue N2 = N0.getOperand(1);
2585 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
2588 unsigned N1CVal = N1C->getZExtValue();
2589 unsigned N2CVal = N2C->getZExtValue();
2590 if ((N1CVal & 0xffff0000U) == (N2CVal & 0xffff0000U) &&
2591 (N1CVal & 0xffffU) == 0xffffU &&
2592 (N2CVal & 0xffffU) == 0x0U) {
2593 SDValue Imm16 = CurDAG->getTargetConstant((N2CVal & 0xFFFF0000U) >> 16,
2595 SDValue Ops[] = { N0.getOperand(0), Imm16,
2596 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2597 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
2602 case ARMISD::VMOVRRD:
2603 return CurDAG->getMachineNode(ARM::VMOVRRD, dl, MVT::i32, MVT::i32,
2604 N->getOperand(0), getAL(CurDAG),
2605 CurDAG->getRegister(0, MVT::i32));
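// [US]MUL_LOHI produces both halves of a 64-bit product, so it is matched
// directly to UMULL/SMULL (t2UMULL/t2SMULL on Thumb2, or the v5 variants
// when the target lacks the ARMv6 multiply instructions).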
2606 case ISD::UMUL_LOHI: {
2607 if (Subtarget->isThumb1Only())
2609 if (Subtarget->isThumb()) {
2610 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2611 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2612 return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32, Ops);
2614 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2615 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2616 CurDAG->getRegister(0, MVT::i32) };
2617 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2618 ARM::UMULL : ARM::UMULLv5,
2619 dl, MVT::i32, MVT::i32, Ops);
2622 case ISD::SMUL_LOHI: {
2623 if (Subtarget->isThumb1Only())
2625 if (Subtarget->isThumb()) {
2626 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2627 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2628 return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32, Ops);
2630 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2631 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2632 CurDAG->getRegister(0, MVT::i32) };
2633 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2634 ARM::SMULL : ARM::SMULLv5,
2635 dl, MVT::i32, MVT::i32, Ops);
2638 case ARMISD::UMLAL:{
2639 if (Subtarget->isThumb()) {
2640 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2641 N->getOperand(3), getAL(CurDAG),
2642 CurDAG->getRegister(0, MVT::i32)};
2643 return CurDAG->getMachineNode(ARM::t2UMLAL, dl, MVT::i32, MVT::i32, Ops);
2645 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2646 N->getOperand(3), getAL(CurDAG),
2647 CurDAG->getRegister(0, MVT::i32),
2648 CurDAG->getRegister(0, MVT::i32) };
2649 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2650 ARM::UMLAL : ARM::UMLALv5,
2651 dl, MVT::i32, MVT::i32, Ops);
2654 case ARMISD::SMLAL:{
2655 if (Subtarget->isThumb()) {
2656 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2657 N->getOperand(3), getAL(CurDAG),
2658 CurDAG->getRegister(0, MVT::i32)};
2659 return CurDAG->getMachineNode(ARM::t2SMLAL, dl, MVT::i32, MVT::i32, Ops);
2661 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2662 N->getOperand(3), getAL(CurDAG),
2663 CurDAG->getRegister(0, MVT::i32),
2664 CurDAG->getRegister(0, MVT::i32) };
2665 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2666 ARM::SMLAL : ARM::SMLALv5,
2667 dl, MVT::i32, MVT::i32, Ops);
2671 SDNode *ResNode = nullptr;
2672 if (Subtarget->isThumb() && Subtarget->hasThumb2())
2673 ResNode = SelectT2IndexedLoad(N);
2675 ResNode = SelectARMIndexedLoad(N);
2678 // Other cases are autogenerated.
2681 case ARMISD::BRCOND: {
2682 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2683 // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc)
2684 // Pattern complexity = 6 cost = 1 size = 0
2686 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2687 // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc)
2688 // Pattern complexity = 6 cost = 1 size = 0
2690 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2691 // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc)
2692 // Pattern complexity = 6 cost = 1 size = 0
2694 unsigned Opc = Subtarget->isThumb() ?
2695 ((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
2696 SDValue Chain = N->getOperand(0);
2697 SDValue N1 = N->getOperand(1);
2698 SDValue N2 = N->getOperand(2);
2699 SDValue N3 = N->getOperand(3);
2700 SDValue InFlag = N->getOperand(4);
2701 assert(N1.getOpcode() == ISD::BasicBlock);
2702 assert(N2.getOpcode() == ISD::Constant);
2703 assert(N3.getOpcode() == ISD::Register);
2705 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
2706 cast<ConstantSDNode>(N2)->getZExtValue()),
2708 SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
2709 SDNode *ResNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
2711 Chain = SDValue(ResNode, 0);
2712 if (N->getNumValues() == 2) {
2713 InFlag = SDValue(ResNode, 1);
2714 ReplaceUses(SDValue(N, 1), InFlag);
2716 ReplaceUses(SDValue(N, 0),
2717 SDValue(Chain.getNode(), Chain.getResNo()));
2720 case ARMISD::VZIP: {
2722 EVT VT = N->getValueType(0);
2723 switch (VT.getSimpleVT().SimpleTy) {
2724 default: return nullptr;
2725 case MVT::v8i8: Opc = ARM::VZIPd8; break;
2726 case MVT::v4i16: Opc = ARM::VZIPd16; break;
2728 // vzip.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
2729 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2730 case MVT::v16i8: Opc = ARM::VZIPq8; break;
2731 case MVT::v8i16: Opc = ARM::VZIPq16; break;
2733 case MVT::v4i32: Opc = ARM::VZIPq32; break;
2735 SDValue Pred = getAL(CurDAG);
2736 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2737 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2738 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
2740 case ARMISD::VUZP: {
2742 EVT VT = N->getValueType(0);
2743 switch (VT.getSimpleVT().SimpleTy) {
2744 default: return nullptr;
2745 case MVT::v8i8: Opc = ARM::VUZPd8; break;
2746 case MVT::v4i16: Opc = ARM::VUZPd16; break;
2748 // vuzp.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
2749 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2750 case MVT::v16i8: Opc = ARM::VUZPq8; break;
2751 case MVT::v8i16: Opc = ARM::VUZPq16; break;
2753 case MVT::v4i32: Opc = ARM::VUZPq32; break;
2755 SDValue Pred = getAL(CurDAG);
2756 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2757 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2758 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
2760 case ARMISD::VTRN: {
2762 EVT VT = N->getValueType(0);
2763 switch (VT.getSimpleVT().SimpleTy) {
2764 default: return nullptr;
2765 case MVT::v8i8: Opc = ARM::VTRNd8; break;
2766 case MVT::v4i16: Opc = ARM::VTRNd16; break;
2768 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2769 case MVT::v16i8: Opc = ARM::VTRNq8; break;
2770 case MVT::v8i16: Opc = ARM::VTRNq16; break;
2772 case MVT::v4i32: Opc = ARM::VTRNq32; break;
2774 SDValue Pred = getAL(CurDAG);
2775 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2776 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2777 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
2779 case ARMISD::BUILD_VECTOR: {
2780 EVT VecVT = N->getValueType(0);
2781 EVT EltVT = VecVT.getVectorElementType();
2782 unsigned NumElts = VecVT.getVectorNumElements();
2783 if (EltVT == MVT::f64) {
2784 assert(NumElts == 2 && "unexpected type for BUILD_VECTOR");
2785 return createDRegPairNode(VecVT, N->getOperand(0), N->getOperand(1));
2787 assert(EltVT == MVT::f32 && "unexpected type for BUILD_VECTOR");
2789 return createSRegPairNode(VecVT, N->getOperand(0), N->getOperand(1));
2790 assert(NumElts == 4 && "unexpected type for BUILD_VECTOR");
2791 return createQuadSRegsNode(VecVT, N->getOperand(0), N->getOperand(1),
2792 N->getOperand(2), N->getOperand(3));
2795 case ARMISD::VLD2DUP: {
2796 static const uint16_t Opcodes[] = { ARM::VLD2DUPd8, ARM::VLD2DUPd16,
2798 return SelectVLDDup(N, false, 2, Opcodes);
2801 case ARMISD::VLD3DUP: {
2802 static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo,
2803 ARM::VLD3DUPd16Pseudo,
2804 ARM::VLD3DUPd32Pseudo };
2805 return SelectVLDDup(N, false, 3, Opcodes);
2808 case ARMISD::VLD4DUP: {
2809 static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo,
2810 ARM::VLD4DUPd16Pseudo,
2811 ARM::VLD4DUPd32Pseudo };
2812 return SelectVLDDup(N, false, 4, Opcodes);
2815 case ARMISD::VLD2DUP_UPD: {
2816 static const uint16_t Opcodes[] = { ARM::VLD2DUPd8wb_fixed,
2817 ARM::VLD2DUPd16wb_fixed,
2818 ARM::VLD2DUPd32wb_fixed };
2819 return SelectVLDDup(N, true, 2, Opcodes);
2822 case ARMISD::VLD3DUP_UPD: {
2823 static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo_UPD,
2824 ARM::VLD3DUPd16Pseudo_UPD,
2825 ARM::VLD3DUPd32Pseudo_UPD };
2826 return SelectVLDDup(N, true, 3, Opcodes);
2829 case ARMISD::VLD4DUP_UPD: {
2830 static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo_UPD,
2831 ARM::VLD4DUPd16Pseudo_UPD,
2832 ARM::VLD4DUPd32Pseudo_UPD };
2833 return SelectVLDDup(N, true, 4, Opcodes);
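// The *_UPD nodes below use the '_fixed' writeback opcodes; the VLD/VST
// selectors switch to the corresponding '_register' form (via
// getVLDSTRegisterUpdateOpcode) when the post-increment amount is a register
// rather than the implicit fixed stride.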
2836 case ARMISD::VLD1_UPD: {
2837 static const uint16_t DOpcodes[] = { ARM::VLD1d8wb_fixed,
2838 ARM::VLD1d16wb_fixed,
2839 ARM::VLD1d32wb_fixed,
2840 ARM::VLD1d64wb_fixed };
2841 static const uint16_t QOpcodes[] = { ARM::VLD1q8wb_fixed,
2842 ARM::VLD1q16wb_fixed,
2843 ARM::VLD1q32wb_fixed,
2844 ARM::VLD1q64wb_fixed };
2845 return SelectVLD(N, true, 1, DOpcodes, QOpcodes, nullptr);
2848 case ARMISD::VLD2_UPD: {
2849 static const uint16_t DOpcodes[] = { ARM::VLD2d8wb_fixed,
2850 ARM::VLD2d16wb_fixed,
2851 ARM::VLD2d32wb_fixed,
2852 ARM::VLD1q64wb_fixed};
2853 static const uint16_t QOpcodes[] = { ARM::VLD2q8PseudoWB_fixed,
2854 ARM::VLD2q16PseudoWB_fixed,
2855 ARM::VLD2q32PseudoWB_fixed };
2856 return SelectVLD(N, true, 2, DOpcodes, QOpcodes, nullptr);
2859 case ARMISD::VLD3_UPD: {
2860 static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo_UPD,
2861 ARM::VLD3d16Pseudo_UPD,
2862 ARM::VLD3d32Pseudo_UPD,
2863 ARM::VLD1d64TPseudoWB_fixed};
2864 static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
2865 ARM::VLD3q16Pseudo_UPD,
2866 ARM::VLD3q32Pseudo_UPD };
2867 static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo_UPD,
2868 ARM::VLD3q16oddPseudo_UPD,
2869 ARM::VLD3q32oddPseudo_UPD };
2870 return SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
2873 case ARMISD::VLD4_UPD: {
2874 static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo_UPD,
2875 ARM::VLD4d16Pseudo_UPD,
2876 ARM::VLD4d32Pseudo_UPD,
2877 ARM::VLD1d64QPseudoWB_fixed};
2878 static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
2879 ARM::VLD4q16Pseudo_UPD,
2880 ARM::VLD4q32Pseudo_UPD };
2881 static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo_UPD,
2882 ARM::VLD4q16oddPseudo_UPD,
2883 ARM::VLD4q32oddPseudo_UPD };
2884 return SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
2887 case ARMISD::VLD2LN_UPD: {
2888 static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo_UPD,
2889 ARM::VLD2LNd16Pseudo_UPD,
2890 ARM::VLD2LNd32Pseudo_UPD };
2891 static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo_UPD,
2892 ARM::VLD2LNq32Pseudo_UPD };
2893 return SelectVLDSTLane(N, true, true, 2, DOpcodes, QOpcodes);
2896 case ARMISD::VLD3LN_UPD: {
2897 static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo_UPD,
2898 ARM::VLD3LNd16Pseudo_UPD,
2899 ARM::VLD3LNd32Pseudo_UPD };
2900 static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo_UPD,
2901 ARM::VLD3LNq32Pseudo_UPD };
2902 return SelectVLDSTLane(N, true, true, 3, DOpcodes, QOpcodes);
2905 case ARMISD::VLD4LN_UPD: {
2906 static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo_UPD,
2907 ARM::VLD4LNd16Pseudo_UPD,
2908 ARM::VLD4LNd32Pseudo_UPD };
2909 static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo_UPD,
2910 ARM::VLD4LNq32Pseudo_UPD };
2911 return SelectVLDSTLane(N, true, true, 4, DOpcodes, QOpcodes);
2914 case ARMISD::VST1_UPD: {
2915 static const uint16_t DOpcodes[] = { ARM::VST1d8wb_fixed,
2916 ARM::VST1d16wb_fixed,
2917 ARM::VST1d32wb_fixed,
2918 ARM::VST1d64wb_fixed };
2919 static const uint16_t QOpcodes[] = { ARM::VST1q8wb_fixed,
2920 ARM::VST1q16wb_fixed,
2921 ARM::VST1q32wb_fixed,
2922 ARM::VST1q64wb_fixed };
2923 return SelectVST(N, true, 1, DOpcodes, QOpcodes, nullptr);
2926 case ARMISD::VST2_UPD: {
2927 static const uint16_t DOpcodes[] = { ARM::VST2d8wb_fixed,
2928 ARM::VST2d16wb_fixed,
2929 ARM::VST2d32wb_fixed,
2930 ARM::VST1q64wb_fixed};
2931 static const uint16_t QOpcodes[] = { ARM::VST2q8PseudoWB_fixed,
2932 ARM::VST2q16PseudoWB_fixed,
2933 ARM::VST2q32PseudoWB_fixed };
2934 return SelectVST(N, true, 2, DOpcodes, QOpcodes, nullptr);
2937 case ARMISD::VST3_UPD: {
2938 static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo_UPD,
2939 ARM::VST3d16Pseudo_UPD,
2940 ARM::VST3d32Pseudo_UPD,
2941 ARM::VST1d64TPseudoWB_fixed};
2942 static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
2943 ARM::VST3q16Pseudo_UPD,
2944 ARM::VST3q32Pseudo_UPD };
2945 static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo_UPD,
2946 ARM::VST3q16oddPseudo_UPD,
2947 ARM::VST3q32oddPseudo_UPD };
2948 return SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
2951 case ARMISD::VST4_UPD: {
2952 static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo_UPD,
2953 ARM::VST4d16Pseudo_UPD,
2954 ARM::VST4d32Pseudo_UPD,
2955 ARM::VST1d64QPseudoWB_fixed};
2956 static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
2957 ARM::VST4q16Pseudo_UPD,
2958 ARM::VST4q32Pseudo_UPD };
2959 static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo_UPD,
2960 ARM::VST4q16oddPseudo_UPD,
2961 ARM::VST4q32oddPseudo_UPD };
2962 return SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
2965 case ARMISD::VST2LN_UPD: {
2966 static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo_UPD,
2967 ARM::VST2LNd16Pseudo_UPD,
2968 ARM::VST2LNd32Pseudo_UPD };
2969 static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo_UPD,
2970 ARM::VST2LNq32Pseudo_UPD };
2971 return SelectVLDSTLane(N, false, true, 2, DOpcodes, QOpcodes);
2974 case ARMISD::VST3LN_UPD: {
2975 static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo_UPD,
2976 ARM::VST3LNd16Pseudo_UPD,
2977 ARM::VST3LNd32Pseudo_UPD };
2978 static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo_UPD,
2979 ARM::VST3LNq32Pseudo_UPD };
2980 return SelectVLDSTLane(N, false, true, 3, DOpcodes, QOpcodes);
2983 case ARMISD::VST4LN_UPD: {
2984 static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo_UPD,
2985 ARM::VST4LNd16Pseudo_UPD,
2986 ARM::VST4LNd32Pseudo_UPD };
2987 static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo_UPD,
2988 ARM::VST4LNq32Pseudo_UPD };
2989 return SelectVLDSTLane(N, false, true, 4, DOpcodes, QOpcodes);
2992 case ISD::INTRINSIC_VOID:
2993 case ISD::INTRINSIC_W_CHAIN: {
2994 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2999 case Intrinsic::arm_ldaexd:
3000 case Intrinsic::arm_ldrexd: {
3002 SDValue Chain = N->getOperand(0);
3003 SDValue MemAddr = N->getOperand(2);
3004 bool isThumb = Subtarget->isThumb() && Subtarget->hasThumb2();
3006 bool IsAcquire = IntNo == Intrinsic::arm_ldaexd;
3007 unsigned NewOpc = isThumb ? (IsAcquire ? ARM::t2LDAEXD : ARM::t2LDREXD)
3008 : (IsAcquire ? ARM::LDAEXD : ARM::LDREXD);
3010 // arm_ldrexd returns an i64 value in {i32, i32}.
3011 std::vector<EVT> ResTys;
3013 ResTys.push_back(MVT::i32);
3014 ResTys.push_back(MVT::i32);
3016 ResTys.push_back(MVT::Untyped);
3017 ResTys.push_back(MVT::Other);
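// On Thumb2 the machine node produces two i32 results; on ARM it produces a
// single GPRPair (Untyped) result that is split with EXTRACT_SUBREG below.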
3019 // Place arguments in the right order.
3020 SmallVector<SDValue, 7> Ops;
3021 Ops.push_back(MemAddr);
3022 Ops.push_back(getAL(CurDAG));
3023 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
3024 Ops.push_back(Chain);
3025 SDNode *Ld = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops);
3026 // Transfer memoperands.
3027 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
3028 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
3029 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
3032 SDValue OutChain = isThumb ? SDValue(Ld, 2) : SDValue(Ld, 1);
3033 if (!SDValue(N, 0).use_empty()) {
3036 Result = SDValue(Ld, 0);
3038 SDValue SubRegIdx = CurDAG->getTargetConstant(ARM::gsub_0, MVT::i32);
3039 SDNode *ResNode = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
3040 dl, MVT::i32, SDValue(Ld, 0), SubRegIdx);
3041 Result = SDValue(ResNode,0);
3043 ReplaceUses(SDValue(N, 0), Result);
3045 if (!SDValue(N, 1).use_empty()) {
3048 Result = SDValue(Ld, 1);
3050 SDValue SubRegIdx = CurDAG->getTargetConstant(ARM::gsub_1, MVT::i32);
3051 SDNode *ResNode = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
3052 dl, MVT::i32, SDValue(Ld, 0), SubRegIdx);
3053 Result = SDValue(ResNode,0);
3055 ReplaceUses(SDValue(N, 1), Result);
3057 ReplaceUses(SDValue(N, 2), OutChain);
3060 case Intrinsic::arm_stlexd:
3061 case Intrinsic::arm_strexd: {
3063 SDValue Chain = N->getOperand(0);
3064 SDValue Val0 = N->getOperand(2);
3065 SDValue Val1 = N->getOperand(3);
3066 SDValue MemAddr = N->getOperand(4);
3068 // A store-exclusive double returns an i32 value that is the status of
3069 // the issued store.
3070 EVT ResTys[] = { MVT::i32, MVT::Other };
3072 bool isThumb = Subtarget->isThumb() && Subtarget->hasThumb2();
3073 // Place arguments in the right order.
3074 SmallVector<SDValue, 7> Ops;
3076 Ops.push_back(Val0);
3077 Ops.push_back(Val1);
3079 // arm_strexd uses GPRPair.
3080 Ops.push_back(SDValue(createGPRPairNode(MVT::Untyped, Val0, Val1), 0));
3081 Ops.push_back(MemAddr);
3082 Ops.push_back(getAL(CurDAG));
3083 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
3084 Ops.push_back(Chain);
3086 bool IsRelease = IntNo == Intrinsic::arm_stlexd;
3087 unsigned NewOpc = isThumb ? (IsRelease ? ARM::t2STLEXD : ARM::t2STREXD)
3088 : (IsRelease ? ARM::STLEXD : ARM::STREXD);
3090 SDNode *St = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops);
3091 // Transfer memoperands.
3092 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
3093 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
3094 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
3099 case Intrinsic::arm_neon_vld1: {
3100 static const uint16_t DOpcodes[] = { ARM::VLD1d8, ARM::VLD1d16,
3101 ARM::VLD1d32, ARM::VLD1d64 };
3102 static const uint16_t QOpcodes[] = { ARM::VLD1q8, ARM::VLD1q16,
3103 ARM::VLD1q32, ARM::VLD1q64};
3104 return SelectVLD(N, false, 1, DOpcodes, QOpcodes, nullptr);
3107 case Intrinsic::arm_neon_vld2: {
3108 static const uint16_t DOpcodes[] = { ARM::VLD2d8, ARM::VLD2d16,
3109 ARM::VLD2d32, ARM::VLD1q64 };
3110 static const uint16_t QOpcodes[] = { ARM::VLD2q8Pseudo, ARM::VLD2q16Pseudo,
3111 ARM::VLD2q32Pseudo };
3112 return SelectVLD(N, false, 2, DOpcodes, QOpcodes, nullptr);
3115 case Intrinsic::arm_neon_vld3: {
3116 static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo,
3119 ARM::VLD1d64TPseudo };
3120 static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
3121 ARM::VLD3q16Pseudo_UPD,
3122 ARM::VLD3q32Pseudo_UPD };
3123 static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo,
3124 ARM::VLD3q16oddPseudo,
3125 ARM::VLD3q32oddPseudo };
3126 return SelectVLD(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
3129 case Intrinsic::arm_neon_vld4: {
3130 static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo,
3133 ARM::VLD1d64QPseudo };
3134 static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
3135 ARM::VLD4q16Pseudo_UPD,
3136 ARM::VLD4q32Pseudo_UPD };
3137 static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo,
3138 ARM::VLD4q16oddPseudo,
3139 ARM::VLD4q32oddPseudo };
3140 return SelectVLD(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
3143 case Intrinsic::arm_neon_vld2lane: {
3144 static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo,
3145 ARM::VLD2LNd16Pseudo,
3146 ARM::VLD2LNd32Pseudo };
3147 static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo,
3148 ARM::VLD2LNq32Pseudo };
3149 return SelectVLDSTLane(N, true, false, 2, DOpcodes, QOpcodes);
3152 case Intrinsic::arm_neon_vld3lane: {
3153 static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo,
3154 ARM::VLD3LNd16Pseudo,
3155 ARM::VLD3LNd32Pseudo };
3156 static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo,
3157 ARM::VLD3LNq32Pseudo };
3158 return SelectVLDSTLane(N, true, false, 3, DOpcodes, QOpcodes);
3161 case Intrinsic::arm_neon_vld4lane: {
3162 static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo,
3163 ARM::VLD4LNd16Pseudo,
3164 ARM::VLD4LNd32Pseudo };
3165 static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo,
3166 ARM::VLD4LNq32Pseudo };
3167 return SelectVLDSTLane(N, true, false, 4, DOpcodes, QOpcodes);
3170 case Intrinsic::arm_neon_vst1: {
3171 static const uint16_t DOpcodes[] = { ARM::VST1d8, ARM::VST1d16,
3172 ARM::VST1d32, ARM::VST1d64 };
3173 static const uint16_t QOpcodes[] = { ARM::VST1q8, ARM::VST1q16,
3174 ARM::VST1q32, ARM::VST1q64 };
3175 return SelectVST(N, false, 1, DOpcodes, QOpcodes, nullptr);
3178 case Intrinsic::arm_neon_vst2: {
3179 static const uint16_t DOpcodes[] = { ARM::VST2d8, ARM::VST2d16,
3180 ARM::VST2d32, ARM::VST1q64 };
3181 static const uint16_t QOpcodes[] = { ARM::VST2q8Pseudo, ARM::VST2q16Pseudo,
3182 ARM::VST2q32Pseudo };
3183 return SelectVST(N, false, 2, DOpcodes, QOpcodes, nullptr);
3186 case Intrinsic::arm_neon_vst3: {
3187 static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo,
3190 ARM::VST1d64TPseudo };
3191 static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
3192 ARM::VST3q16Pseudo_UPD,
3193 ARM::VST3q32Pseudo_UPD };
3194 static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo,
3195 ARM::VST3q16oddPseudo,
3196 ARM::VST3q32oddPseudo };
3197 return SelectVST(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
3200 case Intrinsic::arm_neon_vst4: {
3201 static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo,
3204 ARM::VST1d64QPseudo };
3205 static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
3206 ARM::VST4q16Pseudo_UPD,
3207 ARM::VST4q32Pseudo_UPD };
3208 static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo,
3209 ARM::VST4q16oddPseudo,
3210 ARM::VST4q32oddPseudo };
3211 return SelectVST(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
3214 case Intrinsic::arm_neon_vst2lane: {
3215 static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo,
3216 ARM::VST2LNd16Pseudo,
3217 ARM::VST2LNd32Pseudo };
3218 static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo,
3219 ARM::VST2LNq32Pseudo };
3220 return SelectVLDSTLane(N, false, false, 2, DOpcodes, QOpcodes);
3223 case Intrinsic::arm_neon_vst3lane: {
3224 static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo,
3225 ARM::VST3LNd16Pseudo,
3226 ARM::VST3LNd32Pseudo };
3227 static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo,
3228 ARM::VST3LNq32Pseudo };
3229 return SelectVLDSTLane(N, false, false, 3, DOpcodes, QOpcodes);
3232 case Intrinsic::arm_neon_vst4lane: {
3233 static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo,
3234 ARM::VST4LNd16Pseudo,
3235 ARM::VST4LNd32Pseudo };
3236 static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo,
3237 ARM::VST4LNq32Pseudo };
3238 return SelectVLDSTLane(N, false, false, 4, DOpcodes, QOpcodes);
3244 case ISD::INTRINSIC_WO_CHAIN: {
3245 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
3250 case Intrinsic::arm_neon_vtbl2:
3251 return SelectVTBL(N, false, 2, ARM::VTBL2);
3252 case Intrinsic::arm_neon_vtbl3:
3253 return SelectVTBL(N, false, 3, ARM::VTBL3Pseudo);
3254 case Intrinsic::arm_neon_vtbl4:
3255 return SelectVTBL(N, false, 4, ARM::VTBL4Pseudo);
3257 case Intrinsic::arm_neon_vtbx2:
3258 return SelectVTBL(N, true, 2, ARM::VTBX2);
3259 case Intrinsic::arm_neon_vtbx3:
3260 return SelectVTBL(N, true, 3, ARM::VTBX3Pseudo);
3261 case Intrinsic::arm_neon_vtbx4:
3262 return SelectVTBL(N, true, 4, ARM::VTBX4Pseudo);
3267 case ARMISD::VTBL1: {
3269 EVT VT = N->getValueType(0);
3270 SmallVector<SDValue, 6> Ops;
3272 Ops.push_back(N->getOperand(0));
3273 Ops.push_back(N->getOperand(1));
3274 Ops.push_back(getAL(CurDAG)); // Predicate
3275 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
3276 return CurDAG->getMachineNode(ARM::VTBL1, dl, VT, Ops);
3278 case ARMISD::VTBL2: {
3280 EVT VT = N->getValueType(0);
3282 // Form a REG_SEQUENCE to force register allocation.
3283 SDValue V0 = N->getOperand(0);
3284 SDValue V1 = N->getOperand(1);
3285 SDValue RegSeq = SDValue(createDRegPairNode(MVT::v16i8, V0, V1), 0);
3287 SmallVector<SDValue, 6> Ops;
3288 Ops.push_back(RegSeq);
3289 Ops.push_back(N->getOperand(2));
3290 Ops.push_back(getAL(CurDAG)); // Predicate
3291 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
3292 return CurDAG->getMachineNode(ARM::VTBL2, dl, VT, Ops);
3295 case ISD::CONCAT_VECTORS:
3296 return SelectConcatVector(N);
3299 return SelectCode(N);
3302 SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N){
3303 std::vector<SDValue> AsmNodeOperands;
3304 unsigned Flag, Kind;
3305 bool Changed = false;
3306 unsigned NumOps = N->getNumOperands();
3308 // Normally, i64 data is bound to two arbitrary GPRs for the "%r" constraint.
3309 // However, some instructions (e.g. ldrexd/strexd in ARM mode) require
3310 // (even/even+1) GPRs and use %n and %Hn to refer to the individual regs
3311 // respectively. Since there is no constraint to explicitly specify a
3312 // reg pair, we use GPRPair reg class for "%r" for 64-bit data. For Thumb,
3313 // the 64-bit data may be referred by H, Q, R modifiers, so we still pack
3314 // them into a GPRPair.
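// Concretely: a 64-bit "=r" output is rewritten from two i32 register defs
// into one GPRPair def, and gsub_0/gsub_1 copies recreate the original i32
// values; a 64-bit "r" input is first packed into a GPRPair with a
// REG_SEQUENCE before being passed to the INLINEASM node.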
3317 SDValue Glue = N->getGluedNode() ? N->getOperand(NumOps-1)
3318 : SDValue(nullptr,0);
3320 SmallVector<bool, 8> OpChanged;
3321 // Glue node will be appended late.
3322 for(unsigned i = 0, e = N->getGluedNode() ? NumOps - 1 : NumOps; i < e; ++i) {
3323 SDValue op = N->getOperand(i);
3324 AsmNodeOperands.push_back(op);
3326 if (i < InlineAsm::Op_FirstOperand)
3329 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(i))) {
3330 Flag = C->getZExtValue();
3331 Kind = InlineAsm::getKind(Flag);
3336 // Immediate operands to inline asm in the SelectionDAG are modeled with
3337 // two operands. The first is a constant of value InlineAsm::Kind_Imm, and
3338 // the second is a constant with the value of the immediate. If we get here
3339 // and we have a Kind_Imm, skip the next operand, and continue.
3340 if (Kind == InlineAsm::Kind_Imm) {
3341 SDValue op = N->getOperand(++i);
3342 AsmNodeOperands.push_back(op);
3346 unsigned NumRegs = InlineAsm::getNumOperandRegisters(Flag);
3348 OpChanged.push_back(false);
3350 unsigned DefIdx = 0;
3351 bool IsTiedToChangedOp = false;
3352 // If it's a use that is tied with a previous def, it has no
3353 // reg class constraint.
3354 if (Changed && InlineAsm::isUseOperandTiedToDef(Flag, DefIdx))
3355 IsTiedToChangedOp = OpChanged[DefIdx];
3357 if (Kind != InlineAsm::Kind_RegUse && Kind != InlineAsm::Kind_RegDef
3358 && Kind != InlineAsm::Kind_RegDefEarlyClobber)
3362 bool HasRC = InlineAsm::hasRegClassConstraint(Flag, RC);
3363 if ((!IsTiedToChangedOp && (!HasRC || RC != ARM::GPRRegClassID))
3367 assert((i+2 < NumOps) && "Invalid number of operands in inline asm");
3368 SDValue V0 = N->getOperand(i+1);
3369 SDValue V1 = N->getOperand(i+2);
3370 unsigned Reg0 = cast<RegisterSDNode>(V0)->getReg();
3371 unsigned Reg1 = cast<RegisterSDNode>(V1)->getReg();
3373 MachineRegisterInfo &MRI = MF->getRegInfo();
3375 if (Kind == InlineAsm::Kind_RegDef ||
3376 Kind == InlineAsm::Kind_RegDefEarlyClobber) {
3377 // Replace the two GPRs with 1 GPRPair and copy values from GPRPair to
3378 // the original GPRs.
3380 unsigned GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
3381 PairedReg = CurDAG->getRegister(GPVR, MVT::Untyped);
3382 SDValue Chain = SDValue(N,0);
3384 SDNode *GU = N->getGluedUser();
3385 SDValue RegCopy = CurDAG->getCopyFromReg(Chain, dl, GPVR, MVT::Untyped,
3388 // Extract values from a GPRPair reg and copy to the original GPR reg.
3389 SDValue Sub0 = CurDAG->getTargetExtractSubreg(ARM::gsub_0, dl, MVT::i32,
3391 SDValue Sub1 = CurDAG->getTargetExtractSubreg(ARM::gsub_1, dl, MVT::i32,
3393 SDValue T0 = CurDAG->getCopyToReg(Sub0, dl, Reg0, Sub0,
3394 RegCopy.getValue(1));
3395 SDValue T1 = CurDAG->getCopyToReg(Sub1, dl, Reg1, Sub1, T0.getValue(1));
3397 // Update the original glue user.
3398 std::vector<SDValue> Ops(GU->op_begin(), GU->op_end()-1);
3399 Ops.push_back(T1.getValue(1));
3400 CurDAG->UpdateNodeOperands(GU, Ops);
3404 // For Kind == InlineAsm::Kind_RegUse, we first copy two GPRs into a
3405 // GPRPair and then pass the GPRPair to the inline asm.
3406 SDValue Chain = AsmNodeOperands[InlineAsm::Op_InputChain];
3408 // As REG_SEQ doesn't take RegisterSDNode, we copy them first.
3409 SDValue T0 = CurDAG->getCopyFromReg(Chain, dl, Reg0, MVT::i32,
3411 SDValue T1 = CurDAG->getCopyFromReg(Chain, dl, Reg1, MVT::i32,
3413 SDValue Pair = SDValue(createGPRPairNode(MVT::Untyped, T0, T1), 0);
3415 // Copy REG_SEQ into a GPRPair-typed VR and replace the original two
3416 // i32 VRs of inline asm with it.
3417 unsigned GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
3418 PairedReg = CurDAG->getRegister(GPVR, MVT::Untyped);
3419 Chain = CurDAG->getCopyToReg(T1, dl, GPVR, Pair, T1.getValue(1));
3421 AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
3422 Glue = Chain.getValue(1);
3427 if(PairedReg.getNode()) {
3428 OpChanged[OpChanged.size() - 1] = true;
3429 Flag = InlineAsm::getFlagWord(Kind, 1 /* RegNum*/);
3430 if (IsTiedToChangedOp)
3431 Flag = InlineAsm::getFlagWordForMatchingOp(Flag, DefIdx);
3433 Flag = InlineAsm::getFlagWordForRegClass(Flag, ARM::GPRPairRegClassID);
3434 // Replace the current flag.
3435 AsmNodeOperands[AsmNodeOperands.size() - 1] = CurDAG->getTargetConstant(
3437 // Add the new register node and skip the original two GPRs.
3438 AsmNodeOperands.push_back(PairedReg);
3439 // Skip the next two GPRs.
3445 AsmNodeOperands.push_back(Glue);
3449 SDValue New = CurDAG->getNode(ISD::INLINEASM, SDLoc(N),
3450 CurDAG->getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
3452 return New.getNode();
3456 bool ARMDAGToDAGISel::
3457 SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
3458 std::vector<SDValue> &OutOps) {
3459 assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
3460 // Require the address to be in a register. That is safe for all ARM
3461 // variants and it is hard to do anything much smarter without knowing
3462 // how the operand is used.
3463 OutOps.push_back(Op);
3464 return false;
3467 /// createARMISelDag - This pass converts a legalized DAG into an
3468 /// ARM-specific DAG, ready for instruction scheduling.
3470 FunctionPass *llvm::createARMISelDag(ARMBaseTargetMachine &TM,
3471 CodeGenOpt::Level OptLevel) {
3472 return new ARMDAGToDAGISel(TM, OptLevel);