//===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the ARM target.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-isel"
#include "ARMBaseInstrInfo.h"
#include "ARMTargetMachine.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

static cl::opt<bool>
DisableShifterOp("disable-shifter-op", cl::Hidden,
  cl::desc("Disable isel of shifter-op"),
  cl::init(false));

static cl::opt<bool>
CheckVMLxHazard("check-vmlx-hazard", cl::Hidden,
  cl::desc("Check fp vmla / vmls hazard at isel time"),
  cl::init(true));

//===--------------------------------------------------------------------===//
/// ARMDAGToDAGISel - ARM specific code to select ARM machine
/// instructions for SelectionDAG operations.
///
namespace {

enum AddrMode2Type {
  AM2_BASE, // Simple AM2 (+-imm12)
  AM2_SHOP  // Shifter-op AM2
};

class ARMDAGToDAGISel : public SelectionDAGISel {
  ARMBaseTargetMachine &TM;

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;

public:
  explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm,
                           CodeGenOpt::Level OptLevel)
    : SelectionDAGISel(tm, OptLevel), TM(tm),
      Subtarget(&TM.getSubtarget<ARMSubtarget>()) {
  }

  virtual const char *getPassName() const {
    return "ARM Instruction Selection";
  }

  virtual void PreprocessISelDAG();

  /// getI32Imm - Return a target constant of type i32 with the specified
  /// value.
  inline SDValue getI32Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i32);
  }

  SDNode *Select(SDNode *N);

  bool hasNoVMLxHazardUse(SDNode *N) const;
  bool isShifterOpProfitable(const SDValue &Shift,
                             ARM_AM::ShiftOpc ShOpcVal, unsigned ShAmt);
  bool SelectRegShifterOperand(SDValue N, SDValue &A,
                               SDValue &B, SDValue &C,
                               bool CheckProfitability = true);
  bool SelectImmShifterOperand(SDValue N, SDValue &A,
                               SDValue &B, bool CheckProfitability = true);
  bool SelectShiftRegShifterOperand(SDValue N, SDValue &A,
                                    SDValue &B, SDValue &C) {
    // Don't apply the profitability check
    return SelectRegShifterOperand(N, A, B, C, false);
  }
  bool SelectShiftImmShifterOperand(SDValue N, SDValue &A,
                                    SDValue &B) {
    // Don't apply the profitability check
    return SelectImmShifterOperand(N, A, B, false);
  }

  bool SelectAddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset, SDValue &Opc);

  AddrMode2Type SelectAddrMode2Worker(SDValue N, SDValue &Base,
                                      SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2Base(SDValue N, SDValue &Base, SDValue &Offset,
                           SDValue &Opc) {
    return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_BASE;
  }

  bool SelectAddrMode2ShOp(SDValue N, SDValue &Base, SDValue &Offset,
                           SDValue &Opc) {
    return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_SHOP;
  }

  bool SelectAddrMode2(SDValue N, SDValue &Base, SDValue &Offset,
                       SDValue &Opc) {
    SelectAddrMode2Worker(N, Base, Offset, Opc);
//  return SelectAddrMode2ShOp(N, Base, Offset, Opc);
    // This always matches one way or another.
    return true;
  }

  bool SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
                                SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
                                SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
                                   SDValue &Offset, SDValue &Opc);
  bool SelectAddrOffsetNone(SDValue N, SDValue &Base);
  bool SelectAddrMode3(SDValue N, SDValue &Base,
                       SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode3Offset(SDNode *Op, SDValue N,
                             SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode5(SDValue N, SDValue &Base,
                       SDValue &Offset);
  bool SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
                       SDValue &Align);
  bool SelectAddrMode6Offset(SDNode *Op, SDValue N, SDValue &Offset);

  bool SelectAddrModePC(SDValue N, SDValue &Offset, SDValue &Label);

  // Thumb Addressing Modes:
  bool SelectThumbAddrModeRR(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI(SDValue N, SDValue &Base, SDValue &Offset,
                             unsigned Scale);
  bool SelectThumbAddrModeRI5S1(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI5S2(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI5S4(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeImm5S(SDValue N, unsigned Scale, SDValue &Base,
                                SDValue &OffImm);
  bool SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeSP(SDValue N, SDValue &Base, SDValue &OffImm);

  // Thumb 2 Addressing Modes:
  bool SelectT2ShifterOperandReg(SDValue N,
                                 SDValue &BaseReg, SDValue &Opc);
  bool SelectT2AddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectT2AddrModeImm8(SDValue N, SDValue &Base,
                            SDValue &OffImm);
  bool SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
                                  SDValue &OffImm);
  bool SelectT2AddrModeSoReg(SDValue N, SDValue &Base,
                             SDValue &OffReg, SDValue &ShImm);

  inline bool is_so_imm(unsigned Imm) const {
    return ARM_AM::getSOImmVal(Imm) != -1;
  }

  inline bool is_so_imm_not(unsigned Imm) const {
    return ARM_AM::getSOImmVal(~Imm) != -1;
  }

  inline bool is_t2_so_imm(unsigned Imm) const {
    return ARM_AM::getT2SOImmVal(Imm) != -1;
  }

  inline bool is_t2_so_imm_not(unsigned Imm) const {
    return ARM_AM::getT2SOImmVal(~Imm) != -1;
  }
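
  // Reference note (added commentary, not from the original source): an
  // ARM-mode so_imm is an 8-bit value rotated right by an even amount, so
  // 0xFF000000 (0xFF ror 8) is encodable while 0x00000101 is not; the
  // Thumb-2 t2_so_imm form additionally accepts splatted byte patterns
  // such as 0x00FF00FF and 0xABABABAB.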

  // Include the pieces autogenerated from the target description.
#include "ARMGenDAGISel.inc"

private:
  /// SelectARMIndexedLoad - Indexed (pre/post inc/dec) load matching code for
  /// ARM.
  SDNode *SelectARMIndexedLoad(SDNode *N);
  SDNode *SelectT2IndexedLoad(SDNode *N);

  /// SelectVLD - Select NEON load intrinsics. NumVecs should be
  /// 1, 2, 3 or 4. The opcode arrays specify the instructions used for
  /// loads of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  SDNode *SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
                    const uint16_t *DOpcodes,
                    const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);

  /// SelectVST - Select NEON store intrinsics. NumVecs should
  /// be 1, 2, 3 or 4. The opcode arrays specify the instructions used for
  /// stores of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  SDNode *SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
                    const uint16_t *DOpcodes,
                    const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);

  /// SelectVLDSTLane - Select NEON load/store lane intrinsics. NumVecs should
  /// be 2, 3 or 4. The opcode arrays specify the instructions used for
  /// load/store of D registers and Q registers.
  SDNode *SelectVLDSTLane(SDNode *N, bool IsLoad,
                          bool isUpdating, unsigned NumVecs,
                          const uint16_t *DOpcodes, const uint16_t *QOpcodes);

  /// SelectVLDDup - Select NEON load-duplicate intrinsics. NumVecs
  /// should be 2, 3 or 4. The opcode array specifies the instructions used
  /// for loading D registers. (Q registers are not supported.)
  SDNode *SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs,
                       const uint16_t *Opcodes);

  /// SelectVTBL - Select NEON VTBL and VTBX intrinsics. NumVecs should be 2,
  /// 3 or 4. These are custom-selected so that a REG_SEQUENCE can be
  /// generated to force the table registers to be consecutive.
  SDNode *SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, unsigned Opc);

  /// SelectV6T2BitfieldExtractOp - Select SBFX/UBFX instructions for ARM.
  SDNode *SelectV6T2BitfieldExtractOp(SDNode *N, bool isSigned);

  /// SelectCMOVOp - Select CMOV instructions for ARM.
  SDNode *SelectCMOVOp(SDNode *N);
  SDNode *SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                              ARMCC::CondCodes CCVal, SDValue CCR,
                              SDValue InFlag);
  SDNode *SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                               ARMCC::CondCodes CCVal, SDValue CCR,
                               SDValue InFlag);
  SDNode *SelectT2CMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                            ARMCC::CondCodes CCVal, SDValue CCR,
                            SDValue InFlag);
  SDNode *SelectARMCMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                             ARMCC::CondCodes CCVal, SDValue CCR,
                             SDValue InFlag);

  // Select special operations if node forms integer ABS pattern
  SDNode *SelectABSOp(SDNode *N);

  SDNode *SelectInlineAsm(SDNode *N);

  SDNode *SelectConcatVector(SDNode *N);

  SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                            char ConstraintCode,
                                            std::vector<SDValue> &OutOps);

  // Form pairs of consecutive R, S, D, or Q registers.
  SDNode *createGPRPairNode(EVT VT, SDValue V0, SDValue V1);
  SDNode *createSRegPairNode(EVT VT, SDValue V0, SDValue V1);
  SDNode *createDRegPairNode(EVT VT, SDValue V0, SDValue V1);
  SDNode *createQRegPairNode(EVT VT, SDValue V0, SDValue V1);

  // Form sequences of 4 consecutive S, D, or Q registers.
  SDNode *createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1,
                              SDValue V2, SDValue V3);
  SDNode *createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1,
                              SDValue V2, SDValue V3);
  SDNode *createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1,
                              SDValue V2, SDValue V3);

  // Get the alignment operand for a NEON VLD or VST instruction.
  SDValue GetVLDSTAlign(SDValue Align, unsigned NumVecs, bool is64BitVector);
};
}

/// isInt32Immediate - This method tests to see if the node is a 32-bit constant
/// operand. If so, Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
  if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
    Imm = cast<ConstantSDNode>(N)->getZExtValue();
    return true;
  }
  return false;
}

// isInt32Immediate - This method tests to see if the value is a 32-bit
// constant operand. If so, Imm will receive the 32-bit value.
static bool isInt32Immediate(SDValue N, unsigned &Imm) {
  return isInt32Immediate(N.getNode(), Imm);
}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so, Imm will receive the 32-bit value.
static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
  return N->getOpcode() == Opc &&
         isInt32Immediate(N->getOperand(1).getNode(), Imm);
}

/// \brief Check whether a particular node is a constant value representable as
/// (N * Scale) where N is in the range [\p RangeMin, \p RangeMax).
///
/// \param ScaledConstant [out] - On success, the pre-scaled constant value.
static bool isScaledConstantInRange(SDValue Node, int Scale,
                                    int RangeMin, int RangeMax,
                                    int &ScaledConstant) {
  assert(Scale > 0 && "Invalid scale!");

  // Check that this is a constant.
  const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Node);
  if (!C)
    return false;

  ScaledConstant = (int) C->getZExtValue();
  if ((ScaledConstant % Scale) != 0)
    return false;

  ScaledConstant /= Scale;
  return ScaledConstant >= RangeMin && ScaledConstant < RangeMax;
}
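
// Worked example (illustrative, not part of the original source): for a
// constant node with value 1020 and Scale = 4, RangeMin = 0, RangeMax = 256,
// this helper computes ScaledConstant = 1020 / 4 = 255 and returns true,
// since 255 lies in [0, 256). A value of 1022 would fail the divisibility
// check, and 1024 (scaled: 256) would fall outside the range.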

void ARMDAGToDAGISel::PreprocessISelDAG() {
  if (!Subtarget->hasV6T2Ops())
    return;

  bool isThumb2 = Subtarget->isThumb();
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.

    if (N->getOpcode() != ISD::ADD)
      continue;

    // Look for (add X1, (and (srl X2, c1), c2)) where c2 is constant with
    // leading zeros, followed by consecutive set bits, followed by 1 or 2
    // trailing zeros, e.g. 1020.
    // Transform the expression to
    // (add X1, (shl (and (srl X2, c1), (c2>>tz)), tz)) where tz is the number
    // of trailing zeros of c2. The left shift would be folded as a shifter
    // operand of 'add' and the 'and' and 'srl' would become a bits extraction
    // node (UBFX).
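    //
    // Worked instance (illustrative, added commentary): with c1 = 14 and
    // c2 = 1020 (tz = 2), (add X1, (and (srl X2, 14), 1020)) is rewritten
    // to (add X1, (shl (and (srl X2, 16), 255), 2)), which selects to a
    // UBFX whose result feeds the shifter operand of the add or load.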

    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    unsigned And_imm = 0;
    if (!isOpcWithIntImmediate(N1.getNode(), ISD::AND, And_imm)) {
      if (isOpcWithIntImmediate(N0.getNode(), ISD::AND, And_imm))
        std::swap(N0, N1);
    }
    if (!And_imm)
      continue;

    // Check if the AND mask is an immediate of the form: 000.....1111111100
    unsigned TZ = countTrailingZeros(And_imm);
    if (TZ != 1 && TZ != 2)
      // Be conservative here. Shifter operands aren't always free. e.g. On
      // Swift, left shifter operand of 1 / 2 is free but others are not.
      // e.g.
      //   ubfx   r3, r1, #16, #8
      //   ldr.w  r3, [r0, r3, lsl #2]
      // vs.
      //   mov.w  r9, #1020
      //   and.w  r2, r9, r1, lsr #14
      //   ldr    r2, [r0, r2]
      continue;
    And_imm >>= TZ;
    if (And_imm & (And_imm + 1))
      continue;

    // Look for (and (srl X, c1), c2).
    SDValue Srl = N1.getOperand(0);
    unsigned Srl_imm = 0;
    if (!isOpcWithIntImmediate(Srl.getNode(), ISD::SRL, Srl_imm) ||
        (Srl_imm <= 2))
      continue;

    // Make sure the first operand is not a shifter operand which would
    // prevent folding of the left shift.
    SDValue CPTmp0;
    SDValue CPTmp1;
    SDValue CPTmp2;
    if (isThumb2) {
      if (SelectT2ShifterOperandReg(N0, CPTmp0, CPTmp1))
        continue;
    } else {
      if (SelectImmShifterOperand(N0, CPTmp0, CPTmp1) ||
          SelectRegShifterOperand(N0, CPTmp0, CPTmp1, CPTmp2))
        continue;
    }

    // Now make the transformation.
    Srl = CurDAG->getNode(ISD::SRL, SDLoc(Srl), MVT::i32,
                          Srl.getOperand(0),
                          CurDAG->getConstant(Srl_imm + TZ, MVT::i32));
    N1 = CurDAG->getNode(ISD::AND, SDLoc(N1), MVT::i32,
                         Srl, CurDAG->getConstant(And_imm, MVT::i32));
    N1 = CurDAG->getNode(ISD::SHL, SDLoc(N1), MVT::i32,
                         N1, CurDAG->getConstant(TZ, MVT::i32));
    CurDAG->UpdateNodeOperands(N, N0, N1);
  }
}

/// hasNoVMLxHazardUse - Return true if it's desirable to select a FP MLA / MLS
/// node. VFP / NEON fp VMLA / VMLS instructions have special RAW hazards (at
/// least on current ARM implementations) which should be avoided.
bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
  if (OptLevel == CodeGenOpt::None)
    return true;

  if (!CheckVMLxHazard)
    return true;

  if (!Subtarget->isCortexA8() && !Subtarget->isLikeA9() &&
      !Subtarget->isSwift())
    return true;

  if (!N->hasOneUse())
    return false;

  SDNode *Use = *N->use_begin();
  if (Use->getOpcode() == ISD::CopyToReg)
    return true;
  if (Use->isMachineOpcode()) {
    const ARMBaseInstrInfo *TII =
      static_cast<const ARMBaseInstrInfo*>(TM.getInstrInfo());

    const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode());
    if (MCID.mayStore())
      return true;
    unsigned Opcode = MCID.getOpcode();
    if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
      return true;
    // vmlx feeding into another vmlx. We actually want to unfold
    // the use later in the MLxExpansion pass. e.g.
    // vmla
    // vmla (stall 8 cycles)
    //
    // vmul (5 cycles)
    // vadd (5 cycles)
    // vmla (stall 4 cycles)
    // This adds up to about 18 - 19 cycles.
    //
    // vmla
    // vmul (stall 4 cycles)
    // vadd adds up to about 14 cycles.
    return TII->isFpMLxInstruction(Opcode);
  }

  return false;
}

bool ARMDAGToDAGISel::isShifterOpProfitable(const SDValue &Shift,
                                            ARM_AM::ShiftOpc ShOpcVal,
                                            unsigned ShAmt) {
  if (!Subtarget->isLikeA9() && !Subtarget->isSwift())
    return true;
  if (Shift.hasOneUse())
    return true;
  // R << 2 is free.
  return ShOpcVal == ARM_AM::lsl &&
         (ShAmt == 2 || (Subtarget->isSwift() && ShAmt == 1));
}

bool ARMDAGToDAGISel::SelectImmShifterOperand(SDValue N,
                                              SDValue &BaseReg,
                                              SDValue &Opc,
                                              bool CheckProfitability) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!RHS) return false;
  ShImmVal = RHS->getZExtValue() & 31;
  Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
                                  MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectRegShifterOperand(SDValue N,
                                              SDValue &BaseReg,
                                              SDValue &ShReg,
                                              SDValue &Opc,
                                              bool CheckProfitability) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  // A constant shift amount is an immediate shifter operand, not a register
  // shifter operand; reject it here.
  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (RHS) return false;

  ShReg = N.getOperand(1);
  if (CheckProfitability && !isShifterOpProfitable(N, ShOpcVal, ShImmVal))
    return false;
  Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
                                  MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
                                          SDValue &Base,
                                          SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI,
                                         getTargetLowering()->getPointerTy());
      OffImm = CurDAG->getTargetConstant(0, MVT::i32);
      return true;
    }

    if (N.getOpcode() == ARMISD::Wrapper &&
        !(Subtarget->useMovt() &&
          N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    } else
      Base = N;
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI,
                                           getTargetLowering()->getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}
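
// Illustrative example (added commentary, r0/r1 are hypothetical registers):
// for (add r1, #4092) this matcher produces Base = r1 and OffImm = 4092,
// selecting "ldr r0, [r1, #4092]"; an offset of 4096 does not fit in 12 bits
// and falls through to the register-offset forms handled below.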

bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
                                      SDValue &Opc) {
  if (N.getOpcode() == ISD::MUL &&
      ((!Subtarget->isLikeA9() && !Subtarget->isSwift()) || N.hasOneUse())) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      // X * [3,5,9] -> X + X * [2,4,8] etc.
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC & 1) {
        RHSC = RHSC & ~1;
        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = -RHSC;
        }
        if (isPowerOf2_32(RHSC)) {
          unsigned ShAmt = Log2_32(RHSC);
          Base = Offset = N.getOperand(0);
          Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
                                                            ARM_AM::lsl),
                                          MVT::i32);
          return true;
        }
      }
    }
  }

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      // ISD::OR that is equivalent to an ISD::ADD.
      !CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Leave simple R +/- imm12 operands for LDRi12
  if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::OR) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                                -0x1000+1, 0x1000, RHSC)) // 12 bits.
      return false;
  }

  // Otherwise this is R +/- [possibly shifted] R.
  ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::SUB ? ARM_AM::sub:ARM_AM::add;
  ARM_AM::ShiftOpc ShOpcVal =
    ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
  unsigned ShAmt = 0;

  Base   = N.getOperand(0);
  Offset = N.getOperand(1);

  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh =
           dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
        Offset = N.getOperand(1).getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  // Try matching (R shl C) + (R).
  if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
      !(Subtarget->isLikeA9() || Subtarget->isSwift() ||
        N.getOperand(0).hasOneUse())) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
    if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant, if not, we can't
      // fold it.
      if (ConstantSDNode *Sh =
            dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
        ShAmt = Sh->getZExtValue();
        if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
          Offset = N.getOperand(0).getOperand(0);
          Base = N.getOperand(1);
        } else {
          ShAmt = 0;
          ShOpcVal = ARM_AM::no_shift;
        }
      } else {
        ShOpcVal = ARM_AM::no_shift;
      }
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return true;
}

AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
                                                     SDValue &Base,
                                                     SDValue &Offset,
                                                     SDValue &Opc) {
  if (N.getOpcode() == ISD::MUL &&
      (!(Subtarget->isLikeA9() || Subtarget->isSwift()) || N.hasOneUse())) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      // X * [3,5,9] -> X + X * [2,4,8] etc.
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC & 1) {
        RHSC = RHSC & ~1;
        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = -RHSC;
        }
        if (isPowerOf2_32(RHSC)) {
          unsigned ShAmt = Log2_32(RHSC);
          Base = Offset = N.getOperand(0);
          Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
                                                            ARM_AM::lsl),
                                          MVT::i32);
          return AM2_SHOP;
        }
      }
    }
  }

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      // ISD::OR that is equivalent to an ADD.
      !CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI,
                                         getTargetLowering()->getPointerTy());
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               !(Subtarget->useMovt() &&
                 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    }
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
                                                      ARM_AM::no_shift),
                                    MVT::i32);
    return AM2_BASE;
  }

  // Match simple R +/- imm12 operands.
  if (N.getOpcode() != ISD::SUB) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                                -0x1000+1, 0x1000, RHSC)) { // 12 bits.
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI,
                                           getTargetLowering()->getPointerTy());
      }
      Offset = CurDAG->getRegister(0, MVT::i32);

      ARM_AM::AddrOpc AddSub = ARM_AM::add;
      if (RHSC < 0) {
        AddSub = ARM_AM::sub;
        RHSC = -RHSC;
      }
      Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,
                                                        ARM_AM::no_shift),
                                      MVT::i32);
      return AM2_BASE;
    }
  }

  if ((Subtarget->isLikeA9() || Subtarget->isSwift()) && !N.hasOneUse()) {
    // Compute R +/- (R << N) and reuse it.
    Base = N;
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
                                                      ARM_AM::no_shift),
                                    MVT::i32);
    return AM2_BASE;
  }

  // Otherwise this is R +/- [possibly shifted] R.
  ARM_AM::AddrOpc AddSub = N.getOpcode() != ISD::SUB ? ARM_AM::add:ARM_AM::sub;
  ARM_AM::ShiftOpc ShOpcVal =
    ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
  unsigned ShAmt = 0;

  Base   = N.getOperand(0);
  Offset = N.getOperand(1);

  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh =
           dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
        Offset = N.getOperand(1).getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  // Try matching (R shl C) + (R).
  if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
      !(Subtarget->isLikeA9() || Subtarget->isSwift() ||
        N.getOperand(0).hasOneUse())) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
    if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant, if not, we can't
      // fold it.
      if (ConstantSDNode *Sh =
            dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
        ShAmt = Sh->getZExtValue();
        if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
          Offset = N.getOperand(0).getOperand(0);
          Base = N.getOperand(1);
        } else {
          ShAmt = 0;
          ShOpcVal = ARM_AM::no_shift;
        }
      } else {
        ShOpcVal = ARM_AM::no_shift;
      }
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return AM2_SHOP;
}

bool ARMDAGToDAGISel::SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
                                               SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val))
    return false;

  Offset = N;
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
  unsigned ShAmt = 0;
  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(N, ShOpcVal, ShAmt))
        Offset = N.getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
                                                  SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
    if (AddSub == ARM_AM::sub) Val *= -1;
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(Val, MVT::i32);
    return true;
  }

  return false;
}

bool ARMDAGToDAGISel::SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
                                               SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
                                                      ARM_AM::no_shift),
                                    MVT::i32);
    return true;
  }

  return false;
}

bool ARMDAGToDAGISel::SelectAddrOffsetNone(SDValue N, SDValue &Base) {
  Base = N;
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
                                      SDValue &Base, SDValue &Offset,
                                      SDValue &Opc) {
  if (N.getOpcode() == ISD::SUB) {
    // X - C is canonicalized to X + -C, no need to handle it here.
    Base = N.getOperand(0);
    Offset = N.getOperand(1);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),MVT::i32);
    return true;
  }

  if (!CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI,
                                         getTargetLowering()->getPointerTy());
    }
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32);
    return true;
  }

  // If the RHS is +/- imm8, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                              -256 + 1, 256, RHSC)) { // 8 bits.
    Base = N.getOperand(0);
    if (Base.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI,
                                         getTargetLowering()->getPointerTy());
    }
    Offset = CurDAG->getRegister(0, MVT::i32);

    ARM_AM::AddrOpc AddSub = ARM_AM::add;
    if (RHSC < 0) {
      AddSub = ARM_AM::sub;
      RHSC = -RHSC;
    }
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32);
    return true;
  }

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 256, Val)) { // 8 bits.
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);
    return true;
  }

  Offset = N;
  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
                                      SDValue &Base, SDValue &Offset) {
  if (!CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI,
                                         getTargetLowering()->getPointerTy());
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               !(Subtarget->useMovt() &&
                 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    }
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
                                       MVT::i32);
    return true;
  }

  // If the RHS is +/- imm8, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4,
                              -256 + 1, 256, RHSC)) {
    Base = N.getOperand(0);
    if (Base.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI,
                                         getTargetLowering()->getPointerTy());
    }

    ARM_AM::AddrOpc AddSub = ARM_AM::add;
    if (RHSC < 0) {
      AddSub = ARM_AM::sub;
      RHSC = -RHSC;
    }
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
                                       MVT::i32);
    return true;
  }

  Base = N;
  Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
                                     MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
                                      SDValue &Align) {
  Addr = N;

  unsigned Alignment = 0;
  if (LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(Parent)) {
    // This case occurs only for VLD1-lane/dup and VST1-lane instructions.
    // The maximum alignment is equal to the memory size being referenced.
    unsigned LSNAlign = LSN->getAlignment();
    unsigned MemSize = LSN->getMemoryVT().getSizeInBits() / 8;
    if (LSNAlign >= MemSize && MemSize > 1)
      Alignment = MemSize;
  } else {
    // All other uses of addrmode6 are for intrinsics. For now just record
    // the raw alignment value; it will be refined later based on the legal
    // alignment operands for the intrinsic.
    Alignment = cast<MemIntrinsicSDNode>(Parent)->getAlignment();
  }

  Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode6Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset) {
  LSBaseSDNode *LdSt = cast<LSBaseSDNode>(Op);
  ISD::MemIndexedMode AM = LdSt->getAddressingMode();
  if (AM != ISD::POST_INC)
    return false;
  Offset = N;
  if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N)) {
    if (NC->getZExtValue() * 8 == LdSt->getMemoryVT().getSizeInBits())
      Offset = CurDAG->getRegister(0, MVT::i32);
  }
  return true;
}

bool ARMDAGToDAGISel::SelectAddrModePC(SDValue N,
                                       SDValue &Offset, SDValue &Label) {
  if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
    Offset = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
                                      MVT::i32);
    return true;
  }

  return false;
}

//===----------------------------------------------------------------------===//
//                         Thumb Addressing Modes
//===----------------------------------------------------------------------===//

bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue N,
                                            SDValue &Base, SDValue &Offset){
  if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N)) {
    ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N);
    if (!NC || !NC->isNullValue())
      return false;

    Base = Offset = N;
    return true;
  }

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  return true;
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI(SDValue N, SDValue &Base,
                                       SDValue &Offset, unsigned Scale) {
  if (Scale == 4) {
    SDValue TmpBase, TmpOffImm;
    if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
      return false; // We want to select tLDRspi / tSTRspi instead.

    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
      return false; // We want to select tLDRpci instead.
  }

  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Thumb does not have [sp, r] address mode.
  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
  if ((LHSR && LHSR->getReg() == ARM::SP) ||
      (RHSR && RHSR->getReg() == ARM::SP))
    return false;

  // FIXME: Why do we explicitly check for a match here and then return false?
  // Presumably to allow something else to match, but shouldn't this be
  // documented?
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC))
    return false;

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  return true;
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S1(SDValue N,
                                          SDValue &Base,
                                          SDValue &Offset) {
  return SelectThumbAddrModeRI(N, Base, Offset, 1);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S2(SDValue N,
                                          SDValue &Base,
                                          SDValue &Offset) {
  return SelectThumbAddrModeRI(N, Base, Offset, 2);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S4(SDValue N,
                                          SDValue &Base,
                                          SDValue &Offset) {
  return SelectThumbAddrModeRI(N, Base, Offset, 4);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale,
                                          SDValue &Base, SDValue &OffImm) {
  if (Scale == 4) {
    SDValue TmpBase, TmpOffImm;
    if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
      return false; // We want to select tLDRspi / tSTRspi instead.

    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
      return false; // We want to select tLDRpci instead.
  }

  if (!CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ARMISD::Wrapper &&
        !(Subtarget->useMovt() &&
          N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    } else {
      Base = N;
    }

    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
  if ((LHSR && LHSR->getReg() == ARM::SP) ||
      (RHSR && RHSR->getReg() == ARM::SP)) {
    ConstantSDNode *LHS = dyn_cast<ConstantSDNode>(N.getOperand(0));
    ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
    unsigned LHSC = LHS ? LHS->getZExtValue() : 0;
    unsigned RHSC = RHS ? RHS->getZExtValue() : 0;

    // Thumb does not have [sp, #imm5] address mode for non-zero imm5.
    if (LHSC != 0 || RHSC != 0) return false;

    Base = N;
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  // If the RHS is + imm5 * scale, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC)) {
    Base = N.getOperand(0);
    OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
    return true;
  }

  Base = N.getOperand(0);
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 4, Base, OffImm);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 2, Base, OffImm);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 1, Base, OffImm);
}

bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI,
                                       getTargetLowering()->getPointerTy());
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;

  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  if (N.getOperand(0).getOpcode() == ISD::FrameIndex ||
      (LHSR && LHSR->getReg() == ARM::SP)) {
    // If the RHS is + imm8 * scale, fold into addr mode.
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4, 0, 256, RHSC)) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI,
                                           getTargetLowering()->getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  return false;
}
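
// Sizing note (illustrative, not from the original source): tLDRspi/tSTRspi
// encode an imm8 scaled by 4, so the largest SP-relative offset this matcher
// folds is 255 * 4 = 1020 bytes, e.g. "ldr r0, [sp, #1020]".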

//===----------------------------------------------------------------------===//
//                        Thumb 2 Addressing Modes
//===----------------------------------------------------------------------===//

bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDValue N, SDValue &BaseReg,
                                                SDValue &Opc) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    ShImmVal = RHS->getZExtValue() & 31;
    Opc = getI32Imm(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal));
    return true;
  }

  return false;
}

bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI,
                                         getTargetLowering()->getPointerTy());
      OffImm = CurDAG->getTargetConstant(0, MVT::i32);
      return true;
    }

    if (N.getOpcode() == ARMISD::Wrapper &&
        !(Subtarget->useMovt() &&
          N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::TargetConstantPool)
        return false; // We want to select t2LDRpci instead.
    } else
      Base = N;
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    if (SelectT2AddrModeImm8(N, Base, OffImm))
      // Let t2LDRi8 handle (R - imm8).
      return false;

    int RHSC = (int)RHS->getZExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI,
                                           getTargetLowering()->getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N,
                                           SDValue &Base, SDValue &OffImm) {
  // Match simple R - imm8 operands.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N))
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getSExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI,
                                           getTargetLowering()->getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  return false;
}

bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
                                                 SDValue &OffImm){
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  int RHSC;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x100, RHSC)) { // 8 bits.
    OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
      ? CurDAG->getTargetConstant(RHSC, MVT::i32)
      : CurDAG->getTargetConstant(-RHSC, MVT::i32);
    return true;
  }

  return false;
}

bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
                                            SDValue &Base,
                                            SDValue &OffReg, SDValue &ShImm) {
  // (R - imm8) should be handled by t2LDRi8. The rest are handled by t2LDRi12.
  if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Leave (R + imm12) for t2LDRi12, (R - imm8) for t2LDRi8.
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC >= 0 && RHSC < 0x1000) // 12 bits (unsigned)
      return false;
    else if (RHSC < 0 && RHSC >= -255) // 8 bits
      return false;
  }

  // Look for (R + R) or (R + (R << [1,2,3])).
  unsigned ShAmt = 0;
  Base   = N.getOperand(0);
  OffReg = N.getOperand(1);

  // Swap if it is ((R << c) + R).
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg.getOpcode());
  if (ShOpcVal != ARM_AM::lsl) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(Base.getOpcode());
    if (ShOpcVal == ARM_AM::lsl)
      std::swap(Base, OffReg);
  }

  if (ShOpcVal == ARM_AM::lsl) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (ShAmt < 4 && isShifterOpProfitable(OffReg, ShOpcVal, ShAmt))
        OffReg = OffReg.getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  ShImm = CurDAG->getTargetConstant(ShAmt, MVT::i32);

  return true;
}
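
// Illustrative match (added commentary, r0-r2 are hypothetical registers):
// for (add r1, (shl r2, 2)) this produces Base = r1, OffReg = r2, ShImm = 2,
// i.e. the Thumb-2 form "ldr.w r0, [r1, r2, lsl #2]"; shift amounts of 4 or
// more are rejected and the shift stays a separate instruction.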

//===--------------------------------------------------------------------===//

/// getAL - Returns an ARMCC::AL immediate node.
static inline SDValue getAL(SelectionDAG *CurDAG) {
  return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32);
}

SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  if (AM == ISD::UNINDEXED)
    return NULL;

  EVT LoadedVT = LD->getMemoryVT();
  SDValue Offset, AMOpc;
  bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
  unsigned Opcode = 0;
  bool Match = false;
  if (LoadedVT == MVT::i32 && isPre &&
      SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = ARM::LDR_PRE_IMM;
    Match = true;
  } else if (LoadedVT == MVT::i32 && !isPre &&
      SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = ARM::LDR_POST_IMM;
    Match = true;
  } else if (LoadedVT == MVT::i32 &&
      SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = isPre ? ARM::LDR_PRE_REG : ARM::LDR_POST_REG;
    Match = true;

  } else if (LoadedVT == MVT::i16 &&
             SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
    Match = true;
    Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
      ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
      : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
  } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
    if (LD->getExtensionType() == ISD::SEXTLOAD) {
      if (SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST;
      }
    } else {
      if (isPre &&
          SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = ARM::LDRB_PRE_IMM;
      } else if (!isPre &&
                 SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = ARM::LDRB_POST_IMM;
      } else if (SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = isPre ? ARM::LDRB_PRE_REG : ARM::LDRB_POST_REG;
      }
    }
  }

  if (Match) {
    if (Opcode == ARM::LDR_PRE_IMM || Opcode == ARM::LDRB_PRE_IMM) {
      SDValue Chain = LD->getChain();
      SDValue Base = LD->getBasePtr();
      SDValue Ops[]= { Base, AMOpc, getAL(CurDAG),
                       CurDAG->getRegister(0, MVT::i32), Chain };
      return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
                                    MVT::i32, MVT::Other, Ops);
    } else {
      SDValue Chain = LD->getChain();
      SDValue Base = LD->getBasePtr();
      SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG),
                       CurDAG->getRegister(0, MVT::i32), Chain };
      return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
                                    MVT::i32, MVT::Other, Ops);
    }
  }

  return NULL;
}

SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  if (AM == ISD::UNINDEXED)
    return NULL;

  EVT LoadedVT = LD->getMemoryVT();
  bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
  SDValue Offset;
  bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
  unsigned Opcode = 0;
  bool Match = false;
  if (SelectT2AddrModeImm8Offset(N, LD->getOffset(), Offset)) {
    switch (LoadedVT.getSimpleVT().SimpleTy) {
    case MVT::i32:
      Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST;
      break;
    case MVT::i16:
      if (isSExtLd)
        Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST;
      else
        Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST;
      break;
    case MVT::i8:
    case MVT::i1:
      if (isSExtLd)
        Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST;
      else
        Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST;
      break;
    default:
      return NULL;
    }
    Match = true;
  }

  if (Match) {
    SDValue Chain = LD->getChain();
    SDValue Base = LD->getBasePtr();
    SDValue Ops[]= { Base, Offset, getAL(CurDAG),
                     CurDAG->getRegister(0, MVT::i32), Chain };
    return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32,
                                  MVT::Other, Ops);
  }

  return NULL;
}

/// \brief Form a GPRPair pseudo register from a pair of GPR regs.
SDNode *ARMDAGToDAGISel::createGPRPairNode(EVT VT, SDValue V0, SDValue V1) {
  SDLoc dl(V0.getNode());
  SDValue RegClass =
    CurDAG->getTargetConstant(ARM::GPRPairRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::gsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::gsub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// \brief Form a D register from a pair of S registers.
SDNode *ARMDAGToDAGISel::createSRegPairNode(EVT VT, SDValue V0, SDValue V1) {
  SDLoc dl(V0.getNode());
  SDValue RegClass =
    CurDAG->getTargetConstant(ARM::DPR_VFP2RegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// \brief Form a quad register from a pair of D registers.
SDNode *ARMDAGToDAGISel::createDRegPairNode(EVT VT, SDValue V0, SDValue V1) {
  SDLoc dl(V0.getNode());
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// \brief Form 4 consecutive D registers from a pair of Q registers.
SDNode *ARMDAGToDAGISel::createQRegPairNode(EVT VT, SDValue V0, SDValue V1) {
  SDLoc dl(V0.getNode());
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// \brief Form 4 consecutive S registers.
SDNode *ARMDAGToDAGISel::createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1,
                                             SDValue V2, SDValue V3) {
  SDLoc dl(V0.getNode());
  SDValue RegClass =
    CurDAG->getTargetConstant(ARM::QPR_VFP2RegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::ssub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::ssub_3, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                                    V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// \brief Form 4 consecutive D registers.
SDNode *ARMDAGToDAGISel::createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1,
                                             SDValue V2, SDValue V3) {
  SDLoc dl(V0.getNode());
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::dsub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::dsub_3, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                                    V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// \brief Form 4 consecutive Q registers.
SDNode *ARMDAGToDAGISel::createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1,
                                             SDValue V2, SDValue V3) {
  SDLoc dl(V0.getNode());
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQQQPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::qsub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::qsub_3, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                                    V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// GetVLDSTAlign - Get the alignment (in bytes) for the alignment operand
/// of a NEON VLD or VST instruction. The supported values depend on the
/// number of registers being loaded.
SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, unsigned NumVecs,
                                       bool is64BitVector) {
  unsigned NumRegs = NumVecs;
  if (!is64BitVector && NumVecs < 3)
    NumRegs *= 2;

  unsigned Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
  if (Alignment >= 32 && NumRegs == 4)
    Alignment = 32;
  else if (Alignment >= 16 && (NumRegs == 2 || NumRegs == 4))
    Alignment = 16;
  else if (Alignment >= 8)
    Alignment = 8;
  else
    Alignment = 0;

  return CurDAG->getTargetConstant(Alignment, MVT::i32);
}
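
// Clamping sketch (illustrative, not from the original source): a 4-register
// VLD/VST whose address is 64-byte aligned is emitted with the maximum
// encodable alignment operand of 32; a 2-register operation with 16-byte
// alignment keeps 16; anything below 8 bytes becomes 0 (no alignment hint).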

// Get the register stride update opcode of a VLD/VST instruction that
// is otherwise equivalent to the given fixed stride updating instruction.
static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
  switch (Opc) {
  default: break;
  case ARM::VLD1d8wb_fixed: return ARM::VLD1d8wb_register;
  case ARM::VLD1d16wb_fixed: return ARM::VLD1d16wb_register;
  case ARM::VLD1d32wb_fixed: return ARM::VLD1d32wb_register;
  case ARM::VLD1d64wb_fixed: return ARM::VLD1d64wb_register;
  case ARM::VLD1q8wb_fixed: return ARM::VLD1q8wb_register;
  case ARM::VLD1q16wb_fixed: return ARM::VLD1q16wb_register;
  case ARM::VLD1q32wb_fixed: return ARM::VLD1q32wb_register;
  case ARM::VLD1q64wb_fixed: return ARM::VLD1q64wb_register;

  case ARM::VST1d8wb_fixed: return ARM::VST1d8wb_register;
  case ARM::VST1d16wb_fixed: return ARM::VST1d16wb_register;
  case ARM::VST1d32wb_fixed: return ARM::VST1d32wb_register;
  case ARM::VST1d64wb_fixed: return ARM::VST1d64wb_register;
  case ARM::VST1q8wb_fixed: return ARM::VST1q8wb_register;
  case ARM::VST1q16wb_fixed: return ARM::VST1q16wb_register;
  case ARM::VST1q32wb_fixed: return ARM::VST1q32wb_register;
  case ARM::VST1q64wb_fixed: return ARM::VST1q64wb_register;
  case ARM::VST1d64TPseudoWB_fixed: return ARM::VST1d64TPseudoWB_register;
  case ARM::VST1d64QPseudoWB_fixed: return ARM::VST1d64QPseudoWB_register;

  case ARM::VLD2d8wb_fixed: return ARM::VLD2d8wb_register;
  case ARM::VLD2d16wb_fixed: return ARM::VLD2d16wb_register;
  case ARM::VLD2d32wb_fixed: return ARM::VLD2d32wb_register;
  case ARM::VLD2q8PseudoWB_fixed: return ARM::VLD2q8PseudoWB_register;
  case ARM::VLD2q16PseudoWB_fixed: return ARM::VLD2q16PseudoWB_register;
  case ARM::VLD2q32PseudoWB_fixed: return ARM::VLD2q32PseudoWB_register;

  case ARM::VST2d8wb_fixed: return ARM::VST2d8wb_register;
  case ARM::VST2d16wb_fixed: return ARM::VST2d16wb_register;
  case ARM::VST2d32wb_fixed: return ARM::VST2d32wb_register;
  case ARM::VST2q8PseudoWB_fixed: return ARM::VST2q8PseudoWB_register;
  case ARM::VST2q16PseudoWB_fixed: return ARM::VST2q16PseudoWB_register;
  case ARM::VST2q32PseudoWB_fixed: return ARM::VST2q32PseudoWB_register;

  case ARM::VLD2DUPd8wb_fixed: return ARM::VLD2DUPd8wb_register;
  case ARM::VLD2DUPd16wb_fixed: return ARM::VLD2DUPd16wb_register;
  case ARM::VLD2DUPd32wb_fixed: return ARM::VLD2DUPd32wb_register;
  }
  return Opc; // If not one we handle, return it unchanged.
}

SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
                                   const uint16_t *DOpcodes,
                                   const uint16_t *QOpcodes0,
                                   const uint16_t *QOpcodes1) {
  assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
  SDLoc dl(N);

  SDValue MemAddr, Align;
  unsigned AddrOpIdx = isUpdating ? 1 : 2;
  if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
    return NULL;

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getValueType(0);
  bool is64BitVector = VT.is64BitVector();
  Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);

  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vld type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
  case MVT::v1i64: OpcodeIndex = 3; break;
    // Quad-register operations:
  case MVT::v16i8: OpcodeIndex = 0; break;
  case MVT::v8i16: OpcodeIndex = 1; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 2; break;
  case MVT::v2i64: OpcodeIndex = 3;
    assert(NumVecs == 1 && "v2i64 type only supported for VLD1");
    break;
  }

  EVT ResTy;
  if (NumVecs == 1)
    ResTy = VT;
  else {
    unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
    if (!is64BitVector)
      ResTyElts *= 2;
    ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts);
  }
  std::vector<EVT> ResTys;
  ResTys.push_back(ResTy);
  if (isUpdating)
    ResTys.push_back(MVT::i32);
  ResTys.push_back(MVT::Other);

  SDValue Pred = getAL(CurDAG);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
  SDNode *VLd;
  SmallVector<SDValue, 7> Ops;

  // Double registers and VLD1/VLD2 quad registers are directly supported.
  if (is64BitVector || NumVecs <= 2) {
    unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
                    QOpcodes0[OpcodeIndex]);
    Ops.push_back(MemAddr);
    Ops.push_back(Align);
    if (isUpdating) {
      SDValue Inc = N->getOperand(AddrOpIdx + 1);
      // FIXME: VLD1/VLD2 fixed increment doesn't need Reg0. Remove the reg0
      // case entirely when the rest are updated to that form, too.
      if ((NumVecs == 1 || NumVecs == 2) && !isa<ConstantSDNode>(Inc.getNode()))
        Opc = getVLDSTRegisterUpdateOpcode(Opc);
      // We use a VLD1 for v1i64 even if the pseudo says vld2/3/4, so
      // check for that explicitly too. Horribly hacky, but temporary.
      if ((NumVecs != 1 && NumVecs != 2 && Opc != ARM::VLD1q64wb_fixed) ||
          !isa<ConstantSDNode>(Inc.getNode()))
        Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
    }
    Ops.push_back(Pred);
    Ops.push_back(Reg0);
    Ops.push_back(Chain);
    VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  } else {
    // Otherwise, quad registers are loaded with two separate instructions,
    // where one loads the even registers and the other loads the odd registers.
    EVT AddrTy = MemAddr.getValueType();

    // Load the even subregs. This is always an updating load, so that it
    // provides the address to the second load for the odd subregs.
    SDValue ImplDef =
      SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, ResTy), 0);
    const SDValue OpsA[] = { MemAddr, Align, Reg0, ImplDef, Pred, Reg0, Chain };
    SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
                                          ResTy, AddrTy, MVT::Other, OpsA);
    Chain = SDValue(VLdA, 2);

    // Load the odd subregs.
    Ops.push_back(SDValue(VLdA, 1));
    Ops.push_back(Align);
    if (isUpdating) {
      SDValue Inc = N->getOperand(AddrOpIdx + 1);
      assert(isa<ConstantSDNode>(Inc.getNode()) &&
             "only constant post-increment update allowed for VLD3/4");
      (void)Inc;
      Ops.push_back(Reg0);
    }
    Ops.push_back(SDValue(VLdA, 0));
    Ops.push_back(Pred);
    Ops.push_back(Reg0);
    Ops.push_back(Chain);
    VLd = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys, Ops);
  }

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(VLd)->setMemRefs(MemOp, MemOp + 1);

  if (NumVecs == 1)
    return VLd;

  // Extract out the subregisters.
  SDValue SuperReg = SDValue(VLd, 0);
  assert(ARM::dsub_7 == ARM::dsub_0+7 &&
         ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
  unsigned Sub0 = (is64BitVector ? ARM::dsub_0 : ARM::qsub_0);
  for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
    ReplaceUses(SDValue(N, Vec),
                CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
  ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
  if (isUpdating)
    ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));
  return NULL;
}
1833 SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
1834 const uint16_t *DOpcodes,
1835 const uint16_t *QOpcodes0,
1836 const uint16_t *QOpcodes1) {
1837 assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
1840 SDValue MemAddr, Align;
1841 unsigned AddrOpIdx = isUpdating ? 1 : 2;
1842 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
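  // Operand layout (as built by the lowering code): an updating store is
  // (chain, addr, inc, vec0, ...), and a plain intrinsic store is
  // (chain, intrinsic-id, addr, vec0, ...). Either way the first vector is
  // operand 3.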
  if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
    return NULL;
1846 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1847 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1849 SDValue Chain = N->getOperand(0);
1850 EVT VT = N->getOperand(Vec0Idx).getValueType();
1851 bool is64BitVector = VT.is64BitVector();
1852 Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);
1854 unsigned OpcodeIndex;
1855 switch (VT.getSimpleVT().SimpleTy) {
1856 default: llvm_unreachable("unhandled vst type");
1857 // Double-register operations:
1858 case MVT::v8i8: OpcodeIndex = 0; break;
1859 case MVT::v4i16: OpcodeIndex = 1; break;
1861 case MVT::v2i32: OpcodeIndex = 2; break;
1862 case MVT::v1i64: OpcodeIndex = 3; break;
1863 // Quad-register operations:
1864 case MVT::v16i8: OpcodeIndex = 0; break;
1865 case MVT::v8i16: OpcodeIndex = 1; break;
1867 case MVT::v4i32: OpcodeIndex = 2; break;
1868 case MVT::v2i64: OpcodeIndex = 3;
    assert(NumVecs == 1 && "v2i64 type only supported for VST1");
    break;
1873 std::vector<EVT> ResTys;
1875 ResTys.push_back(MVT::i32);
1876 ResTys.push_back(MVT::Other);
1878 SDValue Pred = getAL(CurDAG);
1879 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
1880 SmallVector<SDValue, 7> Ops;
1882 // Double registers and VST1/VST2 quad registers are directly supported.
1883 if (is64BitVector || NumVecs <= 2) {
    SDValue SrcReg;
    if (NumVecs == 1) {
      SrcReg = N->getOperand(Vec0Idx);
1887 } else if (is64BitVector) {
1888 // Form a REG_SEQUENCE to force register allocation.
1889 SDValue V0 = N->getOperand(Vec0Idx + 0);
1890 SDValue V1 = N->getOperand(Vec0Idx + 1);
1892 SrcReg = SDValue(createDRegPairNode(MVT::v2i64, V0, V1), 0);
1894 SDValue V2 = N->getOperand(Vec0Idx + 2);
      // If it's a vst3, form a quad D-register and leave the last part as
      // an undef.
1897 SDValue V3 = (NumVecs == 3)
1898 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
1899 : N->getOperand(Vec0Idx + 3);
1900 SrcReg = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
1903 // Form a QQ register.
1904 SDValue Q0 = N->getOperand(Vec0Idx);
1905 SDValue Q1 = N->getOperand(Vec0Idx + 1);
1906 SrcReg = SDValue(createQRegPairNode(MVT::v4i64, Q0, Q1), 0);
1909 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
1910 QOpcodes0[OpcodeIndex]);
1911 Ops.push_back(MemAddr);
1912 Ops.push_back(Align);
1914 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1915 // FIXME: VST1/VST2 fixed increment doesn't need Reg0. Remove the reg0
1916 // case entirely when the rest are updated to that form, too.
1917 if (NumVecs <= 2 && !isa<ConstantSDNode>(Inc.getNode()))
1918 Opc = getVLDSTRegisterUpdateOpcode(Opc);
    // We use a VST1 for v1i64 even if the pseudo says vst2/3/4, so
    // check for that explicitly too. Horribly hacky, but temporary.
1921 if ((NumVecs > 2 && Opc != ARM::VST1q64wb_fixed) ||
1922 !isa<ConstantSDNode>(Inc.getNode()))
1923 Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
1925 Ops.push_back(SrcReg);
1926 Ops.push_back(Pred);
1927 Ops.push_back(Reg0);
1928 Ops.push_back(Chain);
1929 SDNode *VSt = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1931 // Transfer memoperands.
1932 cast<MachineSDNode>(VSt)->setMemRefs(MemOp, MemOp + 1);
1937 // Otherwise, quad registers are stored with two separate instructions,
1938 // where one stores the even registers and the other stores the odd registers.
1940 // Form the QQQQ REG_SEQUENCE.
1941 SDValue V0 = N->getOperand(Vec0Idx + 0);
1942 SDValue V1 = N->getOperand(Vec0Idx + 1);
1943 SDValue V2 = N->getOperand(Vec0Idx + 2);
1944 SDValue V3 = (NumVecs == 3)
1945 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
1946 : N->getOperand(Vec0Idx + 3);
1947 SDValue RegSeq = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0);
1949 // Store the even D registers. This is always an updating store, so that it
1950 // provides the address to the second store for the odd subregs.
1951 const SDValue OpsA[] = { MemAddr, Align, Reg0, RegSeq, Pred, Reg0, Chain };
1952 SDNode *VStA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
                                          MemAddr.getValueType(),
                                          MVT::Other, OpsA);
1955 cast<MachineSDNode>(VStA)->setMemRefs(MemOp, MemOp + 1);
1956 Chain = SDValue(VStA, 1);
1958 // Store the odd D registers.
1959 Ops.push_back(SDValue(VStA, 0));
1960 Ops.push_back(Align);
1962 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1963 assert(isa<ConstantSDNode>(Inc.getNode()) &&
1964 "only constant post-increment update allowed for VST3/4");
1966 Ops.push_back(Reg0);
1968 Ops.push_back(RegSeq);
1969 Ops.push_back(Pred);
1970 Ops.push_back(Reg0);
1971 Ops.push_back(Chain);
    SDNode *VStB = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
                                          Ops);
1974 cast<MachineSDNode>(VStB)->setMemRefs(MemOp, MemOp + 1);
1978 SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
1979 bool isUpdating, unsigned NumVecs,
1980 const uint16_t *DOpcodes,
1981 const uint16_t *QOpcodes) {
  assert(NumVecs >= 2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
1985 SDValue MemAddr, Align;
1986 unsigned AddrOpIdx = isUpdating ? 1 : 2;
1987 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
  if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
    return NULL;
1991 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1992 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1994 SDValue Chain = N->getOperand(0);
  unsigned Lane =
    cast<ConstantSDNode>(N->getOperand(Vec0Idx + NumVecs))->getZExtValue();
1997 EVT VT = N->getOperand(Vec0Idx).getValueType();
1998 bool is64BitVector = VT.is64BitVector();
2000 unsigned Alignment = 0;
2002 Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
2003 unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
2004 if (Alignment > NumBytes)
2005 Alignment = NumBytes;
2006 if (Alignment < 8 && Alignment < NumBytes)
2008 // Alignment must be a power of two; make sure of that.
2009 Alignment = (Alignment & -Alignment);
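    // (This keeps only the lowest set bit: e.g. a claimed alignment of
    // 24 bytes is treated as 8-byte alignment.)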
2013 Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
2015 unsigned OpcodeIndex;
2016 switch (VT.getSimpleVT().SimpleTy) {
2017 default: llvm_unreachable("unhandled vld/vst lane type");
2018 // Double-register operations:
2019 case MVT::v8i8: OpcodeIndex = 0; break;
2020 case MVT::v4i16: OpcodeIndex = 1; break;
2022 case MVT::v2i32: OpcodeIndex = 2; break;
2023 // Quad-register operations:
2024 case MVT::v8i16: OpcodeIndex = 0; break;
2026 case MVT::v4i32: OpcodeIndex = 1; break;
2029 std::vector<EVT> ResTys;
2031 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
2034 ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(),
2035 MVT::i64, ResTyElts));
2038 ResTys.push_back(MVT::i32);
2039 ResTys.push_back(MVT::Other);
2041 SDValue Pred = getAL(CurDAG);
2042 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2044 SmallVector<SDValue, 8> Ops;
2045 Ops.push_back(MemAddr);
2046 Ops.push_back(Align);
2048 SDValue Inc = N->getOperand(AddrOpIdx + 1);
2049 Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
  SDValue SuperReg;
  SDValue V0 = N->getOperand(Vec0Idx + 0);
2054 SDValue V1 = N->getOperand(Vec0Idx + 1);
2057 SuperReg = SDValue(createDRegPairNode(MVT::v2i64, V0, V1), 0);
2059 SuperReg = SDValue(createQRegPairNode(MVT::v4i64, V0, V1), 0);
2061 SDValue V2 = N->getOperand(Vec0Idx + 2);
2062 SDValue V3 = (NumVecs == 3)
2063 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
2064 : N->getOperand(Vec0Idx + 3);
2066 SuperReg = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
2068 SuperReg = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0);
2070 Ops.push_back(SuperReg);
2071 Ops.push_back(getI32Imm(Lane));
2072 Ops.push_back(Pred);
2073 Ops.push_back(Reg0);
2074 Ops.push_back(Chain);
2076 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
2077 QOpcodes[OpcodeIndex]);
2078 SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
2079 cast<MachineSDNode>(VLdLn)->setMemRefs(MemOp, MemOp + 1);
2083 // Extract the subregisters.
2084 SuperReg = SDValue(VLdLn, 0);
2085 assert(ARM::dsub_7 == ARM::dsub_0+7 &&
2086 ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
2087 unsigned Sub0 = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
2088 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
2089 ReplaceUses(SDValue(N, Vec),
2090 CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
2091 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1));
  if (isUpdating)
    ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2));
  return NULL;
2097 SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
                                      unsigned NumVecs,
                                      const uint16_t *Opcodes) {
  assert(NumVecs >= 2 && NumVecs <= 4 && "VLDDup NumVecs out-of-range");
2103 SDValue MemAddr, Align;
  if (!SelectAddrMode6(N, N->getOperand(1), MemAddr, Align))
    return NULL;
2107 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2108 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2110 SDValue Chain = N->getOperand(0);
2111 EVT VT = N->getValueType(0);
2113 unsigned Alignment = 0;
2115 Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
2116 unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
2117 if (Alignment > NumBytes)
2118 Alignment = NumBytes;
2119 if (Alignment < 8 && Alignment < NumBytes)
2121 // Alignment must be a power of two; make sure of that.
2122 Alignment = (Alignment & -Alignment);
2126 Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
2128 unsigned OpcodeIndex;
2129 switch (VT.getSimpleVT().SimpleTy) {
2130 default: llvm_unreachable("unhandled vld-dup type");
2131 case MVT::v8i8: OpcodeIndex = 0; break;
2132 case MVT::v4i16: OpcodeIndex = 1; break;
2134 case MVT::v2i32: OpcodeIndex = 2; break;
2137 SDValue Pred = getAL(CurDAG);
2138 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2140 unsigned Opc = Opcodes[OpcodeIndex];
2141 SmallVector<SDValue, 6> Ops;
2142 Ops.push_back(MemAddr);
2143 Ops.push_back(Align);
    // Fixed-stride update instructions don't have an explicit writeback
    // operand; it's implicit in the opcode itself.
2147 SDValue Inc = N->getOperand(2);
    if (!isa<ConstantSDNode>(Inc.getNode()))
      Ops.push_back(Inc);
2150 // FIXME: VLD3 and VLD4 haven't been updated to that form yet.
2151 else if (NumVecs > 2)
2152 Ops.push_back(Reg0);
2154 Ops.push_back(Pred);
2155 Ops.push_back(Reg0);
2156 Ops.push_back(Chain);
2158 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
2159 std::vector<EVT> ResTys;
2160 ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(), MVT::i64,ResTyElts));
2162 ResTys.push_back(MVT::i32);
2163 ResTys.push_back(MVT::Other);
2164 SDNode *VLdDup = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
2165 cast<MachineSDNode>(VLdDup)->setMemRefs(MemOp, MemOp + 1);
  SDValue SuperReg = SDValue(VLdDup, 0);
2168 // Extract the subregisters.
2169 assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
2170 unsigned SubIdx = ARM::dsub_0;
2171 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
2172 ReplaceUses(SDValue(N, Vec),
2173 CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, SuperReg));
2174 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1));
  if (isUpdating)
    ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2));
  return NULL;
SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs,
                                    unsigned Opc) {
2182 assert(NumVecs >= 2 && NumVecs <= 4 && "VTBL NumVecs out-of-range");
2184 EVT VT = N->getValueType(0);
2185 unsigned FirstTblReg = IsExt ? 2 : 1;
  // Form a REG_SEQUENCE to force register allocation.
  SDValue RegSeq;
2189 SDValue V0 = N->getOperand(FirstTblReg + 0);
2190 SDValue V1 = N->getOperand(FirstTblReg + 1);
  if (NumVecs == 2)
    RegSeq = SDValue(createDRegPairNode(MVT::v16i8, V0, V1), 0);
  else {
2194 SDValue V2 = N->getOperand(FirstTblReg + 2);
    // If it's a vtbl3, form a quad D-register and leave the last part as
    // an undef.
2197 SDValue V3 = (NumVecs == 3)
2198 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
2199 : N->getOperand(FirstTblReg + 3);
2200 RegSeq = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
  }

  SmallVector<SDValue, 6> Ops;
  if (IsExt)
    Ops.push_back(N->getOperand(1));
2206 Ops.push_back(RegSeq);
2207 Ops.push_back(N->getOperand(FirstTblReg + NumVecs));
2208 Ops.push_back(getAL(CurDAG)); // predicate
2209 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // predicate register
2210 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
                                                     bool isSigned) {
  if (!Subtarget->hasV6T2Ops())
    return NULL;
2218 unsigned Opc = isSigned
2219 ? (Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX)
2220 : (Subtarget->isThumb() ? ARM::t2UBFX : ARM::UBFX);
2222 // For unsigned extracts, check for a shift right and mask
2223 unsigned And_imm = 0;
2224 if (N->getOpcode() == ISD::AND) {
2225 if (isOpcWithIntImmediate(N, ISD::AND, And_imm)) {
2227 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
      if (And_imm & (And_imm + 1))
        return NULL;
2231 unsigned Srl_imm = 0;
      if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL,
                                Srl_imm)) {
2234 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
2236 // Note: The width operand is encoded as width-1.
2237 unsigned Width = CountTrailingOnes_32(And_imm) - 1;
2238 unsigned LSB = Srl_imm;
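        // e.g. (and (srl x, 8), 0xff) has And_imm = 0xff and Srl_imm = 8, so
        // it extracts bits [15:8]: LSB = 8 and Width = 8 - 1 = 7.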
2240 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2242 if ((LSB + Width + 1) == N->getValueType(0).getSizeInBits()) {
2243 // It's cheaper to use a right shift to extract the top bits.
2244 if (Subtarget->isThumb()) {
2245 Opc = isSigned ? ARM::t2ASRri : ARM::t2LSRri;
2246 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2247 CurDAG->getTargetConstant(LSB, MVT::i32),
2248 getAL(CurDAG), Reg0, Reg0 };
2249 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
2252 // ARM models shift instructions as MOVsi with shifter operand.
2253 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(ISD::SRL);
          SDValue ShOpc =
            CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, LSB),
                                      MVT::i32);
2257 SDValue Ops[] = { N->getOperand(0).getOperand(0), ShOpc,
2258 getAL(CurDAG), Reg0, Reg0 };
2259 return CurDAG->SelectNodeTo(N, ARM::MOVsi, MVT::i32, Ops, 5);
2262 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2263 CurDAG->getTargetConstant(LSB, MVT::i32),
2264 CurDAG->getTargetConstant(Width, MVT::i32),
2265 getAL(CurDAG), Reg0 };
2266 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
2272 // Otherwise, we're looking for a shift of a shift
2273 unsigned Shl_imm = 0;
2274 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
2275 assert(Shl_imm > 0 && Shl_imm < 32 && "bad amount in shift node!");
2276 unsigned Srl_imm = 0;
2277 if (isInt32Immediate(N->getOperand(1), Srl_imm)) {
2278 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
2279 // Note: The width operand is encoded as width-1.
2280 unsigned Width = 32 - Srl_imm - 1;
      int LSB = Srl_imm - Shl_imm;
      if (LSB < 0)
        return NULL;
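      // e.g. for (srl (shl x, 4), 8) this extracts bits [27:4] of x:
      // LSB = 8 - 4 = 4 and Width = 32 - 8 - 1 = 23 (width-1 encoding).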
2284 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2285 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2286 CurDAG->getTargetConstant(LSB, MVT::i32),
2287 CurDAG->getTargetConstant(Width, MVT::i32),
2288 getAL(CurDAG), Reg0 };
2289 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
2295 SDNode *ARMDAGToDAGISel::
2296 SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
2297 ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
  SDValue CPTmp0;
  SDValue CPTmp1;
  if (SelectT2ShifterOperandReg(TrueVal, CPTmp0, CPTmp1)) {
2301 unsigned SOVal = cast<ConstantSDNode>(CPTmp1)->getZExtValue();
2302 unsigned SOShOp = ARM_AM::getSORegShOp(SOVal);
    unsigned Opc = 0;
    switch (SOShOp) {
    case ARM_AM::lsl: Opc = ARM::t2MOVCClsl; break;
2306 case ARM_AM::lsr: Opc = ARM::t2MOVCClsr; break;
2307 case ARM_AM::asr: Opc = ARM::t2MOVCCasr; break;
2308 case ARM_AM::ror: Opc = ARM::t2MOVCCror; break;
    default:
      llvm_unreachable("Unknown so_reg opcode!");
    }
    SDValue SOShImm =
      CurDAG->getTargetConstant(ARM_AM::getSORegOffset(SOVal), MVT::i32);
2314 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2315 SDValue Ops[] = { FalseVal, CPTmp0, SOShImm, CC, CCR, InFlag };
    return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 6);
2321 SDNode *ARMDAGToDAGISel::
2322 SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
2323 ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
  SDValue CPTmp0;
  SDValue CPTmp1;
  SDValue CPTmp2;
  if (SelectImmShifterOperand(TrueVal, CPTmp0, CPTmp2)) {
2328 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2329 SDValue Ops[] = { FalseVal, CPTmp0, CPTmp2, CC, CCR, InFlag };
2330 return CurDAG->SelectNodeTo(N, ARM::MOVCCsi, MVT::i32, Ops, 6);
2333 if (SelectRegShifterOperand(TrueVal, CPTmp0, CPTmp1, CPTmp2)) {
2334 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2335 SDValue Ops[] = { FalseVal, CPTmp0, CPTmp1, CPTmp2, CC, CCR, InFlag };
2336 return CurDAG->SelectNodeTo(N, ARM::MOVCCsr, MVT::i32, Ops, 7);
2341 SDNode *ARMDAGToDAGISel::
2342 SelectT2CMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
2343 ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
2344 ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
  if (!T)
    return 0;

  unsigned Opc = 0;
  unsigned TrueImm = T->getZExtValue();
2350 if (is_t2_so_imm(TrueImm)) {
2351 Opc = ARM::t2MOVCCi;
2352 } else if (TrueImm <= 0xffff) {
2353 Opc = ARM::t2MOVCCi16;
2354 } else if (is_t2_so_imm_not(TrueImm)) {
    TrueImm = ~TrueImm;
    Opc = ARM::t2MVNCCi;
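    // e.g. TrueImm = 0xffffff00 is not directly encodable, but its complement
    // 0xff is, so the value is materialized with a predicated MVN instead.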
2357 } else if (TrueVal.getNode()->hasOneUse() && Subtarget->hasV6T2Ops()) {
2359 Opc = ARM::t2MOVCCi32imm;
  }

  if (Opc) {
    SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
2364 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2365 SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag };
2366 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
2372 SDNode *ARMDAGToDAGISel::
2373 SelectARMCMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
2374 ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
2375 ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
  if (!T)
    return 0;

  unsigned Opc = 0;
  unsigned TrueImm = T->getZExtValue();
  bool isSoImm = is_so_imm(TrueImm);
  if (isSoImm) {
    Opc = ARM::MOVCCi;
2384 } else if (Subtarget->hasV6T2Ops() && TrueImm <= 0xffff) {
2385 Opc = ARM::MOVCCi16;
  } else if (is_so_imm_not(TrueImm)) {
    TrueImm = ~TrueImm;
    Opc = ARM::MVNCCi;
2389 } else if (TrueVal.getNode()->hasOneUse() &&
2390 (Subtarget->hasV6T2Ops() || ARM_AM::isSOImmTwoPartVal(TrueImm))) {
2392 Opc = ARM::MOVCCi32imm;
  }

  if (Opc) {
    SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
2397 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2398 SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag };
2399 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
2405 SDNode *ARMDAGToDAGISel::SelectCMOVOp(SDNode *N) {
2406 EVT VT = N->getValueType(0);
2407 SDValue FalseVal = N->getOperand(0);
2408 SDValue TrueVal = N->getOperand(1);
2409 SDValue CC = N->getOperand(2);
2410 SDValue CCR = N->getOperand(3);
2411 SDValue InFlag = N->getOperand(4);
2412 assert(CC.getOpcode() == ISD::Constant);
2413 assert(CCR.getOpcode() == ISD::Register);
2414 ARMCC::CondCodes CCVal =
2415 (ARMCC::CondCodes)cast<ConstantSDNode>(CC)->getZExtValue();
2417 if (!Subtarget->isThumb1Only() && VT == MVT::i32) {
2418 // Pattern: (ARMcmov:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
2419 // Emits: (MOVCCs:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
2420 // Pattern complexity = 18 cost = 1 size = 0
2421 if (Subtarget->isThumb()) {
2422 SDNode *Res = SelectT2CMOVShiftOp(N, FalseVal, TrueVal,
2423 CCVal, CCR, InFlag);
2425 Res = SelectT2CMOVShiftOp(N, TrueVal, FalseVal,
2426 ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
2430 SDNode *Res = SelectARMCMOVShiftOp(N, FalseVal, TrueVal,
2431 CCVal, CCR, InFlag);
2433 Res = SelectARMCMOVShiftOp(N, TrueVal, FalseVal,
2434 ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
2439 // Pattern: (ARMcmov:i32 GPR:i32:$false,
2440 // (imm:i32)<<P:Pred_so_imm>>:$true,
2442 // Emits: (MOVCCi:i32 GPR:i32:$false,
2443 // (so_imm:i32 (imm:i32):$true), (imm:i32):$cc)
2444 // Pattern complexity = 10 cost = 1 size = 0
2445 if (Subtarget->isThumb()) {
2446 SDNode *Res = SelectT2CMOVImmOp(N, FalseVal, TrueVal,
2447 CCVal, CCR, InFlag);
2449 Res = SelectT2CMOVImmOp(N, TrueVal, FalseVal,
2450 ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
2454 SDNode *Res = SelectARMCMOVImmOp(N, FalseVal, TrueVal,
2455 CCVal, CCR, InFlag);
2457 Res = SelectARMCMOVImmOp(N, TrueVal, FalseVal,
2458 ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
2464 // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
2465 // Emits: (MOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
2466 // Pattern complexity = 6 cost = 1 size = 0
2468 // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
2469 // Emits: (tMOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
2470 // Pattern complexity = 6 cost = 11 size = 0
2472 // Also VMOVScc and VMOVDcc.
2473 SDValue Tmp2 = CurDAG->getTargetConstant(CCVal, MVT::i32);
2474 SDValue Ops[] = { FalseVal, TrueVal, Tmp2, CCR, InFlag };
  unsigned Opc = 0;
  switch (VT.getSimpleVT().SimpleTy) {
2477 default: llvm_unreachable("Illegal conditional move type!");
  case MVT::i32:
    Opc = Subtarget->isThumb()
      ? (Subtarget->hasThumb2() ? ARM::t2MOVCCr : ARM::tMOVCCr_pseudo)
      : ARM::MOVCCr;
    break;
  case MVT::f32:
    Opc = ARM::VMOVScc;
    break;
  case MVT::f64:
    Opc = ARM::VMOVDcc;
    break;
  }
2490 return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 5);
2493 /// Target-specific DAG combining for ISD::XOR.
2494 /// Target-independent combining lowers SELECT_CC nodes of the form
2495 /// select_cc setg[ge] X, 0, X, -X
2496 /// select_cc setgt X, -1, X, -X
2497 /// select_cc setl[te] X, 0, -X, X
2498 /// select_cc setlt X, 1, -X, X
2499 /// which represent Integer ABS into:
2500 /// Y = sra (X, size(X)-1); xor (add (X, Y), Y)
2501 /// ARM instruction selection detects the latter and matches it to
2502 /// ARM::ABS or ARM::t2ABS machine node.
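/// For example, with X = -5: Y = -5 >> 31 = -1, X + Y = -6, and
/// (-6 xor -1) = 5 = |X|.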
2503 SDNode *ARMDAGToDAGISel::SelectABSOp(SDNode *N){
2504 SDValue XORSrc0 = N->getOperand(0);
2505 SDValue XORSrc1 = N->getOperand(1);
2506 EVT VT = N->getValueType(0);
  if (Subtarget->isThumb1Only())
    return NULL;
  if (XORSrc0.getOpcode() != ISD::ADD || XORSrc1.getOpcode() != ISD::SRA)
    return NULL;
2514 SDValue ADDSrc0 = XORSrc0.getOperand(0);
2515 SDValue ADDSrc1 = XORSrc0.getOperand(1);
2516 SDValue SRASrc0 = XORSrc1.getOperand(0);
2517 SDValue SRASrc1 = XORSrc1.getOperand(1);
2518 ConstantSDNode *SRAConstant = dyn_cast<ConstantSDNode>(SRASrc1);
2519 EVT XType = SRASrc0.getValueType();
2520 unsigned Size = XType.getSizeInBits() - 1;
2522 if (ADDSrc1 == XORSrc1 && ADDSrc0 == SRASrc0 &&
2523 XType.isInteger() && SRAConstant != NULL &&
2524 Size == SRAConstant->getZExtValue()) {
2525 unsigned Opcode = Subtarget->isThumb2() ? ARM::t2ABS : ARM::ABS;
2526 return CurDAG->SelectNodeTo(N, Opcode, VT, ADDSrc0);
2532 SDNode *ARMDAGToDAGISel::SelectConcatVector(SDNode *N) {
2533 // The only time a CONCAT_VECTORS operation can have legal types is when
2534 // two 64-bit vectors are concatenated to a 128-bit vector.
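  // e.g. (concat_vectors v2f32:$Da, v2f32:$Db) becomes a REG_SEQUENCE that
  // places $Da and $Db in the low and high D subregs of one Q register.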
2535 EVT VT = N->getValueType(0);
2536 if (!VT.is128BitVector() || N->getNumOperands() != 2)
2537 llvm_unreachable("unexpected CONCAT_VECTORS");
2538 return createDRegPairNode(VT, N->getOperand(0), N->getOperand(1));
2541 SDNode *ARMDAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
2542 SmallVector<SDValue, 6> Ops;
2543 Ops.push_back(Node->getOperand(1)); // Ptr
2544 Ops.push_back(Node->getOperand(2)); // Low part of Val1
2545 Ops.push_back(Node->getOperand(3)); // High part of Val1
2546 if (Opc == ARM::ATOMCMPXCHG6432) {
2547 Ops.push_back(Node->getOperand(4)); // Low part of Val2
2548 Ops.push_back(Node->getOperand(5)); // High part of Val2
2550 Ops.push_back(Node->getOperand(0)); // Chain
2551 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2552 MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
2553 SDNode *ResNode = CurDAG->getMachineNode(Opc, SDLoc(Node),
                                            MVT::i32, MVT::i32, MVT::Other,
                                            Ops);
  cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
  return ResNode;
2560 SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
  SDLoc dl(N);

  if (N->isMachineOpcode())
2564 return NULL; // Already selected.
2566 switch (N->getOpcode()) {
2568 case ISD::INLINEASM: {
    SDNode *ResNode = SelectInlineAsm(N);
    if (ResNode)
      return ResNode;
    break;
  }
  case ISD::XOR: {
    // Select a special operation if the XOR node forms an integer ABS
    // pattern.
    SDNode *ResNode = SelectABSOp(N);
    if (ResNode)
      return ResNode;
    // Other cases are autogenerated.
    break;
  }
2582 case ISD::Constant: {
2583 unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
    bool UseCP = true;
    if (Subtarget->hasThumb2())
2586 // Thumb2-aware targets have the MOVT instruction, so all immediates can
2587 // be done with MOV + MOVT, at worst.
2590 if (Subtarget->isThumb()) {
2591 UseCP = (Val > 255 && // MOV
2592 ~Val > 255 && // MOV + MVN
2593 !ARM_AM::isThumbImmShiftedVal(Val)); // MOV + LSL
      } else
        UseCP = (ARM_AM::getSOImmVal(Val) == -1 &&     // MOV
2596 ARM_AM::getSOImmVal(~Val) == -1 && // MVN
2597 !ARM_AM::isSOImmTwoPartVal(Val)); // two instrs.
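        // e.g. 0x12345678 fails all three tests in ARM mode and is loaded
        // from the constant pool, while 0xff00ff00 is a two-part so_imm and
        // is synthesized inline instead.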
      SDValue CPIdx =
        CurDAG->getTargetConstantPool(ConstantInt::get(
                                  Type::getInt32Ty(*CurDAG->getContext()), Val),
                                      getTargetLowering()->getPointerTy());
      SDNode *ResNode;
      if (Subtarget->isThumb1Only()) {
2608 SDValue Pred = getAL(CurDAG);
2609 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2610 SDValue Ops[] = { CPIdx, Pred, PredReg, CurDAG->getEntryNode() };
        ResNode = CurDAG->getMachineNode(ARM::tLDRpci, dl, MVT::i32, MVT::Other,
                                         Ops);
      } else {
        SDValue Ops[] = {
          CPIdx,
          CurDAG->getTargetConstant(0, MVT::i32),
          getAL(CurDAG),
2618 CurDAG->getRegister(0, MVT::i32),
2619 CurDAG->getEntryNode()
        };
        ResNode = CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
                                         Ops);
      }
2624 ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0));
      return NULL;
    }

    // Other cases are autogenerated.
    break;
  }
2631 case ISD::FrameIndex: {
2632 // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
2633 int FI = cast<FrameIndexSDNode>(N)->getIndex();
2634 SDValue TFI = CurDAG->getTargetFrameIndex(FI,
2635 getTargetLowering()->getPointerTy());
2636 if (Subtarget->isThumb1Only()) {
2637 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
2638 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2639 return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, Ops, 4);
2641 unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
2642 ARM::t2ADDri : ARM::ADDri);
2643 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
2644 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2645 CurDAG->getRegister(0, MVT::i32) };
2646 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
  }
  case ISD::SRL:
    if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
      return I;
    break;
  case ISD::SRA:
    if (SDNode *I = SelectV6T2BitfieldExtractOp(N, true))
      return I;
    break;
  case ISD::MUL:
    if (Subtarget->isThumb1Only())
      break;
2660 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
2661 unsigned RHSV = C->getZExtValue();
2663 if (isPowerOf2_32(RHSV-1)) { // 2^n+1?
2664 unsigned ShImm = Log2_32(RHSV-1);
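        // e.g. x * 9: RHSV-1 = 8 is a power of two, ShImm = 3, and the
        // product is formed as add x, x, lsl #3 (x + 8*x).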
2667 SDValue V = N->getOperand(0);
2668 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
2669 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
2670 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2671 if (Subtarget->isThumb()) {
2672 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2673 return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops, 6);
2675 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2676 return CurDAG->SelectNodeTo(N, ARM::ADDrsi, MVT::i32, Ops, 7);
2679 if (isPowerOf2_32(RHSV+1)) { // 2^n-1?
2680 unsigned ShImm = Log2_32(RHSV+1);
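        // e.g. x * 7: RHSV+1 = 8, ShImm = 3, and the product is formed as
        // rsb x, x, lsl #3 (8*x - x).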
2683 SDValue V = N->getOperand(0);
2684 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
2685 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
2686 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2687 if (Subtarget->isThumb()) {
2688 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2689 return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops, 6);
2691 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2692 return CurDAG->SelectNodeTo(N, ARM::RSBrsi, MVT::i32, Ops, 7);
      }
    }
    break;
  case ISD::AND: {
    // Check for an unsigned bitfield extract.
    if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
      return I;
    // (and (or x, c2), c1) where the top 16 bits of c1 and c2 match, the
    // lower 16 bits of c1 are 0xffff, and the lower 16 bits of c2 are 0. That
    // is, the top 16 bits are entirely contributed by c2 and the lower 16
    // bits are entirely contributed by x. That's equal to
    // (or (and x, 0xffff), (and c2, 0xffff0000)).
    // Select it to: "movt x, ((c2 & 0xffff0000) >> 16)".
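    // e.g. c1 = 0xabcdffff and c2 = 0xabcd0000 reduce to "movt x, #0xabcd",
    // which writes 0xabcd into the top halfword of x and leaves the low
    // halfword intact.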
2707 EVT VT = N->getValueType(0);
    unsigned Opc = (Subtarget->isThumb() && Subtarget->hasThumb2())
      ? ARM::t2MOVTi16
      : (Subtarget->hasV6T2Ops() ? ARM::MOVTi16 : 0);
2715 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
2716 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2719 if (N0.getOpcode() == ISD::OR && N0.getNode()->hasOneUse()) {
2720 SDValue N2 = N0.getOperand(1);
2721 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
2724 unsigned N1CVal = N1C->getZExtValue();
2725 unsigned N2CVal = N2C->getZExtValue();
2726 if ((N1CVal & 0xffff0000U) == (N2CVal & 0xffff0000U) &&
2727 (N1CVal & 0xffffU) == 0xffffU &&
2728 (N2CVal & 0xffffU) == 0x0U) {
2729 SDValue Imm16 = CurDAG->getTargetConstant((N2CVal & 0xFFFF0000U) >> 16,
2731 SDValue Ops[] = { N0.getOperand(0), Imm16,
2732 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2733 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
2738 case ARMISD::VMOVRRD:
2739 return CurDAG->getMachineNode(ARM::VMOVRRD, dl, MVT::i32, MVT::i32,
2740 N->getOperand(0), getAL(CurDAG),
2741 CurDAG->getRegister(0, MVT::i32));
2742 case ISD::UMUL_LOHI: {
    if (Subtarget->isThumb1Only())
      break;
2745 if (Subtarget->isThumb()) {
2746 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2747 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2748 return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32, Ops);
2750 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2751 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2752 CurDAG->getRegister(0, MVT::i32) };
2753 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2754 ARM::UMULL : ARM::UMULLv5,
2755 dl, MVT::i32, MVT::i32, Ops);
2758 case ISD::SMUL_LOHI: {
    if (Subtarget->isThumb1Only())
      break;
2761 if (Subtarget->isThumb()) {
2762 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2763 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2764 return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32, Ops);
2766 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2767 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2768 CurDAG->getRegister(0, MVT::i32) };
2769 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2770 ARM::SMULL : ARM::SMULLv5,
2771 dl, MVT::i32, MVT::i32, Ops);
2774 case ARMISD::UMLAL:{
2775 if (Subtarget->isThumb()) {
2776 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2777 N->getOperand(3), getAL(CurDAG),
2778 CurDAG->getRegister(0, MVT::i32)};
2779 return CurDAG->getMachineNode(ARM::t2UMLAL, dl, MVT::i32, MVT::i32, Ops);
2781 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2782 N->getOperand(3), getAL(CurDAG),
2783 CurDAG->getRegister(0, MVT::i32),
2784 CurDAG->getRegister(0, MVT::i32) };
2785 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2786 ARM::UMLAL : ARM::UMLALv5,
2787 dl, MVT::i32, MVT::i32, Ops);
2790 case ARMISD::SMLAL:{
2791 if (Subtarget->isThumb()) {
2792 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2793 N->getOperand(3), getAL(CurDAG),
2794 CurDAG->getRegister(0, MVT::i32)};
2795 return CurDAG->getMachineNode(ARM::t2SMLAL, dl, MVT::i32, MVT::i32, Ops);
2797 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2798 N->getOperand(3), getAL(CurDAG),
2799 CurDAG->getRegister(0, MVT::i32),
2800 CurDAG->getRegister(0, MVT::i32) };
2801 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2802 ARM::SMLAL : ARM::SMLALv5,
2803 dl, MVT::i32, MVT::i32, Ops);
  case ISD::LOAD: {
    SDNode *ResNode = 0;
2808 if (Subtarget->isThumb() && Subtarget->hasThumb2())
2809 ResNode = SelectT2IndexedLoad(N);
    else
      ResNode = SelectARMIndexedLoad(N);
    if (ResNode)
      return ResNode;
    // Other cases are autogenerated.
    break;
  }
2817 case ARMISD::BRCOND: {
2818 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2819 // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc)
2820 // Pattern complexity = 6 cost = 1 size = 0
2822 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2823 // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc)
2824 // Pattern complexity = 6 cost = 1 size = 0
2826 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2827 // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc)
2828 // Pattern complexity = 6 cost = 1 size = 0
2830 unsigned Opc = Subtarget->isThumb() ?
2831 ((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
2832 SDValue Chain = N->getOperand(0);
2833 SDValue N1 = N->getOperand(1);
2834 SDValue N2 = N->getOperand(2);
2835 SDValue N3 = N->getOperand(3);
2836 SDValue InFlag = N->getOperand(4);
2837 assert(N1.getOpcode() == ISD::BasicBlock);
2838 assert(N2.getOpcode() == ISD::Constant);
2839 assert(N3.getOpcode() == ISD::Register);
2841 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
2842 cast<ConstantSDNode>(N2)->getZExtValue()),
2844 SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
2845 SDNode *ResNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
2847 Chain = SDValue(ResNode, 0);
2848 if (N->getNumValues() == 2) {
2849 InFlag = SDValue(ResNode, 1);
2850 ReplaceUses(SDValue(N, 1), InFlag);
2852 ReplaceUses(SDValue(N, 0),
2853 SDValue(Chain.getNode(), Chain.getResNo()));
    return NULL;
  }
  case ARMISD::CMOV:
    return SelectCMOVOp(N);
2858 case ARMISD::VZIP: {
    unsigned Opc = 0;
    EVT VT = N->getValueType(0);
2861 switch (VT.getSimpleVT().SimpleTy) {
2862 default: return NULL;
2863 case MVT::v8i8: Opc = ARM::VZIPd8; break;
2864 case MVT::v4i16: Opc = ARM::VZIPd16; break;
2866 // vzip.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
2867 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2868 case MVT::v16i8: Opc = ARM::VZIPq8; break;
2869 case MVT::v8i16: Opc = ARM::VZIPq16; break;
2871 case MVT::v4i32: Opc = ARM::VZIPq32; break;
2873 SDValue Pred = getAL(CurDAG);
2874 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2875 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2876 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
2878 case ARMISD::VUZP: {
    unsigned Opc = 0;
    EVT VT = N->getValueType(0);
2881 switch (VT.getSimpleVT().SimpleTy) {
2882 default: return NULL;
2883 case MVT::v8i8: Opc = ARM::VUZPd8; break;
2884 case MVT::v4i16: Opc = ARM::VUZPd16; break;
2886 // vuzp.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
2887 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2888 case MVT::v16i8: Opc = ARM::VUZPq8; break;
2889 case MVT::v8i16: Opc = ARM::VUZPq16; break;
2891 case MVT::v4i32: Opc = ARM::VUZPq32; break;
2893 SDValue Pred = getAL(CurDAG);
2894 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2895 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2896 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
2898 case ARMISD::VTRN: {
    unsigned Opc = 0;
    EVT VT = N->getValueType(0);
2901 switch (VT.getSimpleVT().SimpleTy) {
2902 default: return NULL;
2903 case MVT::v8i8: Opc = ARM::VTRNd8; break;
2904 case MVT::v4i16: Opc = ARM::VTRNd16; break;
2906 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2907 case MVT::v16i8: Opc = ARM::VTRNq8; break;
2908 case MVT::v8i16: Opc = ARM::VTRNq16; break;
2910 case MVT::v4i32: Opc = ARM::VTRNq32; break;
2912 SDValue Pred = getAL(CurDAG);
2913 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2914 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2915 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
2917 case ARMISD::BUILD_VECTOR: {
2918 EVT VecVT = N->getValueType(0);
2919 EVT EltVT = VecVT.getVectorElementType();
2920 unsigned NumElts = VecVT.getVectorNumElements();
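    // ARMISD::BUILD_VECTOR only reaches here for FP element types: two f64s
    // combine into a Q register, and two or four f32s combine into a D or Q
    // register, in both cases via a REG_SEQUENCE of subregisters.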
2921 if (EltVT == MVT::f64) {
2922 assert(NumElts == 2 && "unexpected type for BUILD_VECTOR");
2923 return createDRegPairNode(VecVT, N->getOperand(0), N->getOperand(1));
2925 assert(EltVT == MVT::f32 && "unexpected type for BUILD_VECTOR");
2927 return createSRegPairNode(VecVT, N->getOperand(0), N->getOperand(1));
2928 assert(NumElts == 4 && "unexpected type for BUILD_VECTOR");
2929 return createQuadSRegsNode(VecVT, N->getOperand(0), N->getOperand(1),
2930 N->getOperand(2), N->getOperand(3));
2933 case ARMISD::VLD2DUP: {
    static const uint16_t Opcodes[] = { ARM::VLD2DUPd8, ARM::VLD2DUPd16,
                                        ARM::VLD2DUPd32 };
2936 return SelectVLDDup(N, false, 2, Opcodes);
2939 case ARMISD::VLD3DUP: {
2940 static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo,
2941 ARM::VLD3DUPd16Pseudo,
2942 ARM::VLD3DUPd32Pseudo };
2943 return SelectVLDDup(N, false, 3, Opcodes);
2946 case ARMISD::VLD4DUP: {
2947 static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo,
2948 ARM::VLD4DUPd16Pseudo,
2949 ARM::VLD4DUPd32Pseudo };
2950 return SelectVLDDup(N, false, 4, Opcodes);
2953 case ARMISD::VLD2DUP_UPD: {
2954 static const uint16_t Opcodes[] = { ARM::VLD2DUPd8wb_fixed,
2955 ARM::VLD2DUPd16wb_fixed,
2956 ARM::VLD2DUPd32wb_fixed };
2957 return SelectVLDDup(N, true, 2, Opcodes);
2960 case ARMISD::VLD3DUP_UPD: {
2961 static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo_UPD,
2962 ARM::VLD3DUPd16Pseudo_UPD,
2963 ARM::VLD3DUPd32Pseudo_UPD };
2964 return SelectVLDDup(N, true, 3, Opcodes);
2967 case ARMISD::VLD4DUP_UPD: {
2968 static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo_UPD,
2969 ARM::VLD4DUPd16Pseudo_UPD,
2970 ARM::VLD4DUPd32Pseudo_UPD };
2971 return SelectVLDDup(N, true, 4, Opcodes);
2974 case ARMISD::VLD1_UPD: {
2975 static const uint16_t DOpcodes[] = { ARM::VLD1d8wb_fixed,
2976 ARM::VLD1d16wb_fixed,
2977 ARM::VLD1d32wb_fixed,
2978 ARM::VLD1d64wb_fixed };
2979 static const uint16_t QOpcodes[] = { ARM::VLD1q8wb_fixed,
2980 ARM::VLD1q16wb_fixed,
2981 ARM::VLD1q32wb_fixed,
2982 ARM::VLD1q64wb_fixed };
2983 return SelectVLD(N, true, 1, DOpcodes, QOpcodes, 0);
2986 case ARMISD::VLD2_UPD: {
2987 static const uint16_t DOpcodes[] = { ARM::VLD2d8wb_fixed,
2988 ARM::VLD2d16wb_fixed,
2989 ARM::VLD2d32wb_fixed,
2990 ARM::VLD1q64wb_fixed};
2991 static const uint16_t QOpcodes[] = { ARM::VLD2q8PseudoWB_fixed,
2992 ARM::VLD2q16PseudoWB_fixed,
2993 ARM::VLD2q32PseudoWB_fixed };
2994 return SelectVLD(N, true, 2, DOpcodes, QOpcodes, 0);
2997 case ARMISD::VLD3_UPD: {
2998 static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo_UPD,
2999 ARM::VLD3d16Pseudo_UPD,
3000 ARM::VLD3d32Pseudo_UPD,
3001 ARM::VLD1q64wb_fixed};
3002 static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
3003 ARM::VLD3q16Pseudo_UPD,
3004 ARM::VLD3q32Pseudo_UPD };
3005 static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo_UPD,
3006 ARM::VLD3q16oddPseudo_UPD,
3007 ARM::VLD3q32oddPseudo_UPD };
3008 return SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
3011 case ARMISD::VLD4_UPD: {
3012 static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo_UPD,
3013 ARM::VLD4d16Pseudo_UPD,
3014 ARM::VLD4d32Pseudo_UPD,
3015 ARM::VLD1q64wb_fixed};
3016 static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
3017 ARM::VLD4q16Pseudo_UPD,
3018 ARM::VLD4q32Pseudo_UPD };
3019 static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo_UPD,
3020 ARM::VLD4q16oddPseudo_UPD,
3021 ARM::VLD4q32oddPseudo_UPD };
3022 return SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
3025 case ARMISD::VLD2LN_UPD: {
3026 static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo_UPD,
3027 ARM::VLD2LNd16Pseudo_UPD,
3028 ARM::VLD2LNd32Pseudo_UPD };
3029 static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo_UPD,
3030 ARM::VLD2LNq32Pseudo_UPD };
3031 return SelectVLDSTLane(N, true, true, 2, DOpcodes, QOpcodes);
3034 case ARMISD::VLD3LN_UPD: {
3035 static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo_UPD,
3036 ARM::VLD3LNd16Pseudo_UPD,
3037 ARM::VLD3LNd32Pseudo_UPD };
3038 static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo_UPD,
3039 ARM::VLD3LNq32Pseudo_UPD };
3040 return SelectVLDSTLane(N, true, true, 3, DOpcodes, QOpcodes);
3043 case ARMISD::VLD4LN_UPD: {
3044 static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo_UPD,
3045 ARM::VLD4LNd16Pseudo_UPD,
3046 ARM::VLD4LNd32Pseudo_UPD };
3047 static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo_UPD,
3048 ARM::VLD4LNq32Pseudo_UPD };
3049 return SelectVLDSTLane(N, true, true, 4, DOpcodes, QOpcodes);
3052 case ARMISD::VST1_UPD: {
3053 static const uint16_t DOpcodes[] = { ARM::VST1d8wb_fixed,
3054 ARM::VST1d16wb_fixed,
3055 ARM::VST1d32wb_fixed,
3056 ARM::VST1d64wb_fixed };
3057 static const uint16_t QOpcodes[] = { ARM::VST1q8wb_fixed,
3058 ARM::VST1q16wb_fixed,
3059 ARM::VST1q32wb_fixed,
3060 ARM::VST1q64wb_fixed };
3061 return SelectVST(N, true, 1, DOpcodes, QOpcodes, 0);
3064 case ARMISD::VST2_UPD: {
3065 static const uint16_t DOpcodes[] = { ARM::VST2d8wb_fixed,
3066 ARM::VST2d16wb_fixed,
3067 ARM::VST2d32wb_fixed,
3068 ARM::VST1q64wb_fixed};
3069 static const uint16_t QOpcodes[] = { ARM::VST2q8PseudoWB_fixed,
3070 ARM::VST2q16PseudoWB_fixed,
3071 ARM::VST2q32PseudoWB_fixed };
3072 return SelectVST(N, true, 2, DOpcodes, QOpcodes, 0);
3075 case ARMISD::VST3_UPD: {
3076 static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo_UPD,
3077 ARM::VST3d16Pseudo_UPD,
3078 ARM::VST3d32Pseudo_UPD,
3079 ARM::VST1d64TPseudoWB_fixed};
3080 static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
3081 ARM::VST3q16Pseudo_UPD,
3082 ARM::VST3q32Pseudo_UPD };
3083 static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo_UPD,
3084 ARM::VST3q16oddPseudo_UPD,
3085 ARM::VST3q32oddPseudo_UPD };
3086 return SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
3089 case ARMISD::VST4_UPD: {
3090 static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo_UPD,
3091 ARM::VST4d16Pseudo_UPD,
3092 ARM::VST4d32Pseudo_UPD,
3093 ARM::VST1d64QPseudoWB_fixed};
3094 static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
3095 ARM::VST4q16Pseudo_UPD,
3096 ARM::VST4q32Pseudo_UPD };
3097 static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo_UPD,
3098 ARM::VST4q16oddPseudo_UPD,
3099 ARM::VST4q32oddPseudo_UPD };
3100 return SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
3103 case ARMISD::VST2LN_UPD: {
3104 static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo_UPD,
3105 ARM::VST2LNd16Pseudo_UPD,
3106 ARM::VST2LNd32Pseudo_UPD };
3107 static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo_UPD,
3108 ARM::VST2LNq32Pseudo_UPD };
3109 return SelectVLDSTLane(N, false, true, 2, DOpcodes, QOpcodes);
3112 case ARMISD::VST3LN_UPD: {
3113 static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo_UPD,
3114 ARM::VST3LNd16Pseudo_UPD,
3115 ARM::VST3LNd32Pseudo_UPD };
3116 static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo_UPD,
3117 ARM::VST3LNq32Pseudo_UPD };
3118 return SelectVLDSTLane(N, false, true, 3, DOpcodes, QOpcodes);
3121 case ARMISD::VST4LN_UPD: {
3122 static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo_UPD,
3123 ARM::VST4LNd16Pseudo_UPD,
3124 ARM::VST4LNd32Pseudo_UPD };
3125 static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo_UPD,
3126 ARM::VST4LNq32Pseudo_UPD };
3127 return SelectVLDSTLane(N, false, true, 4, DOpcodes, QOpcodes);
3130 case ISD::INTRINSIC_VOID:
3131 case ISD::INTRINSIC_W_CHAIN: {
3132 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
3137 case Intrinsic::arm_ldrexd: {
3138 SDValue MemAddr = N->getOperand(2);
3140 SDValue Chain = N->getOperand(0);
3142 bool isThumb = Subtarget->isThumb() && Subtarget->hasThumb2();
      unsigned NewOpc = isThumb ? ARM::t2LDREXD : ARM::LDREXD;
      // arm_ldrexd returns an i64 value in {i32, i32}.
3146 std::vector<EVT> ResTys;
      if (isThumb) {
        ResTys.push_back(MVT::i32);
        ResTys.push_back(MVT::i32);
      } else
        ResTys.push_back(MVT::Untyped);
3152 ResTys.push_back(MVT::Other);
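      // ARM LDREXD demands an (even, even+1) register pair, modeled here as a
      // single Untyped GPRPair result that is split with EXTRACT_SUBREG below;
      // Thumb2 t2LDREXD may use two arbitrary GPRs, so it produces two i32
      // results directly.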
3154 // Place arguments in the right order.
3155 SmallVector<SDValue, 7> Ops;
3156 Ops.push_back(MemAddr);
3157 Ops.push_back(getAL(CurDAG));
3158 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
3159 Ops.push_back(Chain);
3160 SDNode *Ld = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops);
3161 // Transfer memoperands.
3162 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
3163 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
3164 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
3167 SDValue OutChain = isThumb ? SDValue(Ld, 2) : SDValue(Ld, 1);
3168 if (!SDValue(N, 0).use_empty()) {
        SDValue Result;
        if (isThumb)
          Result = SDValue(Ld, 0);
        else {
3173 SDValue SubRegIdx = CurDAG->getTargetConstant(ARM::gsub_0, MVT::i32);
3174 SDNode *ResNode = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
3175 dl, MVT::i32, SDValue(Ld, 0), SubRegIdx);
          Result = SDValue(ResNode, 0);
        }
3178 ReplaceUses(SDValue(N, 0), Result);
3180 if (!SDValue(N, 1).use_empty()) {
        SDValue Result;
        if (isThumb)
          Result = SDValue(Ld, 1);
        else {
3185 SDValue SubRegIdx = CurDAG->getTargetConstant(ARM::gsub_1, MVT::i32);
3186 SDNode *ResNode = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
3187 dl, MVT::i32, SDValue(Ld, 0), SubRegIdx);
          Result = SDValue(ResNode, 0);
        }
3190 ReplaceUses(SDValue(N, 1), Result);
3192 ReplaceUses(SDValue(N, 2), OutChain);
3196 case Intrinsic::arm_strexd: {
3198 SDValue Chain = N->getOperand(0);
3199 SDValue Val0 = N->getOperand(2);
3200 SDValue Val1 = N->getOperand(3);
3201 SDValue MemAddr = N->getOperand(4);
      // Store-exclusive double returns an i32 value which is the return
      // status of the issued store.
3205 EVT ResTys[] = { MVT::i32, MVT::Other };
3207 bool isThumb = Subtarget->isThumb() && Subtarget->hasThumb2();
3208 // Place arguments in the right order.
3209 SmallVector<SDValue, 7> Ops;
      if (isThumb) {
        Ops.push_back(Val0);
        Ops.push_back(Val1);
      } else {
3214 // arm_strexd uses GPRPair.
        Ops.push_back(SDValue(createGPRPairNode(MVT::Untyped, Val0, Val1), 0));
      }
3216 Ops.push_back(MemAddr);
3217 Ops.push_back(getAL(CurDAG));
3218 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
3219 Ops.push_back(Chain);
3221 unsigned NewOpc = isThumb ? ARM::t2STREXD : ARM::STREXD;
3223 SDNode *St = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops);
3224 // Transfer memoperands.
3225 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
3226 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
3227 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
3232 case Intrinsic::arm_neon_vld1: {
3233 static const uint16_t DOpcodes[] = { ARM::VLD1d8, ARM::VLD1d16,
3234 ARM::VLD1d32, ARM::VLD1d64 };
3235 static const uint16_t QOpcodes[] = { ARM::VLD1q8, ARM::VLD1q16,
3236 ARM::VLD1q32, ARM::VLD1q64};
3237 return SelectVLD(N, false, 1, DOpcodes, QOpcodes, 0);
3240 case Intrinsic::arm_neon_vld2: {
3241 static const uint16_t DOpcodes[] = { ARM::VLD2d8, ARM::VLD2d16,
3242 ARM::VLD2d32, ARM::VLD1q64 };
3243 static const uint16_t QOpcodes[] = { ARM::VLD2q8Pseudo, ARM::VLD2q16Pseudo,
3244 ARM::VLD2q32Pseudo };
3245 return SelectVLD(N, false, 2, DOpcodes, QOpcodes, 0);
3248 case Intrinsic::arm_neon_vld3: {
      static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo,
                                           ARM::VLD3d16Pseudo,
                                           ARM::VLD3d32Pseudo,
                                           ARM::VLD1d64TPseudo };
3253 static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
3254 ARM::VLD3q16Pseudo_UPD,
3255 ARM::VLD3q32Pseudo_UPD };
3256 static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo,
3257 ARM::VLD3q16oddPseudo,
3258 ARM::VLD3q32oddPseudo };
3259 return SelectVLD(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
3262 case Intrinsic::arm_neon_vld4: {
      static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo,
                                           ARM::VLD4d16Pseudo,
                                           ARM::VLD4d32Pseudo,
                                           ARM::VLD1d64QPseudo };
3267 static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
3268 ARM::VLD4q16Pseudo_UPD,
3269 ARM::VLD4q32Pseudo_UPD };
3270 static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo,
3271 ARM::VLD4q16oddPseudo,
3272 ARM::VLD4q32oddPseudo };
3273 return SelectVLD(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
3276 case Intrinsic::arm_neon_vld2lane: {
3277 static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo,
3278 ARM::VLD2LNd16Pseudo,
3279 ARM::VLD2LNd32Pseudo };
3280 static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo,
3281 ARM::VLD2LNq32Pseudo };
3282 return SelectVLDSTLane(N, true, false, 2, DOpcodes, QOpcodes);
3285 case Intrinsic::arm_neon_vld3lane: {
3286 static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo,
3287 ARM::VLD3LNd16Pseudo,
3288 ARM::VLD3LNd32Pseudo };
3289 static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo,
3290 ARM::VLD3LNq32Pseudo };
3291 return SelectVLDSTLane(N, true, false, 3, DOpcodes, QOpcodes);
3294 case Intrinsic::arm_neon_vld4lane: {
3295 static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo,
3296 ARM::VLD4LNd16Pseudo,
3297 ARM::VLD4LNd32Pseudo };
3298 static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo,
3299 ARM::VLD4LNq32Pseudo };
3300 return SelectVLDSTLane(N, true, false, 4, DOpcodes, QOpcodes);
3303 case Intrinsic::arm_neon_vst1: {
3304 static const uint16_t DOpcodes[] = { ARM::VST1d8, ARM::VST1d16,
3305 ARM::VST1d32, ARM::VST1d64 };
3306 static const uint16_t QOpcodes[] = { ARM::VST1q8, ARM::VST1q16,
3307 ARM::VST1q32, ARM::VST1q64 };
3308 return SelectVST(N, false, 1, DOpcodes, QOpcodes, 0);
3311 case Intrinsic::arm_neon_vst2: {
3312 static const uint16_t DOpcodes[] = { ARM::VST2d8, ARM::VST2d16,
3313 ARM::VST2d32, ARM::VST1q64 };
      static const uint16_t QOpcodes[] = { ARM::VST2q8Pseudo,
                                           ARM::VST2q16Pseudo,
                                           ARM::VST2q32Pseudo };
3316 return SelectVST(N, false, 2, DOpcodes, QOpcodes, 0);
3319 case Intrinsic::arm_neon_vst3: {
      static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo,
                                           ARM::VST3d16Pseudo,
                                           ARM::VST3d32Pseudo,
                                           ARM::VST1d64TPseudo };
3324 static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
3325 ARM::VST3q16Pseudo_UPD,
3326 ARM::VST3q32Pseudo_UPD };
3327 static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo,
3328 ARM::VST3q16oddPseudo,
3329 ARM::VST3q32oddPseudo };
3330 return SelectVST(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
3333 case Intrinsic::arm_neon_vst4: {
      static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo,
                                           ARM::VST4d16Pseudo,
                                           ARM::VST4d32Pseudo,
                                           ARM::VST1d64QPseudo };
3338 static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
3339 ARM::VST4q16Pseudo_UPD,
3340 ARM::VST4q32Pseudo_UPD };
3341 static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo,
3342 ARM::VST4q16oddPseudo,
3343 ARM::VST4q32oddPseudo };
3344 return SelectVST(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
3347 case Intrinsic::arm_neon_vst2lane: {
3348 static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo,
3349 ARM::VST2LNd16Pseudo,
3350 ARM::VST2LNd32Pseudo };
3351 static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo,
3352 ARM::VST2LNq32Pseudo };
3353 return SelectVLDSTLane(N, false, false, 2, DOpcodes, QOpcodes);
3356 case Intrinsic::arm_neon_vst3lane: {
3357 static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo,
3358 ARM::VST3LNd16Pseudo,
3359 ARM::VST3LNd32Pseudo };
3360 static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo,
3361 ARM::VST3LNq32Pseudo };
3362 return SelectVLDSTLane(N, false, false, 3, DOpcodes, QOpcodes);
3365 case Intrinsic::arm_neon_vst4lane: {
3366 static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo,
3367 ARM::VST4LNd16Pseudo,
3368 ARM::VST4LNd32Pseudo };
3369 static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo,
3370 ARM::VST4LNq32Pseudo };
3371 return SelectVLDSTLane(N, false, false, 4, DOpcodes, QOpcodes);
3377 case ISD::INTRINSIC_WO_CHAIN: {
3378 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
3383 case Intrinsic::arm_neon_vtbl2:
3384 return SelectVTBL(N, false, 2, ARM::VTBL2);
3385 case Intrinsic::arm_neon_vtbl3:
3386 return SelectVTBL(N, false, 3, ARM::VTBL3Pseudo);
3387 case Intrinsic::arm_neon_vtbl4:
3388 return SelectVTBL(N, false, 4, ARM::VTBL4Pseudo);
3390 case Intrinsic::arm_neon_vtbx2:
3391 return SelectVTBL(N, true, 2, ARM::VTBX2);
3392 case Intrinsic::arm_neon_vtbx3:
3393 return SelectVTBL(N, true, 3, ARM::VTBX3Pseudo);
3394 case Intrinsic::arm_neon_vtbx4:
3395 return SelectVTBL(N, true, 4, ARM::VTBX4Pseudo);
3400 case ARMISD::VTBL1: {
3402 EVT VT = N->getValueType(0);
3403 SmallVector<SDValue, 6> Ops;
3405 Ops.push_back(N->getOperand(0));
3406 Ops.push_back(N->getOperand(1));
3407 Ops.push_back(getAL(CurDAG)); // Predicate
3408 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
3409 return CurDAG->getMachineNode(ARM::VTBL1, dl, VT, Ops);
3411 case ARMISD::VTBL2: {
3413 EVT VT = N->getValueType(0);
3415 // Form a REG_SEQUENCE to force register allocation.
3416 SDValue V0 = N->getOperand(0);
3417 SDValue V1 = N->getOperand(1);
3418 SDValue RegSeq = SDValue(createDRegPairNode(MVT::v16i8, V0, V1), 0);
3420 SmallVector<SDValue, 6> Ops;
3421 Ops.push_back(RegSeq);
3422 Ops.push_back(N->getOperand(2));
3423 Ops.push_back(getAL(CurDAG)); // Predicate
3424 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
3425 return CurDAG->getMachineNode(ARM::VTBL2, dl, VT, Ops);
3428 case ISD::CONCAT_VECTORS:
3429 return SelectConcatVector(N);
3431 case ARMISD::ATOMOR64_DAG:
3432 return SelectAtomic64(N, ARM::ATOMOR6432);
3433 case ARMISD::ATOMXOR64_DAG:
3434 return SelectAtomic64(N, ARM::ATOMXOR6432);
3435 case ARMISD::ATOMADD64_DAG:
3436 return SelectAtomic64(N, ARM::ATOMADD6432);
3437 case ARMISD::ATOMSUB64_DAG:
3438 return SelectAtomic64(N, ARM::ATOMSUB6432);
3439 case ARMISD::ATOMNAND64_DAG:
3440 return SelectAtomic64(N, ARM::ATOMNAND6432);
3441 case ARMISD::ATOMAND64_DAG:
3442 return SelectAtomic64(N, ARM::ATOMAND6432);
3443 case ARMISD::ATOMSWAP64_DAG:
3444 return SelectAtomic64(N, ARM::ATOMSWAP6432);
3445 case ARMISD::ATOMCMPXCHG64_DAG:
3446 return SelectAtomic64(N, ARM::ATOMCMPXCHG6432);
3448 case ARMISD::ATOMMIN64_DAG:
3449 return SelectAtomic64(N, ARM::ATOMMIN6432);
3450 case ARMISD::ATOMUMIN64_DAG:
3451 return SelectAtomic64(N, ARM::ATOMUMIN6432);
3452 case ARMISD::ATOMMAX64_DAG:
3453 return SelectAtomic64(N, ARM::ATOMMAX6432);
3454 case ARMISD::ATOMUMAX64_DAG:
3455 return SelectAtomic64(N, ARM::ATOMUMAX6432);
3458 return SelectCode(N);
3461 SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N){
3462 std::vector<SDValue> AsmNodeOperands;
3463 unsigned Flag, Kind;
3464 bool Changed = false;
3465 unsigned NumOps = N->getNumOperands();
  // Normally, i64 data is bound to two arbitrary GPRs for the "%r"
  // constraint. However, some instructions (e.g. ldrexd/strexd in ARM mode)
  // require an (even, even+1) GPR pair, and use %n and %Hn to refer to the
  // individual regs respectively. Since there is no constraint to explicitly
  // specify a reg pair, we use the GPRPair reg class for "%r" with 64-bit
  // data. For Thumb, the 64-bit data may be referred to by the H, Q, and R
  // modifiers, so we still pack them into a GPRPair.
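  // For example (a sketch, assuming the usual GCC operand modifiers):
  //   long long v;
  //   asm volatile("ldrexd %0, %H0, [%1]" : "=&r"(v) : "r"(p));
  // needs %0/%H0 to land in an (even, even+1) pair such as r0/r1, which is
  // exactly the constraint the GPRPair register class expresses.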
3476 SDValue Glue = N->getGluedNode() ? N->getOperand(NumOps-1) : SDValue(0,0);
3478 SmallVector<bool, 8> OpChanged;
  // The glue node, if present, will be appended last.
  for (unsigned i = 0, e = N->getGluedNode() ? NumOps - 1 : NumOps; i < e; ++i) {
3481 SDValue op = N->getOperand(i);
3482 AsmNodeOperands.push_back(op);
3484 if (i < InlineAsm::Op_FirstOperand)
3487 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(i))) {
3488 Flag = C->getZExtValue();
3489 Kind = InlineAsm::getKind(Flag);
3494 // Immediate operands to inline asm in the SelectionDAG are modeled with
3495 // two operands. The first is a constant of value InlineAsm::Kind_Imm, and
3496 // the second is a constant with the value of the immediate. If we get here
3497 // and we have a Kind_Imm, skip the next operand, and continue.
3498 if (Kind == InlineAsm::Kind_Imm) {
3499 SDValue op = N->getOperand(++i);
3500 AsmNodeOperands.push_back(op);
3504 unsigned NumRegs = InlineAsm::getNumOperandRegisters(Flag);
3506 OpChanged.push_back(false);
3508 unsigned DefIdx = 0;
3509 bool IsTiedToChangedOp = false;
    // If it's a use that is tied to a previous def, it has no
    // reg class constraint.
3512 if (Changed && InlineAsm::isUseOperandTiedToDef(Flag, DefIdx))
3513 IsTiedToChangedOp = OpChanged[DefIdx];
3515 if (Kind != InlineAsm::Kind_RegUse && Kind != InlineAsm::Kind_RegDef
3516 && Kind != InlineAsm::Kind_RegDefEarlyClobber)
    unsigned RC;
    bool HasRC = InlineAsm::hasRegClassConstraint(Flag, RC);
    if ((!IsTiedToChangedOp && (!HasRC || RC != ARM::GPRRegClassID))
        || NumRegs != 2)
      continue;
3525 assert((i+2 < NumOps) && "Invalid number of operands in inline asm");
3526 SDValue V0 = N->getOperand(i+1);
3527 SDValue V1 = N->getOperand(i+2);
3528 unsigned Reg0 = cast<RegisterSDNode>(V0)->getReg();
3529 unsigned Reg1 = cast<RegisterSDNode>(V1)->getReg();
    SDValue PairedReg;
    MachineRegisterInfo &MRI = MF->getRegInfo();
3533 if (Kind == InlineAsm::Kind_RegDef ||
3534 Kind == InlineAsm::Kind_RegDefEarlyClobber) {
3535 // Replace the two GPRs with 1 GPRPair and copy values from GPRPair to
3536 // the original GPRs.
3538 unsigned GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
3539 PairedReg = CurDAG->getRegister(GPVR, MVT::Untyped);
3540 SDValue Chain = SDValue(N,0);
3542 SDNode *GU = N->getGluedUser();
      SDValue RegCopy = CurDAG->getCopyFromReg(Chain, dl, GPVR, MVT::Untyped,
                                               Chain.getValue(1));
3546 // Extract values from a GPRPair reg and copy to the original GPR reg.
      SDValue Sub0 = CurDAG->getTargetExtractSubreg(ARM::gsub_0, dl, MVT::i32,
                                                    RegCopy);
      SDValue Sub1 = CurDAG->getTargetExtractSubreg(ARM::gsub_1, dl, MVT::i32,
                                                    RegCopy);
3551 SDValue T0 = CurDAG->getCopyToReg(Sub0, dl, Reg0, Sub0,
3552 RegCopy.getValue(1));
3553 SDValue T1 = CurDAG->getCopyToReg(Sub1, dl, Reg1, Sub1, T0.getValue(1));
3555 // Update the original glue user.
3556 std::vector<SDValue> Ops(GU->op_begin(), GU->op_end()-1);
3557 Ops.push_back(T1.getValue(1));
3558 CurDAG->UpdateNodeOperands(GU, &Ops[0], Ops.size());
3562 // For Kind == InlineAsm::Kind_RegUse, we first copy two GPRs into a
3563 // GPRPair and then pass the GPRPair to the inline asm.
3564 SDValue Chain = AsmNodeOperands[InlineAsm::Op_InputChain];
      // As REG_SEQUENCE doesn't take RegisterSDNode operands, copy the
      // values out of the registers first.
      SDValue T0 = CurDAG->getCopyFromReg(Chain, dl, Reg0, MVT::i32,
                                          Glue);
      SDValue T1 = CurDAG->getCopyFromReg(Chain, dl, Reg1, MVT::i32,
                                          T0.getValue(1));
3571 SDValue Pair = SDValue(createGPRPairNode(MVT::Untyped, T0, T1), 0);
      // Copy the REG_SEQUENCE into a GPRPair-typed VR and replace the
      // original two i32 VRs of the inline asm with it.
3575 unsigned GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
3576 PairedReg = CurDAG->getRegister(GPVR, MVT::Untyped);
3577 Chain = CurDAG->getCopyToReg(T1, dl, GPVR, Pair, T1.getValue(1));
3579 AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
3580 Glue = Chain.getValue(1);
    if (PairedReg.getNode()) {
      OpChanged[OpChanged.size() - 1] = true;
3587 Flag = InlineAsm::getFlagWord(Kind, 1 /* RegNum*/);
3588 Flag = InlineAsm::getFlagWordForRegClass(Flag, ARM::GPRPairRegClassID);
3589 // Replace the current flag.
      AsmNodeOperands[AsmNodeOperands.size() - 1] = CurDAG->getTargetConstant(
3592 // Add the new register node and skip the original two GPRs.
3593 AsmNodeOperands.push_back(PairedReg);
3594 // Skip the next two GPRs.
  if (Glue.getNode())
    AsmNodeOperands.push_back(Glue);
3604 SDValue New = CurDAG->getNode(ISD::INLINEASM, SDLoc(N),
3605 CurDAG->getVTList(MVT::Other, MVT::Glue), &AsmNodeOperands[0],
3606 AsmNodeOperands.size());
3608 return New.getNode();
3612 bool ARMDAGToDAGISel::
3613 SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
3614 std::vector<SDValue> &OutOps) {
3615 assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
3616 // Require the address to be in a register. That is safe for all ARM
3617 // variants and it is hard to do anything much smarter without knowing
3618 // how the operand is used.
3619 OutOps.push_back(Op);
/// createARMISelDag - This pass converts a legalized DAG into an
/// ARM-specific DAG, ready for instruction scheduling.
3626 FunctionPass *llvm::createARMISelDag(ARMBaseTargetMachine &TM,
3627 CodeGenOpt::Level OptLevel) {
3628 return new ARMDAGToDAGISel(TM, OptLevel);