//===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the ARM target.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "arm-isel"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMTargetMachine.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
static cl::opt<bool>
DisableShifterOp("disable-shifter-op", cl::Hidden,
  cl::desc("Disable isel of shifter-op"),
  cl::init(false));

static cl::opt<bool>
CheckVMLxHazard("check-vmlx-hazard", cl::Hidden,
  cl::desc("Check fp vmla / vmls hazard at isel time"),
  cl::init(true));

static cl::opt<bool>
DisableARMIntABS("disable-arm-int-abs", cl::Hidden,
  cl::desc("Enable / disable ARM integer abs transform"),
  cl::init(false));
//===--------------------------------------------------------------------===//
/// ARMDAGToDAGISel - ARM specific code to select ARM machine
/// instructions for SelectionDAG operations.
///
namespace {

enum AddrMode2Type {
  AM2_BASE, // Simple AM2 (+-imm12)
  AM2_SHOP  // Shifter-op AM2
};

class ARMDAGToDAGISel : public SelectionDAGISel {
  ARMBaseTargetMachine &TM;
  const ARMBaseInstrInfo *TII;

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
public:
  explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm,
                           CodeGenOpt::Level OptLevel)
    : SelectionDAGISel(tm, OptLevel), TM(tm),
      TII(static_cast<const ARMBaseInstrInfo*>(TM.getInstrInfo())),
      Subtarget(&TM.getSubtarget<ARMSubtarget>()) {
  }

  virtual const char *getPassName() const {
    return "ARM Instruction Selection";
  }

  /// getI32Imm - Return a target constant of type i32 with the specified
  /// value.
  inline SDValue getI32Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i32);
  }

  SDNode *Select(SDNode *N);
  bool hasNoVMLxHazardUse(SDNode *N) const;
  bool isShifterOpProfitable(const SDValue &Shift,
                             ARM_AM::ShiftOpc ShOpcVal, unsigned ShAmt);
  bool SelectRegShifterOperand(SDValue N, SDValue &A,
                               SDValue &B, SDValue &C,
                               bool CheckProfitability = true);
  bool SelectImmShifterOperand(SDValue N, SDValue &A,
                               SDValue &B, bool CheckProfitability = true);
  bool SelectShiftRegShifterOperand(SDValue N, SDValue &A,
                                    SDValue &B, SDValue &C) {
    // Don't apply the profitability check
    return SelectRegShifterOperand(N, A, B, C, false);
  }
  bool SelectShiftImmShifterOperand(SDValue N, SDValue &A,
                                    SDValue &B) {
    // Don't apply the profitability check
    return SelectImmShifterOperand(N, A, B, false);
  }
  bool SelectAddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset, SDValue &Opc);

  AddrMode2Type SelectAddrMode2Worker(SDValue N, SDValue &Base,
                                      SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2Base(SDValue N, SDValue &Base, SDValue &Offset,
                           SDValue &Opc) {
    return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_BASE;
  }

  bool SelectAddrMode2ShOp(SDValue N, SDValue &Base, SDValue &Offset,
                           SDValue &Opc) {
    return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_SHOP;
  }

  bool SelectAddrMode2(SDValue N, SDValue &Base, SDValue &Offset,
                       SDValue &Opc) {
    SelectAddrMode2Worker(N, Base, Offset, Opc);
//    return SelectAddrMode2ShOp(N, Base, Offset, Opc);
    // This always matches one way or another.
    return true;
  }

  bool SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
                                SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
                                SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
                                   SDValue &Offset, SDValue &Opc);
  bool SelectAddrOffsetNone(SDValue N, SDValue &Base);
  bool SelectAddrMode3(SDValue N, SDValue &Base,
                       SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode3Offset(SDNode *Op, SDValue N,
                             SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode5(SDValue N, SDValue &Base,
                       SDValue &Offset);
  bool SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,SDValue &Align);
  bool SelectAddrMode6Offset(SDNode *Op, SDValue N, SDValue &Offset);

  bool SelectAddrModePC(SDValue N, SDValue &Offset, SDValue &Label);
  // Thumb Addressing Modes:
  bool SelectThumbAddrModeRR(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI(SDValue N, SDValue &Base, SDValue &Offset,
                             unsigned Scale);
  bool SelectThumbAddrModeRI5S1(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI5S2(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI5S4(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeImm5S(SDValue N, unsigned Scale, SDValue &Base,
                                SDValue &OffImm);
  bool SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeSP(SDValue N, SDValue &Base, SDValue &OffImm);
  // Thumb 2 Addressing Modes:
  bool SelectT2ShifterOperandReg(SDValue N,
                                 SDValue &BaseReg, SDValue &Opc);
  bool SelectT2AddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectT2AddrModeImm8(SDValue N, SDValue &Base,
                            SDValue &OffImm);
  bool SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
                                  SDValue &OffImm);
  bool SelectT2AddrModeSoReg(SDValue N, SDValue &Base,
                             SDValue &OffReg, SDValue &ShImm);
  inline bool is_so_imm(unsigned Imm) const {
    return ARM_AM::getSOImmVal(Imm) != -1;
  }

  inline bool is_so_imm_not(unsigned Imm) const {
    return ARM_AM::getSOImmVal(~Imm) != -1;
  }

  inline bool is_t2_so_imm(unsigned Imm) const {
    return ARM_AM::getT2SOImmVal(Imm) != -1;
  }

  inline bool is_t2_so_imm_not(unsigned Imm) const {
    return ARM_AM::getT2SOImmVal(~Imm) != -1;
  }
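
  // Note on the encodings these predicates test: an ARM so_imm is an 8-bit
  // value rotated right by an even amount, so e.g. 0xFF000000 (0xFF ror 8)
  // is representable while 0x00FFF000 is not. The Thumb-2 t2_so_imm form
  // additionally accepts replicated-byte patterns such as 0x00XY00XY and
  // 0xXYXYXYXY.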

  // Include the pieces autogenerated from the target description.
#include "ARMGenDAGISel.inc"

private:
  /// SelectARMIndexedLoad - Indexed (pre/post inc/dec) load matching code for
  /// ARM.
  SDNode *SelectARMIndexedLoad(SDNode *N);
  SDNode *SelectT2IndexedLoad(SDNode *N);
  /// SelectVLD - Select NEON load intrinsics. NumVecs should be
  /// 1, 2, 3 or 4. The opcode arrays specify the instructions used for
  /// loads of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  SDNode *SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
                    const uint16_t *DOpcodes,
                    const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);

  /// SelectVST - Select NEON store intrinsics. NumVecs should
  /// be 1, 2, 3 or 4. The opcode arrays specify the instructions used for
  /// stores of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  SDNode *SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
                    const uint16_t *DOpcodes,
                    const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);

  /// SelectVLDSTLane - Select NEON load/store lane intrinsics. NumVecs should
  /// be 2, 3 or 4. The opcode arrays specify the instructions used for
  /// load/store of D registers and Q registers.
  SDNode *SelectVLDSTLane(SDNode *N, bool IsLoad,
                          bool isUpdating, unsigned NumVecs,
                          const uint16_t *DOpcodes, const uint16_t *QOpcodes);

  /// SelectVLDDup - Select NEON load-duplicate intrinsics. NumVecs
  /// should be 2, 3 or 4. The opcode array specifies the instructions used
  /// for loading D registers. (Q registers are not supported.)
  SDNode *SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs,
                       const uint16_t *Opcodes);

  /// SelectVTBL - Select NEON VTBL and VTBX intrinsics. NumVecs should be 2,
  /// 3 or 4. These are custom-selected so that a REG_SEQUENCE can be
  /// generated to force the table registers to be consecutive.
  SDNode *SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, unsigned Opc);
  /// SelectV6T2BitfieldExtractOp - Select SBFX/UBFX instructions for ARM.
  SDNode *SelectV6T2BitfieldExtractOp(SDNode *N, bool isSigned);

  /// SelectCMOVOp - Select CMOV instructions for ARM.
  SDNode *SelectCMOVOp(SDNode *N);
  SDNode *SelectConditionalOp(SDNode *N);
  SDNode *SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                              ARMCC::CondCodes CCVal, SDValue CCR,
                              SDValue InFlag);
  SDNode *SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                               ARMCC::CondCodes CCVal, SDValue CCR,
                               SDValue InFlag);
  SDNode *SelectT2CMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                            ARMCC::CondCodes CCVal, SDValue CCR,
                            SDValue InFlag);
  SDNode *SelectARMCMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                             ARMCC::CondCodes CCVal, SDValue CCR,
                             SDValue InFlag);

  // Select special operations if node forms integer ABS pattern
  SDNode *SelectABSOp(SDNode *N);

  SDNode *SelectConcatVector(SDNode *N);

  SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                            char ConstraintCode,
                                            std::vector<SDValue> &OutOps);
  // Form pairs of consecutive S, D, or Q registers.
  SDNode *PairSRegs(EVT VT, SDValue V0, SDValue V1);
  SDNode *PairDRegs(EVT VT, SDValue V0, SDValue V1);
  SDNode *PairQRegs(EVT VT, SDValue V0, SDValue V1);

  // Form sequences of 4 consecutive S, D, or Q registers.
  SDNode *QuadSRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
  SDNode *QuadDRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
  SDNode *QuadQRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);

  // Get the alignment operand for a NEON VLD or VST instruction.
  SDValue GetVLDSTAlign(SDValue Align, unsigned NumVecs, bool is64BitVector);
};
}
/// isInt32Immediate - This method tests to see if the node is a 32-bit constant
/// operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
  if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
    Imm = cast<ConstantSDNode>(N)->getZExtValue();
    return true;
  }
  return false;
}

// isInt32Immediate - This method tests to see if the operand is a 32-bit
// constant. If so Imm will receive the 32 bit value.
static bool isInt32Immediate(SDValue N, unsigned &Imm) {
  return isInt32Immediate(N.getNode(), Imm);
}
// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so Imm will receive the 32 bit value.
static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
  return N->getOpcode() == Opc &&
         isInt32Immediate(N->getOperand(1).getNode(), Imm);
}
/// \brief Check whether a particular node is a constant value representable as
/// (N * Scale) where N is in [\arg RangeMin, \arg RangeMax).
///
/// \param ScaledConstant [out] - On success, the pre-scaled constant value.
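///
/// For example, with Scale == 4 and the range [0, 256): a constant 1020
/// scales to 255 and is accepted, while 1022 (not a multiple of 4) and
/// 1024 (scales to 256, outside the half-open range) are rejected.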
static bool isScaledConstantInRange(SDValue Node, int Scale,
                                    int RangeMin, int RangeMax,
                                    int &ScaledConstant) {
  assert(Scale > 0 && "Invalid scale!");

  // Check that this is a constant.
  const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Node);
  if (!C)
    return false;

  ScaledConstant = (int) C->getZExtValue();
  if ((ScaledConstant % Scale) != 0)
    return false;

  ScaledConstant /= Scale;
  return ScaledConstant >= RangeMin && ScaledConstant < RangeMax;
}
/// hasNoVMLxHazardUse - Return true if it's desirable to select a FP MLA / MLS
/// node. VFP / NEON fp VMLA / VMLS instructions have special RAW hazards (at
/// least on current ARM implementations) which should be avoided.
bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
  if (OptLevel == CodeGenOpt::None)
    return true;

  if (!CheckVMLxHazard)
    return true;

  if (!Subtarget->isCortexA8() && !Subtarget->isCortexA9())
    return true;

  if (!N->hasOneUse())
    return false;

  SDNode *Use = *N->use_begin();
  if (Use->getOpcode() == ISD::CopyToReg)
    return true;
  if (Use->isMachineOpcode()) {
    const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode());
    if (MCID.mayStore())
      return true;
    unsigned Opcode = MCID.getOpcode();
    if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
      return true;
    // vmlx feeding into another vmlx. We actually want to unfold
    // the use later in the MLxExpansion pass. e.g.
    // vmla
    // vmla (stall 8 cycles)
    //
    // vmul (5 cycles)
    // vadd (5 cycles)
    // vmla
    // This adds up to about 18 - 19 cycles.
    //
    // vmla
    // vmul (stall 4 cycles)
    // vadd adds up to about 14 cycles.
    return TII->isFpMLxInstruction(Opcode);
  }

  return false;
}
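
// Decide whether folding a shift into an operand is worthwhile: on Cortex-A9
// this is restricted to single-use shifts and LSL #2.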
bool ARMDAGToDAGISel::isShifterOpProfitable(const SDValue &Shift,
                                            ARM_AM::ShiftOpc ShOpcVal,
                                            unsigned ShAmt) {
  if (!Subtarget->isCortexA9())
    return true;
  if (Shift.hasOneUse())
    return true;
  // R << 2 is free.
  return ShOpcVal == ARM_AM::lsl && ShAmt == 2;
}
bool ARMDAGToDAGISel::SelectImmShifterOperand(SDValue N,
                                              SDValue &BaseReg,
                                              SDValue &Opc,
                                              bool CheckProfitability) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!RHS) return false;
  ShImmVal = RHS->getZExtValue() & 31;
  Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
                                  MVT::i32);
  return true;
}
bool ARMDAGToDAGISel::SelectRegShifterOperand(SDValue N,
                                              SDValue &BaseReg,
                                              SDValue &ShReg,
                                              SDValue &Opc,
                                              bool CheckProfitability) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  // Constant shift amounts are handled by SelectImmShifterOperand above.
  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (RHS) return false;

  ShReg = N.getOperand(1);
  if (CheckProfitability && !isShifterOpProfitable(N, ShOpcVal, ShImmVal))
    return false;
  Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
                                  MVT::i32);
  return true;
}
bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
                                          SDValue &Base,
                                          SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      OffImm = CurDAG->getTargetConstant(0, MVT::i32);
      return true;
    }

    if (N.getOpcode() == ARMISD::Wrapper &&
        !(Subtarget->useMovt() &&
          N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    } else
      Base = N;
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}
bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
                                      SDValue &Opc) {
  if (N.getOpcode() == ISD::MUL &&
      (!Subtarget->isCortexA9() || N.hasOneUse())) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      // X * [3,5,9] -> X + X * [2,4,8] etc.
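      // e.g. x * 9 becomes x + (x << 3), so the multiply folds into a single
      // shifter operand: ldr rD, [rN, rN, lsl #3].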
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC & 1) {
        RHSC = RHSC & ~1;
        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = - RHSC;
        }
        if (isPowerOf2_32(RHSC)) {
          unsigned ShAmt = Log2_32(RHSC);
          Base = Offset = N.getOperand(0);
          Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
                                                            ARM_AM::lsl),
                                          MVT::i32);
          return true;
        }
      }
    }
  }

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      // ISD::OR that is equivalent to an ISD::ADD.
      !CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Leave simple R +/- imm12 operands for LDRi12
  if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::OR) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                                -0x1000+1, 0x1000, RHSC)) // 12 bits.
      return false;
  }

  // Otherwise this is R +/- [possibly shifted] R.
  ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::SUB ? ARM_AM::sub:ARM_AM::add;
  ARM_AM::ShiftOpc ShOpcVal =
    ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
  unsigned ShAmt = 0;

  Base   = N.getOperand(0);
  Offset = N.getOperand(1);

  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh =
           dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
        Offset = N.getOperand(1).getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  // Try matching (R shl C) + (R).
  if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
      !(Subtarget->isCortexA9() || N.getOperand(0).hasOneUse())) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
    if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant, if not, we can't
      // fold it.
      if (ConstantSDNode *Sh =
            dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
        ShAmt = Sh->getZExtValue();
        if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
          Offset = N.getOperand(0).getOperand(0);
          Base = N.getOperand(1);
        } else {
          ShAmt = 0;
          ShOpcVal = ARM_AM::no_shift;
        }
      } else {
        ShOpcVal = ARM_AM::no_shift;
      }
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return true;
}
AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
                                                     SDValue &Base,
                                                     SDValue &Offset,
                                                     SDValue &Opc) {
  if (N.getOpcode() == ISD::MUL &&
      (!Subtarget->isCortexA9() || N.hasOneUse())) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      // X * [3,5,9] -> X + X * [2,4,8] etc.
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC & 1) {
        RHSC = RHSC & ~1;
        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = - RHSC;
        }
        if (isPowerOf2_32(RHSC)) {
          unsigned ShAmt = Log2_32(RHSC);
          Base = Offset = N.getOperand(0);
          Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
                                                            ARM_AM::lsl),
                                          MVT::i32);
          return AM2_SHOP;
        }
      }
    }
  }

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      // ISD::OR that is equivalent to an ADD.
      !CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               !(Subtarget->useMovt() &&
                 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    }
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
                                                      ARM_AM::no_shift),
                                    MVT::i32);
    return AM2_BASE;
  }

  // Match simple R +/- imm12 operands.
  if (N.getOpcode() != ISD::SUB) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                                -0x1000+1, 0x1000, RHSC)) { // 12 bits.
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      }
      Offset = CurDAG->getRegister(0, MVT::i32);

      ARM_AM::AddrOpc AddSub = ARM_AM::add;
      if (RHSC < 0) {
        AddSub = ARM_AM::sub;
        RHSC = - RHSC;
      }
      Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,
                                                        ARM_AM::no_shift),
                                      MVT::i32);
      return AM2_BASE;
    }
  }

  if (Subtarget->isCortexA9() && !N.hasOneUse()) {
    // Compute R +/- (R << N) and reuse it.
    Base = N;
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
                                                      ARM_AM::no_shift),
                                    MVT::i32);
    return AM2_BASE;
  }

  // Otherwise this is R +/- [possibly shifted] R.
  ARM_AM::AddrOpc AddSub = N.getOpcode() != ISD::SUB ? ARM_AM::add:ARM_AM::sub;
  ARM_AM::ShiftOpc ShOpcVal =
    ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
  unsigned ShAmt = 0;

  Base   = N.getOperand(0);
  Offset = N.getOperand(1);

  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh =
           dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
        Offset = N.getOperand(1).getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  // Try matching (R shl C) + (R).
  if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
      !(Subtarget->isCortexA9() || N.getOperand(0).hasOneUse())) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
    if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant, if not, we can't
      // fold it.
      if (ConstantSDNode *Sh =
            dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
        ShAmt = Sh->getZExtValue();
        if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
          Offset = N.getOperand(0).getOperand(0);
          Base = N.getOperand(1);
        } else {
          ShAmt = 0;
          ShOpcVal = ARM_AM::no_shift;
        }
      } else {
        ShOpcVal = ARM_AM::no_shift;
      }
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return AM2_SHOP;
}
bool ARMDAGToDAGISel::SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
                                               SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val))
    return false;

  Offset = N;
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
  unsigned ShAmt = 0;
  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(N, ShOpcVal, ShAmt))
        Offset = N.getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return true;
}
bool ARMDAGToDAGISel::SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
                                                  SDValue &Offset,
                                                  SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
    if (AddSub == ARM_AM::sub) Val *= -1;
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(Val, MVT::i32);
    return true;
  }

  return false;
}
bool ARMDAGToDAGISel::SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
                                               SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
                                                      ARM_AM::no_shift),
                                    MVT::i32);
    return true;
  }

  return false;
}

bool ARMDAGToDAGISel::SelectAddrOffsetNone(SDValue N, SDValue &Base) {
  Base = N;
  return true;
}
bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
                                      SDValue &Base, SDValue &Offset,
                                      SDValue &Opc) {
  if (N.getOpcode() == ISD::SUB) {
    // X - C is canonicalized to X + -C, no need to handle it here.
    Base = N.getOperand(0);
    Offset = N.getOperand(1);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),MVT::i32);
    return true;
  }

  if (!CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    }
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32);
    return true;
  }

  // If the RHS is +/- imm8, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                              -256 + 1, 256, RHSC)) { // 8 bits.
    Base = N.getOperand(0);
    if (Base.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    }
    Offset = CurDAG->getRegister(0, MVT::i32);

    ARM_AM::AddrOpc AddSub = ARM_AM::add;
    if (RHSC < 0) {
      AddSub = ARM_AM::sub;
      RHSC = -RHSC;
    }
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32);
    return true;
  }

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), MVT::i32);
  return true;
}
bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 256, Val)) { // 8 bits.
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);
    return true;
  }

  Offset = N;
  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), MVT::i32);
  return true;
}
bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
                                      SDValue &Base, SDValue &Offset) {
  if (!CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               !(Subtarget->useMovt() &&
                 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    }
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
                                       MVT::i32);
    return true;
  }

  // If the RHS is +/- imm8, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4,
                              -256 + 1, 256, RHSC)) {
    Base = N.getOperand(0);
    if (Base.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    }

    ARM_AM::AddrOpc AddSub = ARM_AM::add;
    if (RHSC < 0) {
      AddSub = ARM_AM::sub;
      RHSC = -RHSC;
    }
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
                                       MVT::i32);
    return true;
  }

  Base = N;
  Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
                                     MVT::i32);
  return true;
}
bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
                                      SDValue &Align) {
  Addr = N;

  unsigned Alignment = 0;
  if (LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(Parent)) {
    // This case occurs only for VLD1-lane/dup and VST1-lane instructions.
    // The maximum alignment is equal to the memory size being referenced.
    unsigned LSNAlign = LSN->getAlignment();
    unsigned MemSize = LSN->getMemoryVT().getSizeInBits() / 8;
    if (LSNAlign >= MemSize && MemSize > 1)
      Alignment = MemSize;
  } else {
    // All other uses of addrmode6 are for intrinsics. For now just record
    // the raw alignment value; it will be refined later based on the legal
    // alignment operands for the intrinsic.
    Alignment = cast<MemIntrinsicSDNode>(Parent)->getAlignment();
  }

  Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
  return true;
}
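
// Match the post-increment operand of a NEON load/store. An increment equal
// to the access size selects the fixed-stride form (offset register 0), e.g.
// a 16-byte VLD1 advanced by 16 becomes "vld1 ..., [rN]!" with no Rm.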
bool ARMDAGToDAGISel::SelectAddrMode6Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset) {
  LSBaseSDNode *LdSt = cast<LSBaseSDNode>(Op);
  ISD::MemIndexedMode AM = LdSt->getAddressingMode();
  if (AM != ISD::POST_INC)
    return false;

  Offset = N;
  if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N)) {
    if (NC->getZExtValue() * 8 == LdSt->getMemoryVT().getSizeInBits())
      Offset = CurDAG->getRegister(0, MVT::i32);
  }
  return true;
}
bool ARMDAGToDAGISel::SelectAddrModePC(SDValue N,
                                       SDValue &Offset, SDValue &Label) {
  if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
    Offset = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
                                      MVT::i32);
    return true;
  }

  return false;
}

//===----------------------------------------------------------------------===//
//                         Thumb Addressing Modes
//===----------------------------------------------------------------------===//
bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue N,
                                            SDValue &Base, SDValue &Offset){
  if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N)) {
    ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N);
    if (!NC || !NC->isNullValue())
      return false;

    Base = Offset = N;
    return true;
  }

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  return true;
}
bool
ARMDAGToDAGISel::SelectThumbAddrModeRI(SDValue N, SDValue &Base,
                                       SDValue &Offset, unsigned Scale) {
  if (Scale == 4) {
    SDValue TmpBase, TmpOffImm;
    if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
      return false; // We want to select tLDRspi / tSTRspi instead.
    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
      return false; // We want to select tLDRpci instead.
  }

  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Thumb does not have [sp, r] address mode.
  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
  if ((LHSR && LHSR->getReg() == ARM::SP) ||
      (RHSR && RHSR->getReg() == ARM::SP))
    return false;

  // FIXME: Why do we explicitly check for a match here and then return false?
  // Presumably to allow something else to match, but shouldn't this be
  // documented?
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC))
    return false;

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  return true;
}
bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S1(SDValue N,
                                          SDValue &Base,
                                          SDValue &Offset) {
  return SelectThumbAddrModeRI(N, Base, Offset, 1);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S2(SDValue N,
                                          SDValue &Base,
                                          SDValue &Offset) {
  return SelectThumbAddrModeRI(N, Base, Offset, 2);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S4(SDValue N,
                                          SDValue &Base,
                                          SDValue &Offset) {
  return SelectThumbAddrModeRI(N, Base, Offset, 4);
}
bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale,
                                          SDValue &Base, SDValue &OffImm) {
  if (Scale == 4) {
    SDValue TmpBase, TmpOffImm;
    if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
      return false; // We want to select tLDRspi / tSTRspi instead.
    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
      return false; // We want to select tLDRpci instead.
  }

  if (!CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ARMISD::Wrapper &&
        !(Subtarget->useMovt() &&
          N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    } else {
      Base = N;
    }

    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
  if ((LHSR && LHSR->getReg() == ARM::SP) ||
      (RHSR && RHSR->getReg() == ARM::SP)) {
    ConstantSDNode *LHS = dyn_cast<ConstantSDNode>(N.getOperand(0));
    ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
    unsigned LHSC = LHS ? LHS->getZExtValue() : 0;
    unsigned RHSC = RHS ? RHS->getZExtValue() : 0;

    // Thumb does not have [sp, #imm5] address mode for non-zero imm5.
    if (LHSC != 0 || RHSC != 0) return false;

    Base = N;
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  // If the RHS is + imm5 * scale, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC)) {
    Base = N.getOperand(0);
    OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
    return true;
  }

  Base = N.getOperand(0);
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}
bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 4, Base, OffImm);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 2, Base, OffImm);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 1, Base, OffImm);
}
bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;

  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  if (N.getOperand(0).getOpcode() == ISD::FrameIndex ||
      (LHSR && LHSR->getReg() == ARM::SP)) {
    // If the RHS is + imm8 * scale, fold into addr mode.
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4, 0, 256, RHSC)) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  return false;
}

//===----------------------------------------------------------------------===//
//                        Thumb 2 Addressing Modes
//===----------------------------------------------------------------------===//
bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDValue N, SDValue &BaseReg,
                                                SDValue &Opc) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    ShImmVal = RHS->getZExtValue() & 31;
    Opc = getI32Imm(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal));
    return true;
  }

  return false;
}
bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      OffImm = CurDAG->getTargetConstant(0, MVT::i32);
      return true;
    }

    if (N.getOpcode() == ARMISD::Wrapper &&
        !(Subtarget->useMovt() &&
          N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::TargetConstantPool)
        return false; // We want to select t2LDRpci instead.
    } else
      Base = N;
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    if (SelectT2AddrModeImm8(N, Base, OffImm))
      // Let t2LDRi8 handle (R - imm8).
      return false;

    int RHSC = (int)RHS->getZExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}
bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N,
                                           SDValue &Base, SDValue &OffImm) {
  // Match simple R - imm8 operands.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N))
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getSExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  return false;
}
bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
                                                 SDValue &OffImm){
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  int RHSC;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x100, RHSC)) { // 8 bits.
    OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
      ? CurDAG->getTargetConstant(RHSC, MVT::i32)
      : CurDAG->getTargetConstant(-RHSC, MVT::i32);
    return true;
  }

  return false;
}
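
// Match Thumb-2 register-offset addresses of the form (R + R) or
// (R + (R << [1,2,3])), e.g. "ldr.w r0, [r1, r2, lsl #2]" for a word
// indexed by r2.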
bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
                                            SDValue &Base,
                                            SDValue &OffReg, SDValue &ShImm) {
  // (R - imm8) should be handled by t2LDRi8. The rest are handled by t2LDRi12.
  if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Leave (R + imm12) for t2LDRi12, (R - imm8) for t2LDRi8.
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC >= 0 && RHSC < 0x1000) // 12 bits (unsigned)
      return false;
    else if (RHSC < 0 && RHSC >= -255) // 8 bits
      return false;
  }

  // Look for (R + R) or (R + (R << [1,2,3])).
  unsigned ShAmt = 0;
  Base   = N.getOperand(0);
  OffReg = N.getOperand(1);

  // Swap if it is ((R << c) + R).
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg.getOpcode());
  if (ShOpcVal != ARM_AM::lsl) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(Base.getOpcode());
    if (ShOpcVal == ARM_AM::lsl)
      std::swap(Base, OffReg);
  }

  if (ShOpcVal == ARM_AM::lsl) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (ShAmt < 4 && isShifterOpProfitable(OffReg, ShOpcVal, ShAmt))
        OffReg = OffReg.getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  ShImm = CurDAG->getTargetConstant(ShAmt, MVT::i32);

  return true;
}

//===--------------------------------------------------------------------===//

/// getAL - Returns an ARMCC::AL immediate node.
static inline SDValue getAL(SelectionDAG *CurDAG) {
  return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32);
}
SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  if (AM == ISD::UNINDEXED)
    return NULL;

  EVT LoadedVT = LD->getMemoryVT();
  SDValue Offset, AMOpc;
  bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
  unsigned Opcode = 0;
  bool Match = false;
  if (LoadedVT == MVT::i32 && isPre &&
      SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = ARM::LDR_PRE_IMM;
    Match = true;
  } else if (LoadedVT == MVT::i32 && !isPre &&
      SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = ARM::LDR_POST_IMM;
    Match = true;
  } else if (LoadedVT == MVT::i32 &&
      SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = isPre ? ARM::LDR_PRE_REG : ARM::LDR_POST_REG;
    Match = true;

  } else if (LoadedVT == MVT::i16 &&
             SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
    Match = true;
    Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
      ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
      : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
  } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
    if (LD->getExtensionType() == ISD::SEXTLOAD) {
      if (SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST;
      }
    } else {
      if (isPre &&
          SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = ARM::LDRB_PRE_IMM;
      } else if (!isPre &&
                 SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = ARM::LDRB_POST_IMM;
      } else if (SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = isPre ? ARM::LDRB_PRE_REG : ARM::LDRB_POST_REG;
      }
    }
  }

  if (Match) {
    if (Opcode == ARM::LDR_PRE_IMM || Opcode == ARM::LDRB_PRE_IMM) {
      SDValue Chain = LD->getChain();
      SDValue Base = LD->getBasePtr();
      SDValue Ops[]= { Base, AMOpc, getAL(CurDAG),
                       CurDAG->getRegister(0, MVT::i32), Chain };
      return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32,
                                    MVT::i32, MVT::Other, Ops, 5);
    } else {
      SDValue Chain = LD->getChain();
      SDValue Base = LD->getBasePtr();
      SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG),
                       CurDAG->getRegister(0, MVT::i32), Chain };
      return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32,
                                    MVT::i32, MVT::Other, Ops, 6);
    }
  }

  return NULL;
}
SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  if (AM == ISD::UNINDEXED)
    return NULL;

  EVT LoadedVT = LD->getMemoryVT();
  bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
  SDValue Offset;
  bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
  unsigned Opcode = 0;
  bool Match = false;
  if (SelectT2AddrModeImm8Offset(N, LD->getOffset(), Offset)) {
    switch (LoadedVT.getSimpleVT().SimpleTy) {
    case MVT::i32:
      Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST;
      break;
    case MVT::i16:
      if (isSExtLd)
        Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST;
      else
        Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST;
      break;
    case MVT::i8:
    case MVT::i1:
      if (isSExtLd)
        Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST;
      else
        Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST;
      break;
    default:
      return NULL;
    }
    Match = true;
  }

  if (Match) {
    SDValue Chain = LD->getChain();
    SDValue Base = LD->getBasePtr();
    SDValue Ops[]= { Base, Offset, getAL(CurDAG),
                     CurDAG->getRegister(0, MVT::i32), Chain };
    return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32, MVT::i32,
                                  MVT::Other, Ops, 5);
  }

  return NULL;
}

/// PairSRegs - Form a D register from a pair of S registers.
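/// For example, values in S0 and S1 become subregs ssub_0/ssub_1 of one
/// DPR_VFP2 register, forcing the allocator to assign an adjacent S pair.
///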
SDNode *ARMDAGToDAGISel::PairSRegs(EVT VT, SDValue V0, SDValue V1) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass =
    CurDAG->getTargetConstant(ARM::DPR_VFP2RegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 5);
}

/// PairDRegs - Form a quad register from a pair of D registers.
///
SDNode *ARMDAGToDAGISel::PairDRegs(EVT VT, SDValue V0, SDValue V1) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 5);
}

/// PairQRegs - Form 4 consecutive D registers from a pair of Q registers.
///
SDNode *ARMDAGToDAGISel::PairQRegs(EVT VT, SDValue V0, SDValue V1) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 5);
}

/// QuadSRegs - Form 4 consecutive S registers.
///
SDNode *ARMDAGToDAGISel::QuadSRegs(EVT VT, SDValue V0, SDValue V1,
                                   SDValue V2, SDValue V3) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass =
    CurDAG->getTargetConstant(ARM::QPR_VFP2RegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::ssub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::ssub_3, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                          V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 9);
}

/// QuadDRegs - Form 4 consecutive D registers.
///
SDNode *ARMDAGToDAGISel::QuadDRegs(EVT VT, SDValue V0, SDValue V1,
                                   SDValue V2, SDValue V3) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::dsub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::dsub_3, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                          V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 9);
}

/// QuadQRegs - Form 4 consecutive Q registers.
///
SDNode *ARMDAGToDAGISel::QuadQRegs(EVT VT, SDValue V0, SDValue V1,
                                   SDValue V2, SDValue V3) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQQQPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::qsub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::qsub_3, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                          V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 9);
}

/// GetVLDSTAlign - Get the alignment (in bytes) for the alignment operand
/// of a NEON VLD or VST instruction. The supported values depend on the
/// number of registers being loaded.
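///
/// For example, a VLD2 of Q registers spans 4 consecutive D registers, so a
/// 32-byte-aligned address keeps Alignment == 32, while the D-register form
/// (2 registers) is capped at 16 bytes.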
SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, unsigned NumVecs,
                                       bool is64BitVector) {
  unsigned NumRegs = NumVecs;
  if (!is64BitVector && NumVecs < 3)
    NumRegs *= 2;

  unsigned Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
  if (Alignment >= 32 && NumRegs == 4)
    Alignment = 32;
  else if (Alignment >= 16 && (NumRegs == 2 || NumRegs == 4))
    Alignment = 16;
  else if (Alignment >= 8)
    Alignment = 8;
  else
    Alignment = 0;

  return CurDAG->getTargetConstant(Alignment, MVT::i32);
}

// Get the register stride update opcode of a VLD/VST instruction that
// is otherwise equivalent to the given fixed stride updating instruction.
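// The fixed forms post-increment the base by the access size; any other
// stride needs the _register form with an explicit Rm increment operand.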
static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
  switch (Opc) {
  default: break;
  case ARM::VLD1d8wb_fixed: return ARM::VLD1d8wb_register;
  case ARM::VLD1d16wb_fixed: return ARM::VLD1d16wb_register;
  case ARM::VLD1d32wb_fixed: return ARM::VLD1d32wb_register;
  case ARM::VLD1d64wb_fixed: return ARM::VLD1d64wb_register;
  case ARM::VLD1q8wb_fixed: return ARM::VLD1q8wb_register;
  case ARM::VLD1q16wb_fixed: return ARM::VLD1q16wb_register;
  case ARM::VLD1q32wb_fixed: return ARM::VLD1q32wb_register;
  case ARM::VLD1q64wb_fixed: return ARM::VLD1q64wb_register;

  case ARM::VST1d8wb_fixed: return ARM::VST1d8wb_register;
  case ARM::VST1d16wb_fixed: return ARM::VST1d16wb_register;
  case ARM::VST1d32wb_fixed: return ARM::VST1d32wb_register;
  case ARM::VST1d64wb_fixed: return ARM::VST1d64wb_register;
  case ARM::VST1q8wb_fixed: return ARM::VST1q8wb_register;
  case ARM::VST1q16wb_fixed: return ARM::VST1q16wb_register;
  case ARM::VST1q32wb_fixed: return ARM::VST1q32wb_register;
  case ARM::VST1q64wb_fixed: return ARM::VST1q64wb_register;
  case ARM::VST1d64TPseudoWB_fixed: return ARM::VST1d64TPseudoWB_register;
  case ARM::VST1d64QPseudoWB_fixed: return ARM::VST1d64QPseudoWB_register;

  case ARM::VLD2d8wb_fixed: return ARM::VLD2d8wb_register;
  case ARM::VLD2d16wb_fixed: return ARM::VLD2d16wb_register;
  case ARM::VLD2d32wb_fixed: return ARM::VLD2d32wb_register;
  case ARM::VLD2q8PseudoWB_fixed: return ARM::VLD2q8PseudoWB_register;
  case ARM::VLD2q16PseudoWB_fixed: return ARM::VLD2q16PseudoWB_register;
  case ARM::VLD2q32PseudoWB_fixed: return ARM::VLD2q32PseudoWB_register;

  case ARM::VST2d8wb_fixed: return ARM::VST2d8wb_register;
  case ARM::VST2d16wb_fixed: return ARM::VST2d16wb_register;
  case ARM::VST2d32wb_fixed: return ARM::VST2d32wb_register;
  case ARM::VST2q8PseudoWB_fixed: return ARM::VST2q8PseudoWB_register;
  case ARM::VST2q16PseudoWB_fixed: return ARM::VST2q16PseudoWB_register;
  case ARM::VST2q32PseudoWB_fixed: return ARM::VST2q32PseudoWB_register;

  case ARM::VLD2DUPd8wb_fixed: return ARM::VLD2DUPd8wb_register;
  case ARM::VLD2DUPd16wb_fixed: return ARM::VLD2DUPd16wb_register;
  case ARM::VLD2DUPd32wb_fixed: return ARM::VLD2DUPd32wb_register;
  }
  return Opc; // If not one we handle, return it unchanged.
}
SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
                                   const uint16_t *DOpcodes,
                                   const uint16_t *QOpcodes0,
                                   const uint16_t *QOpcodes1) {
  assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
  DebugLoc dl = N->getDebugLoc();
  SDValue MemAddr, Align;
  unsigned AddrOpIdx = isUpdating ? 1 : 2;
  if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
    return NULL;

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getValueType(0);
  bool is64BitVector = VT.is64BitVector();
  Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);
  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vld type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
  case MVT::v1i64: OpcodeIndex = 3; break;
    // Quad-register operations:
  case MVT::v16i8: OpcodeIndex = 0; break;
  case MVT::v8i16: OpcodeIndex = 1; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 2; break;
  case MVT::v2i64: OpcodeIndex = 3;
    assert(NumVecs == 1 && "v2i64 type only supported for VLD1");
    break;
  }

  EVT ResTy;
  if (NumVecs == 1)
    ResTy = VT;
  else {
    unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
    if (!is64BitVector)
      ResTyElts *= 2;
    ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts);
  }
  std::vector<EVT> ResTys;
  ResTys.push_back(ResTy);
  if (isUpdating)
    ResTys.push_back(MVT::i32);
  ResTys.push_back(MVT::Other);

  SDValue Pred = getAL(CurDAG);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
  SmallVector<SDValue, 7> Ops;
  SDNode *VLd;
  // Double registers and VLD1/VLD2 quad registers are directly supported.
  if (is64BitVector || NumVecs <= 2) {
    unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
                    QOpcodes0[OpcodeIndex]);
    Ops.push_back(MemAddr);
    Ops.push_back(Align);
    if (isUpdating) {
      SDValue Inc = N->getOperand(AddrOpIdx + 1);
      // FIXME: VLD1/VLD2 fixed increment doesn't need Reg0. Remove the reg0
      // case entirely when the rest are updated to that form, too.
      if ((NumVecs == 1 || NumVecs == 2) && !isa<ConstantSDNode>(Inc.getNode()))
        Opc = getVLDSTRegisterUpdateOpcode(Opc);
      // We use a VLD1 for v1i64 even if the pseudo says vld2/3/4, so
      // check for that explicitly too. Horribly hacky, but temporary.
      if ((NumVecs != 1 && NumVecs != 2 && Opc != ARM::VLD1q64wb_fixed) ||
          !isa<ConstantSDNode>(Inc.getNode()))
        Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
    }
    Ops.push_back(Pred);
    Ops.push_back(Reg0);
    Ops.push_back(Chain);
    VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size());
  } else {
    // Otherwise, quad registers are loaded with two separate instructions,
    // where one loads the even registers and the other loads the odd registers.
    EVT AddrTy = MemAddr.getValueType();

    // Load the even subregs. This is always an updating load, so that it
    // provides the address to the second load for the odd subregs.
    SDValue ImplDef =
      SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, ResTy), 0);
    const SDValue OpsA[] = { MemAddr, Align, Reg0, ImplDef, Pred, Reg0, Chain };
    SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
                                          ResTy, AddrTy, MVT::Other, OpsA, 7);
    Chain = SDValue(VLdA, 2);

    // Load the odd subregs.
    Ops.push_back(SDValue(VLdA, 1));
    Ops.push_back(Align);
    if (isUpdating) {
      SDValue Inc = N->getOperand(AddrOpIdx + 1);
      assert(isa<ConstantSDNode>(Inc.getNode()) &&
             "only constant post-increment update allowed for VLD3/4");
      (void)Inc;
      Ops.push_back(Reg0);
    }
    Ops.push_back(SDValue(VLdA, 0));
    Ops.push_back(Pred);
    Ops.push_back(Reg0);
    Ops.push_back(Chain);
    VLd = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
                                 Ops.data(), Ops.size());
  }
  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(VLd)->setMemRefs(MemOp, MemOp + 1);

  if (NumVecs == 1)
    return VLd;

  // Extract out the subregisters.
  SDValue SuperReg = SDValue(VLd, 0);
  assert(ARM::dsub_7 == ARM::dsub_0+7 &&
         ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
  unsigned Sub0 = (is64BitVector ? ARM::dsub_0 : ARM::qsub_0);
  for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
    ReplaceUses(SDValue(N, Vec),
                CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
  ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
  if (isUpdating)
    ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));
  return NULL;
}
SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
                                   const uint16_t *DOpcodes,
                                   const uint16_t *QOpcodes0,
                                   const uint16_t *QOpcodes1) {
  assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
  DebugLoc dl = N->getDebugLoc();
  SDValue MemAddr, Align;
  unsigned AddrOpIdx = isUpdating ? 1 : 2;
  unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
  if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
    return NULL;

  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getOperand(Vec0Idx).getValueType();
  bool is64BitVector = VT.is64BitVector();
  Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);
  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vst type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
  case MVT::v1i64: OpcodeIndex = 3; break;
    // Quad-register operations:
  case MVT::v16i8: OpcodeIndex = 0; break;
  case MVT::v8i16: OpcodeIndex = 1; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 2; break;
  case MVT::v2i64: OpcodeIndex = 3;
    assert(NumVecs == 1 && "v2i64 type only supported for VST1");
    break;
  }
  std::vector<EVT> ResTys;
  if (isUpdating)
    ResTys.push_back(MVT::i32);
  ResTys.push_back(MVT::Other);

  SDValue Pred = getAL(CurDAG);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
  SmallVector<SDValue, 7> Ops;
1779 // Double registers and VST1/VST2 quad registers are directly supported.
1780 if (is64BitVector || NumVecs <= 2) {
1783 SrcReg = N->getOperand(Vec0Idx);
1784 } else if (is64BitVector) {
1785 // Form a REG_SEQUENCE to force register allocation.
1786 SDValue V0 = N->getOperand(Vec0Idx + 0);
1787 SDValue V1 = N->getOperand(Vec0Idx + 1);
1789 SrcReg = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
1791 SDValue V2 = N->getOperand(Vec0Idx + 2);
1792 // If it's a vst3, form a quad D-register and leave the last part as
1794 SDValue V3 = (NumVecs == 3)
1795 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
1796 : N->getOperand(Vec0Idx + 3);
1797 SrcReg = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
1800 // Form a QQ register.
1801 SDValue Q0 = N->getOperand(Vec0Idx);
1802 SDValue Q1 = N->getOperand(Vec0Idx + 1);
1803 SrcReg = SDValue(PairQRegs(MVT::v4i64, Q0, Q1), 0);
1806 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
1807 QOpcodes0[OpcodeIndex]);
1808 Ops.push_back(MemAddr);
1809 Ops.push_back(Align);
1811 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1812 // FIXME: VST1/VST2 fixed increment doesn't need Reg0. Remove the reg0
1813 // case entirely when the rest are updated to that form, too.
1814 if (NumVecs <= 2 && !isa<ConstantSDNode>(Inc.getNode()))
1815 Opc = getVLDSTRegisterUpdateOpcode(Opc);
1816 // We use a VST1 for v1i64 even if the pseudo says vld2/3/4, so
1817 // check for that explicitly too. Horribly hacky, but temporary.
1818 if ((NumVecs > 2 && Opc != ARM::VST1q64wb_fixed) ||
1819 !isa<ConstantSDNode>(Inc.getNode()))
1820 Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
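// Writeback convention assumed by the checks above: a constant increment
// equal to the access size selects the "_fixed" opcode, where the update is
// implicit in the opcode (Reg0 is only a placeholder for pseudos that still
// expect an operand); any other increment is passed in a register on the
// "_register" form chosen by getVLDSTRegisterUpdateOpcode.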
1821 }
1822 Ops.push_back(SrcReg);
1823 Ops.push_back(Pred);
1824 Ops.push_back(Reg0);
1825 Ops.push_back(Chain);
1826 SDNode *VSt =
1827 CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size());
1829 // Transfer memoperands.
1830 cast<MachineSDNode>(VSt)->setMemRefs(MemOp, MemOp + 1);
1832 return VSt;
1833 }
1835 // Otherwise, quad registers are stored with two separate instructions,
1836 // where one stores the even registers and the other stores the odd registers.
1838 // Form the QQQQ REG_SEQUENCE.
1839 SDValue V0 = N->getOperand(Vec0Idx + 0);
1840 SDValue V1 = N->getOperand(Vec0Idx + 1);
1841 SDValue V2 = N->getOperand(Vec0Idx + 2);
1842 SDValue V3 = (NumVecs == 3)
1843 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
1844 : N->getOperand(Vec0Idx + 3);
1845 SDValue RegSeq = SDValue(QuadQRegs(MVT::v8i64, V0, V1, V2, V3), 0);
1847 // Store the even D registers. This is always an updating store, so that it
1848 // provides the address to the second store for the odd subregs.
1849 const SDValue OpsA[] = { MemAddr, Align, Reg0, RegSeq, Pred, Reg0, Chain };
1850 SDNode *VStA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
1851 MemAddr.getValueType(),
1852 MVT::Other, OpsA, 7);
1853 cast<MachineSDNode>(VStA)->setMemRefs(MemOp, MemOp + 1);
1854 Chain = SDValue(VStA, 1);
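// Chaining note: result 0 of VStA is the post-incremented address, so using
// it as the base of the second store below is what splits one QQQQ store
// into an even-registers half followed by an odd-registers half.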
1856 // Store the odd D registers.
1857 Ops.push_back(SDValue(VStA, 0));
1858 Ops.push_back(Align);
1859 if (isUpdating) {
1860 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1861 assert(isa<ConstantSDNode>(Inc.getNode()) &&
1862 "only constant post-increment update allowed for VST3/4");
1863 (void)Inc;
1864 Ops.push_back(Reg0);
1865 }
1866 Ops.push_back(RegSeq);
1867 Ops.push_back(Pred);
1868 Ops.push_back(Reg0);
1869 Ops.push_back(Chain);
1870 SDNode *VStB = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
1871 Ops.data(), Ops.size());
1872 cast<MachineSDNode>(VStB)->setMemRefs(MemOp, MemOp + 1);
1873 return VStB;
1874 }
1876 SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
1877 bool isUpdating, unsigned NumVecs,
1878 const uint16_t *DOpcodes,
1879 const uint16_t *QOpcodes) {
1880 assert(NumVecs >= 2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
1881 DebugLoc dl = N->getDebugLoc();
1883 SDValue MemAddr, Align;
1884 unsigned AddrOpIdx = isUpdating ? 1 : 2;
1885 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
1886 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
1887 return NULL;
1889 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1890 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1892 SDValue Chain = N->getOperand(0);
1893 unsigned Lane =
1894 cast<ConstantSDNode>(N->getOperand(Vec0Idx + NumVecs))->getZExtValue();
1895 EVT VT = N->getOperand(Vec0Idx).getValueType();
1896 bool is64BitVector = VT.is64BitVector();
1898 unsigned Alignment = 0;
1899 if (NumVecs != 3) {
1900 Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
1901 unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
1902 if (Alignment > NumBytes)
1903 Alignment = NumBytes;
1904 if (Alignment < 8 && Alignment < NumBytes)
1905 Alignment = 0;
1906 // Alignment must be a power of two; make sure of that.
1907 Alignment = (Alignment & -Alignment);
1908 if (Alignment == 1)
1909 Alignment = 0;
1910 }
1911 Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
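// Worked example of the clamping above: for a vld2/vst2 lane of v4i16,
// NumBytes = 2 * 16/8 = 4, so an incoming alignment of 8 is clamped to 4,
// which survives the "< 8 and < NumBytes" check and is already a power of
// two, and 4 is what gets encoded.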
1913 unsigned OpcodeIndex;
1914 switch (VT.getSimpleVT().SimpleTy) {
1915 default: llvm_unreachable("unhandled vld/vst lane type");
1916 // Double-register operations:
1917 case MVT::v8i8: OpcodeIndex = 0; break;
1918 case MVT::v4i16: OpcodeIndex = 1; break;
1920 case MVT::v2i32: OpcodeIndex = 2; break;
1921 // Quad-register operations:
1922 case MVT::v8i16: OpcodeIndex = 0; break;
1924 case MVT::v4i32: OpcodeIndex = 1; break;
1925 }
1927 std::vector<EVT> ResTys;
1928 if (IsLoad) {
1929 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
1930 if (!is64BitVector)
1931 ResTyElts *= 2;
1932 ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(),
1933 MVT::i64, ResTyElts));
1934 }
1935 if (isUpdating)
1936 ResTys.push_back(MVT::i32);
1937 ResTys.push_back(MVT::Other);
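// A note on ResTyElts above: there is no 3-register super-register class,
// so a 3-vector result is padded out to a 4-element i64 super-register; the
// fourth subregister is simply never extracted.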
1939 SDValue Pred = getAL(CurDAG);
1940 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
1942 SmallVector<SDValue, 8> Ops;
1943 Ops.push_back(MemAddr);
1944 Ops.push_back(Align);
1945 if (isUpdating) {
1946 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1947 Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
1948 }
1950 SDValue SuperReg;
1951 SDValue V0 = N->getOperand(Vec0Idx + 0);
1952 SDValue V1 = N->getOperand(Vec0Idx + 1);
1953 if (NumVecs == 2) {
1954 if (is64BitVector)
1955 SuperReg = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
1956 else
1957 SuperReg = SDValue(PairQRegs(MVT::v4i64, V0, V1), 0);
1958 } else {
1959 SDValue V2 = N->getOperand(Vec0Idx + 2);
1960 SDValue V3 = (NumVecs == 3)
1961 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
1962 : N->getOperand(Vec0Idx + 3);
1963 if (is64BitVector)
1964 SuperReg = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
1965 else
1966 SuperReg = SDValue(QuadQRegs(MVT::v8i64, V0, V1, V2, V3), 0);
1967 }
1968 Ops.push_back(SuperReg);
1969 Ops.push_back(getI32Imm(Lane));
1970 Ops.push_back(Pred);
1971 Ops.push_back(Reg0);
1972 Ops.push_back(Chain);
1974 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
1975 QOpcodes[OpcodeIndex]);
1976 SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys,
1977 Ops.data(), Ops.size());
1978 cast<MachineSDNode>(VLdLn)->setMemRefs(MemOp, MemOp + 1);
1979 if (!IsLoad)
1980 return VLdLn;
1982 // Extract the subregisters.
1983 SuperReg = SDValue(VLdLn, 0);
1984 assert(ARM::dsub_7 == ARM::dsub_0+7 &&
1985 ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
1986 unsigned Sub0 = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
1987 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
1988 ReplaceUses(SDValue(N, Vec),
1989 CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
1990 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1));
1991 if (isUpdating)
1992 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2));
1993 return NULL;
1994 }
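// SelectVLDDup handles the NEON load-and-duplicate forms: one element is
// loaded and replicated across each of NumVecs D registers. The result is
// modeled as a single i64 super-register and split up with extract_subreg
// once selected.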
1996 SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
1997 unsigned NumVecs,
1998 const uint16_t *Opcodes) {
1999 assert(NumVecs >= 2 && NumVecs <= 4 && "VLDDup NumVecs out-of-range");
2000 DebugLoc dl = N->getDebugLoc();
2002 SDValue MemAddr, Align;
2003 if (!SelectAddrMode6(N, N->getOperand(1), MemAddr, Align))
2004 return NULL;
2006 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2007 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2009 SDValue Chain = N->getOperand(0);
2010 EVT VT = N->getValueType(0);
2012 unsigned Alignment = 0;
2013 if (NumVecs != 3) {
2014 Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
2015 unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
2016 if (Alignment > NumBytes)
2017 Alignment = NumBytes;
2018 if (Alignment < 8 && Alignment < NumBytes)
2019 Alignment = 0;
2020 // Alignment must be a power of two; make sure of that.
2021 Alignment = (Alignment & -Alignment);
2022 if (Alignment == 1)
2023 Alignment = 0;
2024 }
2025 Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
2027 unsigned OpcodeIndex;
2028 switch (VT.getSimpleVT().SimpleTy) {
2029 default: llvm_unreachable("unhandled vld-dup type");
2030 case MVT::v8i8: OpcodeIndex = 0; break;
2031 case MVT::v4i16: OpcodeIndex = 1; break;
2033 case MVT::v2i32: OpcodeIndex = 2; break;
2034 }
2036 SDValue Pred = getAL(CurDAG);
2037 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2039 unsigned Opc = Opcodes[OpcodeIndex];
2040 SmallVector<SDValue, 6> Ops;
2041 Ops.push_back(MemAddr);
2042 Ops.push_back(Align);
2043 if (isUpdating) {
2044 // fixed-stride update instructions don't have an explicit writeback
2045 // operand. It's implicit in the opcode itself.
2046 SDValue Inc = N->getOperand(2);
2047 if (!isa<ConstantSDNode>(Inc.getNode()))
2048 Ops.push_back(Inc);
2049 // FIXME: VLD3 and VLD4 haven't been updated to that form yet.
2050 else if (NumVecs > 2)
2051 Ops.push_back(Reg0);
2052 }
2053 Ops.push_back(Pred);
2054 Ops.push_back(Reg0);
2055 Ops.push_back(Chain);
2057 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
2058 std::vector<EVT> ResTys;
2059 ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts));
2060 if (isUpdating)
2061 ResTys.push_back(MVT::i32);
2062 ResTys.push_back(MVT::Other);
2063 SDNode *VLdDup =
2064 CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size());
2065 cast<MachineSDNode>(VLdDup)->setMemRefs(MemOp, MemOp + 1);
2066 SDValue SuperReg = SDValue(VLdDup, 0);
2068 // Extract the subregisters.
2069 assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
2070 unsigned SubIdx = ARM::dsub_0;
2071 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
2072 ReplaceUses(SDValue(N, Vec),
2073 CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, SuperReg));
2074 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1));
2075 if (isUpdating)
2076 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2));
2077 return NULL;
2078 }
2080 SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs,
2081 unsigned Opc) {
2082 assert(NumVecs >= 2 && NumVecs <= 4 && "VTBL NumVecs out-of-range");
2083 DebugLoc dl = N->getDebugLoc();
2084 EVT VT = N->getValueType(0);
2085 unsigned FirstTblReg = IsExt ? 2 : 1;
2087 // Form a REG_SEQUENCE to force register allocation.
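// (VTBL/VTBX read their table from a list of consecutive D registers;
// funneling the operands through one super-register is what imposes that
// adjacency constraint on the register allocator.)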
2088 SDValue RegSeq;
2089 SDValue V0 = N->getOperand(FirstTblReg + 0);
2090 SDValue V1 = N->getOperand(FirstTblReg + 1);
2091 if (NumVecs == 2)
2092 RegSeq = SDValue(PairDRegs(MVT::v16i8, V0, V1), 0);
2093 else {
2094 SDValue V2 = N->getOperand(FirstTblReg + 2);
2095 // If it's a vtbl3, form a quad D-register and leave the last part as
2096 // an undef.
2097 SDValue V3 = (NumVecs == 3)
2098 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
2099 : N->getOperand(FirstTblReg + 3);
2100 RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
2101 }
2103 SmallVector<SDValue, 6> Ops;
2104 if (IsExt)
2105 Ops.push_back(N->getOperand(1));
2106 Ops.push_back(RegSeq);
2107 Ops.push_back(N->getOperand(FirstTblReg + NumVecs));
2108 Ops.push_back(getAL(CurDAG)); // predicate
2109 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // predicate register
2110 return CurDAG->getMachineNode(Opc, dl, VT, Ops.data(), Ops.size());
2111 }
2113 SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
2114 bool isSigned) {
2115 if (!Subtarget->hasV6T2Ops())
2116 return NULL;
2118 unsigned Opc = isSigned ? (Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX)
2119 : (Subtarget->isThumb() ? ARM::t2UBFX : ARM::UBFX);
2122 // For unsigned extracts, check for a shift right and mask
2123 unsigned And_imm = 0;
2124 if (N->getOpcode() == ISD::AND) {
2125 if (isOpcWithIntImmediate(N, ISD::AND, And_imm)) {
2127 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
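// (For example, 0x00ff passes since 0x00ff & 0x0100 == 0, while a shifted
// mask such as 0x0ff0 fails since 0x0ff0 & 0x0ff1 != 0.)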
2128 if (And_imm & (And_imm + 1))
2129 return NULL;
2131 unsigned Srl_imm = 0;
2132 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL,
2133 Srl_imm)) {
2134 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
2136 // Note: The width operand is encoded as width-1.
2137 unsigned Width = CountTrailingOnes_32(And_imm) - 1;
2138 unsigned LSB = Srl_imm;
2139 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2140 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2141 CurDAG->getTargetConstant(LSB, MVT::i32),
2142 CurDAG->getTargetConstant(Width, MVT::i32),
2143 getAL(CurDAG), Reg0 };
2144 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
2145 }
2146 }
2147 return NULL;
2148 }
2150 // Otherwise, we're looking for a shift of a shift
2151 unsigned Shl_imm = 0;
2152 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
2153 assert(Shl_imm > 0 && Shl_imm < 32 && "bad amount in shift node!");
2154 unsigned Srl_imm = 0;
2155 if (isInt32Immediate(N->getOperand(1), Srl_imm)) {
2156 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
2157 // Note: The width operand is encoded as width-1.
2158 unsigned Width = 32 - Srl_imm - 1;
2159 int LSB = Srl_imm - Shl_imm;
2160 if (LSB < 0)
2161 return NULL;
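// Illustration: for (x << 3) >> 5 this yields LSB = 2 and Width = 26,
// i.e. a 27-bit field extracted starting at bit 2.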
2162 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2163 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2164 CurDAG->getTargetConstant(LSB, MVT::i32),
2165 CurDAG->getTargetConstant(Width, MVT::i32),
2166 getAL(CurDAG), Reg0 };
2167 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
2168 }
2169 }
2170 return NULL;
2171 }
2173 SDNode *ARMDAGToDAGISel::
2174 SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
2175 ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
2176 SDValue CPTmp0;
2177 SDValue CPTmp1;
2178 if (SelectT2ShifterOperandReg(TrueVal, CPTmp0, CPTmp1)) {
2179 unsigned SOVal = cast<ConstantSDNode>(CPTmp1)->getZExtValue();
2180 unsigned SOShOp = ARM_AM::getSORegShOp(SOVal);
2181 unsigned Opc = 0;
2182 switch (SOShOp) {
2183 case ARM_AM::lsl: Opc = ARM::t2MOVCClsl; break;
2184 case ARM_AM::lsr: Opc = ARM::t2MOVCClsr; break;
2185 case ARM_AM::asr: Opc = ARM::t2MOVCCasr; break;
2186 case ARM_AM::ror: Opc = ARM::t2MOVCCror; break;
2187 default:
2188 llvm_unreachable("Unknown so_reg opcode!");
2189 }
2190 SDValue SOShImm =
2191 CurDAG->getTargetConstant(ARM_AM::getSORegOffset(SOVal), MVT::i32);
2192 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2193 SDValue Ops[] = { FalseVal, CPTmp0, SOShImm, CC, CCR, InFlag };
2194 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 6);
2195 }
2196 return 0;
2197 }
2199 SDNode *ARMDAGToDAGISel::
2200 SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
2201 ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
2202 SDValue CPTmp0;
2203 SDValue CPTmp1;
2204 SDValue CPTmp2;
2205 if (SelectImmShifterOperand(TrueVal, CPTmp0, CPTmp2)) {
2206 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2207 SDValue Ops[] = { FalseVal, CPTmp0, CPTmp2, CC, CCR, InFlag };
2208 return CurDAG->SelectNodeTo(N, ARM::MOVCCsi, MVT::i32, Ops, 6);
2209 }
2211 if (SelectRegShifterOperand(TrueVal, CPTmp0, CPTmp1, CPTmp2)) {
2212 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2213 SDValue Ops[] = { FalseVal, CPTmp0, CPTmp1, CPTmp2, CC, CCR, InFlag };
2214 return CurDAG->SelectNodeTo(N, ARM::MOVCCsr, MVT::i32, Ops, 7);
2215 }
2216 return 0;
2217 }
2219 SDNode *ARMDAGToDAGISel::
2220 SelectT2CMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
2221 ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
2222 ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
2223 if (!T)
2224 return 0;
2226 unsigned Opc = 0;
2227 unsigned TrueImm = T->getZExtValue();
2228 if (is_t2_so_imm(TrueImm)) {
2229 Opc = ARM::t2MOVCCi;
2230 } else if (TrueImm <= 0xffff) {
2231 Opc = ARM::t2MOVCCi16;
2232 } else if (is_t2_so_imm_not(TrueImm)) {
2233 TrueImm = ~TrueImm;
2234 Opc = ARM::t2MVNCCi;
2235 } else if (TrueVal.getNode()->hasOneUse() && Subtarget->hasV6T2Ops()) {
2236 // Large immediate.
2237 Opc = ARM::t2MOVCCi32imm;
2238 }
2240 if (Opc) {
2241 SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
2242 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2243 SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag };
2244 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
2245 }
2247 return 0;
2248 }
2250 SDNode *ARMDAGToDAGISel::
2251 SelectARMCMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
2252 ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
2253 ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
2254 if (!T)
2255 return 0;
2257 unsigned Opc = 0;
2258 unsigned TrueImm = T->getZExtValue();
2259 bool isSoImm = is_so_imm(TrueImm);
2260 if (isSoImm) {
2261 Opc = ARM::MOVCCi;
2262 } else if (Subtarget->hasV6T2Ops() && TrueImm <= 0xffff) {
2263 Opc = ARM::MOVCCi16;
2264 } else if (is_so_imm_not(TrueImm)) {
2265 TrueImm = ~TrueImm;
2266 Opc = ARM::MVNCCi;
2267 } else if (TrueVal.getNode()->hasOneUse() &&
2268 (Subtarget->hasV6T2Ops() || ARM_AM::isSOImmTwoPartVal(TrueImm))) {
2269 // Large immediate.
2270 Opc = ARM::MOVCCi32imm;
2271 }
2273 if (Opc) {
2274 SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
2275 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2276 SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag };
2277 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
2278 }
2280 return 0;
2281 }
2283 SDNode *ARMDAGToDAGISel::SelectCMOVOp(SDNode *N) {
2284 EVT VT = N->getValueType(0);
2285 SDValue FalseVal = N->getOperand(0);
2286 SDValue TrueVal = N->getOperand(1);
2287 SDValue CC = N->getOperand(2);
2288 SDValue CCR = N->getOperand(3);
2289 SDValue InFlag = N->getOperand(4);
2290 assert(CC.getOpcode() == ISD::Constant);
2291 assert(CCR.getOpcode() == ISD::Register);
2292 ARMCC::CondCodes CCVal =
2293 (ARMCC::CondCodes)cast<ConstantSDNode>(CC)->getZExtValue();
2295 if (!Subtarget->isThumb1Only() && VT == MVT::i32) {
2296 // Pattern: (ARMcmov:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
2297 // Emits: (MOVCCs:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
2298 // Pattern complexity = 18 cost = 1 size = 0
2299 if (Subtarget->isThumb()) {
2300 SDNode *Res = SelectT2CMOVShiftOp(N, FalseVal, TrueVal,
2301 CCVal, CCR, InFlag);
2302 if (!Res)
2303 Res = SelectT2CMOVShiftOp(N, TrueVal, FalseVal,
2304 ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
2305 if (Res)
2306 return Res;
2307 } else {
2308 SDNode *Res = SelectARMCMOVShiftOp(N, FalseVal, TrueVal,
2309 CCVal, CCR, InFlag);
2310 if (!Res)
2311 Res = SelectARMCMOVShiftOp(N, TrueVal, FalseVal,
2312 ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
2313 if (Res)
2314 return Res;
2315 }
2317 // Pattern: (ARMcmov:i32 GPR:i32:$false,
2318 // (imm:i32)<<P:Pred_so_imm>>:$true,
2319 // (imm:i32):$cc)
2320 // Emits: (MOVCCi:i32 GPR:i32:$false,
2321 // (so_imm:i32 (imm:i32):$true), (imm:i32):$cc)
2322 // Pattern complexity = 10 cost = 1 size = 0
2323 if (Subtarget->isThumb()) {
2324 SDNode *Res = SelectT2CMOVImmOp(N, FalseVal, TrueVal,
2325 CCVal, CCR, InFlag);
2326 if (!Res)
2327 Res = SelectT2CMOVImmOp(N, TrueVal, FalseVal,
2328 ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
2329 if (Res)
2330 return Res;
2331 } else {
2332 SDNode *Res = SelectARMCMOVImmOp(N, FalseVal, TrueVal,
2333 CCVal, CCR, InFlag);
2334 if (!Res)
2335 Res = SelectARMCMOVImmOp(N, TrueVal, FalseVal,
2336 ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
2337 if (Res)
2338 return Res;
2339 }
2340 }
2342 // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
2343 // Emits: (MOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
2344 // Pattern complexity = 6 cost = 1 size = 0
2346 // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
2347 // Emits: (tMOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
2348 // Pattern complexity = 6 cost = 11 size = 0
2350 // Also VMOVScc and VMOVDcc.
2351 SDValue Tmp2 = CurDAG->getTargetConstant(CCVal, MVT::i32);
2352 SDValue Ops[] = { FalseVal, TrueVal, Tmp2, CCR, InFlag };
2353 unsigned Opc = 0;
2354 switch (VT.getSimpleVT().SimpleTy) {
2355 default: llvm_unreachable("Illegal conditional move type!");
2356 case MVT::i32:
2357 Opc = Subtarget->isThumb()
2358 ? (Subtarget->hasThumb2() ? ARM::t2MOVCCr : ARM::tMOVCCr_pseudo)
2359 : ARM::MOVCCr;
2360 break;
2361 case MVT::f32:
2362 Opc = ARM::VMOVScc;
2363 break;
2364 case MVT::f64:
2365 Opc = ARM::VMOVDcc;
2366 break;
2367 }
2368 return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 5);
2369 }
2371 SDNode *ARMDAGToDAGISel::SelectConditionalOp(SDNode *N) {
2372 SDValue FalseVal = N->getOperand(0);
2373 SDValue TrueVal = N->getOperand(1);
2374 ARMCC::CondCodes CCVal =
2375 (ARMCC::CondCodes)cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
2376 SDValue CCR = N->getOperand(3);
2377 assert(CCR.getOpcode() == ISD::Register);
2378 SDValue InFlag = N->getOperand(4);
2379 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2380 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2382 if (Subtarget->isThumb()) {
2383 SDValue CPTmp0;
2384 SDValue CPTmp1;
2385 if (SelectT2ShifterOperandReg(TrueVal, CPTmp0, CPTmp1)) {
2386 unsigned Opc;
2387 switch (N->getOpcode()) {
2388 default: llvm_unreachable("Unexpected node");
2389 case ARMISD::CAND: Opc = ARM::t2ANDCCrs; break;
2390 case ARMISD::COR: Opc = ARM::t2ORRCCrs; break;
2391 case ARMISD::CXOR: Opc = ARM::t2EORCCrs; break;
2393 SDValue Ops[] = { FalseVal, CPTmp0, CPTmp1, CC, CCR, Reg0, InFlag };
2394 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 7);
2395 }
2397 ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
2398 if (T) {
2399 unsigned TrueImm = T->getZExtValue();
2400 if (is_t2_so_imm(TrueImm)) {
2401 unsigned Opc;
2402 switch (N->getOpcode()) {
2403 default: llvm_unreachable("Unexpected node");
2404 case ARMISD::CAND: Opc = ARM::t2ANDCCri; break;
2405 case ARMISD::COR: Opc = ARM::t2ORRCCri; break;
2406 case ARMISD::CXOR: Opc = ARM::t2EORCCri; break;
2408 SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
2409 SDValue Ops[] = { FalseVal, True, CC, CCR, Reg0, InFlag };
2410 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 6);
2411 }
2412 }
2414 unsigned Opc;
2415 switch (N->getOpcode()) {
2416 default: llvm_unreachable("Unexpected node");
2417 case ARMISD::CAND: Opc = ARM::t2ANDCCrr; break;
2418 case ARMISD::COR: Opc = ARM::t2ORRCCrr; break;
2419 case ARMISD::CXOR: Opc = ARM::t2EORCCrr; break;
2421 SDValue Ops[] = { FalseVal, TrueVal, CC, CCR, Reg0, InFlag };
2422 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 6);
2423 }
2425 SDValue CPTmp0;
2426 SDValue CPTmp1;
2427 SDValue CPTmp2;
2428 if (SelectImmShifterOperand(TrueVal, CPTmp0, CPTmp2)) {
2429 unsigned Opc;
2430 switch (N->getOpcode()) {
2431 default: llvm_unreachable("Unexpected node");
2432 case ARMISD::CAND: Opc = ARM::ANDCCrsi; break;
2433 case ARMISD::COR: Opc = ARM::ORRCCrsi; break;
2434 case ARMISD::CXOR: Opc = ARM::EORCCrsi; break;
2436 SDValue Ops[] = { FalseVal, CPTmp0, CPTmp2, CC, CCR, Reg0, InFlag };
2437 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 7);
2438 }
2440 if (SelectRegShifterOperand(TrueVal, CPTmp0, CPTmp1, CPTmp2)) {
2441 unsigned Opc;
2442 switch (N->getOpcode()) {
2443 default: llvm_unreachable("Unexpected node");
2444 case ARMISD::CAND: Opc = ARM::ANDCCrsr; break;
2445 case ARMISD::COR: Opc = ARM::ORRCCrsr; break;
2446 case ARMISD::CXOR: Opc = ARM::EORCCrsr; break;
2448 SDValue Ops[] = { FalseVal, CPTmp0, CPTmp1, CPTmp2, CC, CCR, Reg0, InFlag };
2449 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 8);
2450 }
2452 ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
2453 if (T) {
2454 unsigned TrueImm = T->getZExtValue();
2455 if (is_so_imm(TrueImm)) {
2456 unsigned Opc;
2457 switch (N->getOpcode()) {
2458 default: llvm_unreachable("Unexpected node");
2459 case ARMISD::CAND: Opc = ARM::ANDCCri; break;
2460 case ARMISD::COR: Opc = ARM::ORRCCri; break;
2461 case ARMISD::CXOR: Opc = ARM::EORCCri; break;
2463 SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
2464 SDValue Ops[] = { FalseVal, True, CC, CCR, Reg0, InFlag };
2465 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 6);
2466 }
2467 }
2469 unsigned Opc;
2470 switch (N->getOpcode()) {
2471 default: llvm_unreachable("Unexpected node");
2472 case ARMISD::CAND: Opc = ARM::ANDCCrr; break;
2473 case ARMISD::COR: Opc = ARM::ORRCCrr; break;
2474 case ARMISD::CXOR: Opc = ARM::EORCCrr; break;
2476 SDValue Ops[] = { FalseVal, TrueVal, CC, CCR, Reg0, InFlag };
2477 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 6);
2478 }
2480 /// Target-specific DAG combining for ISD::XOR.
2481 /// Target-independent combining lowers SELECT_CC nodes of the form
2482 /// select_cc setg[ge] X, 0, X, -X
2483 /// select_cc setgt X, -1, X, -X
2484 /// select_cc setl[te] X, 0, -X, X
2485 /// select_cc setlt X, 1, -X, X
2486 /// which represent Integer ABS into:
2487 /// Y = sra (X, size(X)-1); xor (add (X, Y), Y)
2488 /// ARM instruction selection detects the latter and matches it to
2489 /// ARM::ABS or ARM::t2ABS machine node.
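/// For example, with a 32-bit X: Y = sra(X, 31) is 0 when X >= 0 and
/// all-ones when X < 0, so xor(add(X, Y), Y) gives X unchanged for X >= 0
/// and ~(X - 1) == -X for X < 0.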
2490 SDNode *ARMDAGToDAGISel::SelectABSOp(SDNode *N) {
2491 SDValue XORSrc0 = N->getOperand(0);
2492 SDValue XORSrc1 = N->getOperand(1);
2493 EVT VT = N->getValueType(0);
2495 if (DisableARMIntABS)
2496 return NULL;
2498 if (Subtarget->isThumb1Only())
2499 return NULL;
2501 if (XORSrc0.getOpcode() != ISD::ADD ||
2502 XORSrc1.getOpcode() != ISD::SRA)
2503 return NULL;
2505 SDValue ADDSrc0 = XORSrc0.getOperand(0);
2506 SDValue ADDSrc1 = XORSrc0.getOperand(1);
2507 SDValue SRASrc0 = XORSrc1.getOperand(0);
2508 SDValue SRASrc1 = XORSrc1.getOperand(1);
2509 ConstantSDNode *SRAConstant = dyn_cast<ConstantSDNode>(SRASrc1);
2510 EVT XType = SRASrc0.getValueType();
2511 unsigned Size = XType.getSizeInBits() - 1;
2513 if (ADDSrc1 == XORSrc1 &&
2514 ADDSrc0 == SRASrc0 &&
2515 XType.isInteger() &&
2516 SRAConstant != NULL &&
2517 Size == SRAConstant->getZExtValue()) {
2519 unsigned Opcode = ARM::ABS;
2520 if (Subtarget->isThumb2())
2521 Opcode = ARM::t2ABS;
2523 return CurDAG->SelectNodeTo(N, Opcode, VT, ADDSrc0);
2524 }
2526 return NULL;
2527 }
2529 SDNode *ARMDAGToDAGISel::SelectConcatVector(SDNode *N) {
2530 // The only time a CONCAT_VECTORS operation can have legal types is when
2531 // two 64-bit vectors are concatenated to a 128-bit vector.
2532 EVT VT = N->getValueType(0);
2533 if (!VT.is128BitVector() || N->getNumOperands() != 2)
2534 llvm_unreachable("unexpected CONCAT_VECTORS");
2535 return PairDRegs(VT, N->getOperand(0), N->getOperand(1));
2536 }
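// Note: the ATOM*6432 opcodes used by SelectAtomic64 below are pseudos;
// they are expanded later in the backend (post-RA, in ARMExpandPseudoInsts
// as of this writing) into ldrexd/strexd retry loops.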
2538 SDNode *ARMDAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
2539 SmallVector<SDValue, 6> Ops;
2540 Ops.push_back(Node->getOperand(1)); // Ptr
2541 Ops.push_back(Node->getOperand(2)); // Low part of Val1
2542 Ops.push_back(Node->getOperand(3)); // High part of Val1
2543 if (Opc == ARM::ATOMCMPXCHG6432) {
2544 Ops.push_back(Node->getOperand(4)); // Low part of Val2
2545 Ops.push_back(Node->getOperand(5)); // High part of Val2
2546 }
2547 Ops.push_back(Node->getOperand(0)); // Chain
2548 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2549 MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
2550 SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
2551 MVT::i32, MVT::i32, MVT::Other,
2552 Ops.data(), Ops.size());
2553 cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
2554 return ResNode;
2555 }
2557 SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
2558 DebugLoc dl = N->getDebugLoc();
2560 if (N->isMachineOpcode())
2561 return NULL; // Already selected.
2563 switch (N->getOpcode()) {
2564 default: break;
2565 case ISD::XOR: {
2566 // Select special operations if XOR node forms integer ABS pattern
2567 SDNode *ResNode = SelectABSOp(N);
2568 if (ResNode)
2569 return ResNode;
2570 // Other cases are autogenerated.
2571 break;
2572 }
2573 case ISD::Constant: {
2574 unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
2575 bool UseCP = true;
2576 if (Subtarget->hasThumb2())
2577 // Thumb2-aware targets have the MOVT instruction, so all immediates can
2578 // be done with MOV + MOVT, at worst.
2579 UseCP = false;
2580 else {
2581 if (Subtarget->isThumb()) {
2582 UseCP = (Val > 255 && // MOV
2583 ~Val > 255 && // MOV + MVN
2584 !ARM_AM::isThumbImmShiftedVal(Val)); // MOV + LSL
2585 } else
2586 UseCP = (ARM_AM::getSOImmVal(Val) == -1 && // MOV
2587 ARM_AM::getSOImmVal(~Val) == -1 && // MVN
2588 !ARM_AM::isSOImmTwoPartVal(Val)); // two instrs.
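// For instance, Val = 0x12345678 has no rotated-immediate encoding, nor
// does its complement, and it does not split into two so_imm pieces, so
// it falls through to the constant-pool load below.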
2589 }
2591 if (UseCP) {
2592 SDValue CPIdx =
2593 CurDAG->getTargetConstantPool(ConstantInt::get(
2594 Type::getInt32Ty(*CurDAG->getContext()), Val),
2595 TLI.getPointerTy());
2597 SDNode *ResNode;
2598 if (Subtarget->isThumb1Only()) {
2599 SDValue Pred = getAL(CurDAG);
2600 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2601 SDValue Ops[] = { CPIdx, Pred, PredReg, CurDAG->getEntryNode() };
2602 ResNode = CurDAG->getMachineNode(ARM::tLDRpci, dl, MVT::i32, MVT::Other,
2603 Ops, 4);
2604 } else {
2605 SDValue Ops[] = {
2606 CPIdx,
2607 CurDAG->getTargetConstant(0, MVT::i32),
2608 getAL(CurDAG),
2609 CurDAG->getRegister(0, MVT::i32),
2610 CurDAG->getEntryNode()
2611 };
2612 ResNode = CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
2613 Ops, 5);
2614 }
2615 ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0));
2616 return NULL;
2617 }
2619 // Other cases are autogenerated.
2620 break;
2621 }
2622 case ISD::FrameIndex: {
2623 // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
2624 int FI = cast<FrameIndexSDNode>(N)->getIndex();
2625 SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
2626 if (Subtarget->isThumb1Only()) {
2627 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
2628 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2629 return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, Ops, 4);
2630 } else {
2631 unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
2632 ARM::t2ADDri : ARM::ADDri);
2633 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
2634 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2635 CurDAG->getRegister(0, MVT::i32) };
2636 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
2637 }
2638 }
2639 case ISD::SRL:
2640 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
2641 return I;
2642 break;
2643 case ISD::SRA:
2644 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, true))
2645 return I;
2646 break;
2647 case ISD::MUL:
2648 if (Subtarget->isThumb1Only())
2649 break;
2650 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
2651 unsigned RHSV = C->getZExtValue();
2652 if (!RHSV) break;
2653 if (isPowerOf2_32(RHSV-1)) { // 2^n+1?
2654 unsigned ShImm = Log2_32(RHSV-1);
2655 if (ShImm >= 32)
2656 break;
2657 SDValue V = N->getOperand(0);
2658 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
2659 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
2660 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2661 if (Subtarget->isThumb()) {
2662 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2663 return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops, 6);
2664 } else {
2665 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2666 return CurDAG->SelectNodeTo(N, ARM::ADDrsi, MVT::i32, Ops, 7);
2667 }
2668 }
2669 if (isPowerOf2_32(RHSV+1)) { // 2^n-1?
2670 unsigned ShImm = Log2_32(RHSV+1);
2671 if (ShImm >= 32)
2672 break;
2673 SDValue V = N->getOperand(0);
2674 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
2675 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
2676 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2677 if (Subtarget->isThumb()) {
2678 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2679 return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops, 6);
2680 } else {
2681 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2682 return CurDAG->SelectNodeTo(N, ARM::RSBrsi, MVT::i32, Ops, 7);
2683 }
2684 }
2685 }
2686 break;
2687 case ISD::AND: {
2688 // Check for unsigned bitfield extract
2689 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
2690 return I;
2692 // (and (or x, c2), c1) and top 16-bits of c1 and c2 match, lower 16-bits
2693 // of c1 are 0xffff, and lower 16-bit of c2 are 0. That is, the top 16-bits
2694 // are entirely contributed by c2 and lower 16-bits are entirely contributed
2695 // by x. That's equal to (or (and x, 0xffff), (and c1, 0xffff0000)).
2696 // Select it to: "movt x, ((c2 & 0xffff0000) >> 16)".
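// Example: c1 = 0x1234ffff with c2 = 0x12340000 meets all three conditions
// and selects to "movt x, #0x1234".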
2697 EVT VT = N->getValueType(0);
2698 if (VT != MVT::i32)
2699 break;
2700 unsigned Opc = (Subtarget->isThumb() && Subtarget->hasThumb2())
2701 ? ARM::t2MOVTi16
2702 : (Subtarget->hasV6T2Ops() ? ARM::MOVTi16 : 0);
2703 if (!Opc)
2704 break;
2705 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
2706 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2707 if (!N1C)
2708 break;
2709 if (N0.getOpcode() == ISD::OR && N0.getNode()->hasOneUse()) {
2710 SDValue N2 = N0.getOperand(1);
2711 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
2712 if (!N2C)
2713 break;
2714 unsigned N1CVal = N1C->getZExtValue();
2715 unsigned N2CVal = N2C->getZExtValue();
2716 if ((N1CVal & 0xffff0000U) == (N2CVal & 0xffff0000U) &&
2717 (N1CVal & 0xffffU) == 0xffffU &&
2718 (N2CVal & 0xffffU) == 0x0U) {
2719 SDValue Imm16 = CurDAG->getTargetConstant((N2CVal & 0xFFFF0000U) >> 16,
2720 MVT::i32);
2721 SDValue Ops[] = { N0.getOperand(0), Imm16,
2722 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2723 return CurDAG->getMachineNode(Opc, dl, VT, Ops, 4);
2724 }
2725 }
2726 break;
2727 }
2728 case ARMISD::VMOVRRD:
2729 return CurDAG->getMachineNode(ARM::VMOVRRD, dl, MVT::i32, MVT::i32,
2730 N->getOperand(0), getAL(CurDAG),
2731 CurDAG->getRegister(0, MVT::i32));
2732 case ISD::UMUL_LOHI: {
2733 if (Subtarget->isThumb1Only())
2734 break;
2735 if (Subtarget->isThumb()) {
2736 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2737 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2738 CurDAG->getRegister(0, MVT::i32) };
2739 return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32, Ops, 4);
2740 } else {
2741 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2742 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2743 CurDAG->getRegister(0, MVT::i32) };
2744 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2745 ARM::UMULL : ARM::UMULLv5,
2746 dl, MVT::i32, MVT::i32, Ops, 5);
2747 }
2748 }
2749 case ISD::SMUL_LOHI: {
2750 if (Subtarget->isThumb1Only())
2751 break;
2752 if (Subtarget->isThumb()) {
2753 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2754 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2755 return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32, Ops, 4);
2756 } else {
2757 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2758 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2759 CurDAG->getRegister(0, MVT::i32) };
2760 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2761 ARM::SMULL : ARM::SMULLv5,
2762 dl, MVT::i32, MVT::i32, Ops, 5);
2763 }
2764 }
2765 case ISD::LOAD: {
2766 SDNode *ResNode = 0;
2767 if (Subtarget->isThumb() && Subtarget->hasThumb2())
2768 ResNode = SelectT2IndexedLoad(N);
2769 else
2770 ResNode = SelectARMIndexedLoad(N);
2771 if (ResNode)
2772 return ResNode;
2773 // Other cases are autogenerated.
2774 break;
2775 }
2776 case ARMISD::BRCOND: {
2777 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2778 // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc)
2779 // Pattern complexity = 6 cost = 1 size = 0
2781 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2782 // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc)
2783 // Pattern complexity = 6 cost = 1 size = 0
2785 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2786 // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc)
2787 // Pattern complexity = 6 cost = 1 size = 0
2789 unsigned Opc = Subtarget->isThumb() ?
2790 ((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
2791 SDValue Chain = N->getOperand(0);
2792 SDValue N1 = N->getOperand(1);
2793 SDValue N2 = N->getOperand(2);
2794 SDValue N3 = N->getOperand(3);
2795 SDValue InFlag = N->getOperand(4);
2796 assert(N1.getOpcode() == ISD::BasicBlock);
2797 assert(N2.getOpcode() == ISD::Constant);
2798 assert(N3.getOpcode() == ISD::Register);
2800 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
2801 cast<ConstantSDNode>(N2)->getZExtValue()),
2802 MVT::i32);
2803 SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
2804 SDNode *ResNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
2805 MVT::Glue, Ops, 5);
2806 Chain = SDValue(ResNode, 0);
2807 if (N->getNumValues() == 2) {
2808 InFlag = SDValue(ResNode, 1);
2809 ReplaceUses(SDValue(N, 1), InFlag);
2810 }
2811 ReplaceUses(SDValue(N, 0),
2812 SDValue(Chain.getNode(), Chain.getResNo()));
2813 return NULL;
2814 }
2815 case ARMISD::CMOV:
2816 return SelectCMOVOp(N);
2817 case ARMISD::CAND:
2818 case ARMISD::COR:
2819 case ARMISD::CXOR:
2820 return SelectConditionalOp(N);
2821 case ARMISD::VZIP: {
2822 unsigned Opc = 0;
2823 EVT VT = N->getValueType(0);
2824 switch (VT.getSimpleVT().SimpleTy) {
2825 default: return NULL;
2826 case MVT::v8i8: Opc = ARM::VZIPd8; break;
2827 case MVT::v4i16: Opc = ARM::VZIPd16; break;
2828 case MVT::v2f32:
2829 // vzip.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
2830 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2831 case MVT::v16i8: Opc = ARM::VZIPq8; break;
2832 case MVT::v8i16: Opc = ARM::VZIPq16; break;
2833 case MVT::v4f32:
2834 case MVT::v4i32: Opc = ARM::VZIPq32; break;
2835 }
2836 SDValue Pred = getAL(CurDAG);
2837 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2838 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2839 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4);
2840 }
2841 case ARMISD::VUZP: {
2842 unsigned Opc = 0;
2843 EVT VT = N->getValueType(0);
2844 switch (VT.getSimpleVT().SimpleTy) {
2845 default: return NULL;
2846 case MVT::v8i8: Opc = ARM::VUZPd8; break;
2847 case MVT::v4i16: Opc = ARM::VUZPd16; break;
2848 case MVT::v2f32:
2849 // vuzp.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
2850 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2851 case MVT::v16i8: Opc = ARM::VUZPq8; break;
2852 case MVT::v8i16: Opc = ARM::VUZPq16; break;
2853 case MVT::v4f32:
2854 case MVT::v4i32: Opc = ARM::VUZPq32; break;
2855 }
2856 SDValue Pred = getAL(CurDAG);
2857 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2858 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2859 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4);
2860 }
2861 case ARMISD::VTRN: {
2862 unsigned Opc = 0;
2863 EVT VT = N->getValueType(0);
2864 switch (VT.getSimpleVT().SimpleTy) {
2865 default: return NULL;
2866 case MVT::v8i8: Opc = ARM::VTRNd8; break;
2867 case MVT::v4i16: Opc = ARM::VTRNd16; break;
2868 case MVT::v2f32:
2869 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2870 case MVT::v16i8: Opc = ARM::VTRNq8; break;
2871 case MVT::v8i16: Opc = ARM::VTRNq16; break;
2872 case MVT::v4f32:
2873 case MVT::v4i32: Opc = ARM::VTRNq32; break;
2874 }
2875 SDValue Pred = getAL(CurDAG);
2876 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2877 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2878 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4);
2879 }
2880 case ARMISD::BUILD_VECTOR: {
2881 EVT VecVT = N->getValueType(0);
2882 EVT EltVT = VecVT.getVectorElementType();
2883 unsigned NumElts = VecVT.getVectorNumElements();
2884 if (EltVT == MVT::f64) {
2885 assert(NumElts == 2 && "unexpected type for BUILD_VECTOR");
2886 return PairDRegs(VecVT, N->getOperand(0), N->getOperand(1));
2887 }
2888 assert(EltVT == MVT::f32 && "unexpected type for BUILD_VECTOR");
2889 if (NumElts == 2)
2890 return PairSRegs(VecVT, N->getOperand(0), N->getOperand(1));
2891 assert(NumElts == 4 && "unexpected type for BUILD_VECTOR");
2892 return QuadSRegs(VecVT, N->getOperand(0), N->getOperand(1),
2893 N->getOperand(2), N->getOperand(3));
2894 }
2896 case ARMISD::VLD2DUP: {
2897 static const uint16_t Opcodes[] = { ARM::VLD2DUPd8, ARM::VLD2DUPd16,
2898 ARM::VLD2DUPd32 };
2899 return SelectVLDDup(N, false, 2, Opcodes);
2900 }
2902 case ARMISD::VLD3DUP: {
2903 static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo,
2904 ARM::VLD3DUPd16Pseudo,
2905 ARM::VLD3DUPd32Pseudo };
2906 return SelectVLDDup(N, false, 3, Opcodes);
2907 }
2909 case ARMISD::VLD4DUP: {
2910 static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo,
2911 ARM::VLD4DUPd16Pseudo,
2912 ARM::VLD4DUPd32Pseudo };
2913 return SelectVLDDup(N, false, 4, Opcodes);
2914 }
2916 case ARMISD::VLD2DUP_UPD: {
2917 static const uint16_t Opcodes[] = { ARM::VLD2DUPd8wb_fixed,
2918 ARM::VLD2DUPd16wb_fixed,
2919 ARM::VLD2DUPd32wb_fixed };
2920 return SelectVLDDup(N, true, 2, Opcodes);
2921 }
2923 case ARMISD::VLD3DUP_UPD: {
2924 static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo_UPD,
2925 ARM::VLD3DUPd16Pseudo_UPD,
2926 ARM::VLD3DUPd32Pseudo_UPD };
2927 return SelectVLDDup(N, true, 3, Opcodes);
2928 }
2930 case ARMISD::VLD4DUP_UPD: {
2931 static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo_UPD,
2932 ARM::VLD4DUPd16Pseudo_UPD,
2933 ARM::VLD4DUPd32Pseudo_UPD };
2934 return SelectVLDDup(N, true, 4, Opcodes);
2935 }
2937 case ARMISD::VLD1_UPD: {
2938 static const uint16_t DOpcodes[] = { ARM::VLD1d8wb_fixed,
2939 ARM::VLD1d16wb_fixed,
2940 ARM::VLD1d32wb_fixed,
2941 ARM::VLD1d64wb_fixed };
2942 static const uint16_t QOpcodes[] = { ARM::VLD1q8wb_fixed,
2943 ARM::VLD1q16wb_fixed,
2944 ARM::VLD1q32wb_fixed,
2945 ARM::VLD1q64wb_fixed };
2946 return SelectVLD(N, true, 1, DOpcodes, QOpcodes, 0);
2947 }
2949 case ARMISD::VLD2_UPD: {
2950 static const uint16_t DOpcodes[] = { ARM::VLD2d8wb_fixed,
2951 ARM::VLD2d16wb_fixed,
2952 ARM::VLD2d32wb_fixed,
2953 ARM::VLD1q64wb_fixed};
2954 static const uint16_t QOpcodes[] = { ARM::VLD2q8PseudoWB_fixed,
2955 ARM::VLD2q16PseudoWB_fixed,
2956 ARM::VLD2q32PseudoWB_fixed };
2957 return SelectVLD(N, true, 2, DOpcodes, QOpcodes, 0);
2958 }
2960 case ARMISD::VLD3_UPD: {
2961 static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo_UPD,
2962 ARM::VLD3d16Pseudo_UPD,
2963 ARM::VLD3d32Pseudo_UPD,
2964 ARM::VLD1q64wb_fixed};
2965 static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
2966 ARM::VLD3q16Pseudo_UPD,
2967 ARM::VLD3q32Pseudo_UPD };
2968 static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo_UPD,
2969 ARM::VLD3q16oddPseudo_UPD,
2970 ARM::VLD3q32oddPseudo_UPD };
2971 return SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
2972 }
2974 case ARMISD::VLD4_UPD: {
2975 static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo_UPD,
2976 ARM::VLD4d16Pseudo_UPD,
2977 ARM::VLD4d32Pseudo_UPD,
2978 ARM::VLD1q64wb_fixed};
2979 static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
2980 ARM::VLD4q16Pseudo_UPD,
2981 ARM::VLD4q32Pseudo_UPD };
2982 static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo_UPD,
2983 ARM::VLD4q16oddPseudo_UPD,
2984 ARM::VLD4q32oddPseudo_UPD };
2985 return SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
2986 }
2988 case ARMISD::VLD2LN_UPD: {
2989 static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo_UPD,
2990 ARM::VLD2LNd16Pseudo_UPD,
2991 ARM::VLD2LNd32Pseudo_UPD };
2992 static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo_UPD,
2993 ARM::VLD2LNq32Pseudo_UPD };
2994 return SelectVLDSTLane(N, true, true, 2, DOpcodes, QOpcodes);
2995 }
2997 case ARMISD::VLD3LN_UPD: {
2998 static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo_UPD,
2999 ARM::VLD3LNd16Pseudo_UPD,
3000 ARM::VLD3LNd32Pseudo_UPD };
3001 static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo_UPD,
3002 ARM::VLD3LNq32Pseudo_UPD };
3003 return SelectVLDSTLane(N, true, true, 3, DOpcodes, QOpcodes);
3004 }
3006 case ARMISD::VLD4LN_UPD: {
3007 static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo_UPD,
3008 ARM::VLD4LNd16Pseudo_UPD,
3009 ARM::VLD4LNd32Pseudo_UPD };
3010 static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo_UPD,
3011 ARM::VLD4LNq32Pseudo_UPD };
3012 return SelectVLDSTLane(N, true, true, 4, DOpcodes, QOpcodes);
3013 }
3015 case ARMISD::VST1_UPD: {
3016 static const uint16_t DOpcodes[] = { ARM::VST1d8wb_fixed,
3017 ARM::VST1d16wb_fixed,
3018 ARM::VST1d32wb_fixed,
3019 ARM::VST1d64wb_fixed };
3020 static const uint16_t QOpcodes[] = { ARM::VST1q8wb_fixed,
3021 ARM::VST1q16wb_fixed,
3022 ARM::VST1q32wb_fixed,
3023 ARM::VST1q64wb_fixed };
3024 return SelectVST(N, true, 1, DOpcodes, QOpcodes, 0);
3025 }
3027 case ARMISD::VST2_UPD: {
3028 static const uint16_t DOpcodes[] = { ARM::VST2d8wb_fixed,
3029 ARM::VST2d16wb_fixed,
3030 ARM::VST2d32wb_fixed,
3031 ARM::VST1q64wb_fixed};
3032 static const uint16_t QOpcodes[] = { ARM::VST2q8PseudoWB_fixed,
3033 ARM::VST2q16PseudoWB_fixed,
3034 ARM::VST2q32PseudoWB_fixed };
3035 return SelectVST(N, true, 2, DOpcodes, QOpcodes, 0);
3036 }
3038 case ARMISD::VST3_UPD: {
3039 static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo_UPD,
3040 ARM::VST3d16Pseudo_UPD,
3041 ARM::VST3d32Pseudo_UPD,
3042 ARM::VST1d64TPseudoWB_fixed};
3043 static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
3044 ARM::VST3q16Pseudo_UPD,
3045 ARM::VST3q32Pseudo_UPD };
3046 static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo_UPD,
3047 ARM::VST3q16oddPseudo_UPD,
3048 ARM::VST3q32oddPseudo_UPD };
3049 return SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
3050 }
3052 case ARMISD::VST4_UPD: {
3053 static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo_UPD,
3054 ARM::VST4d16Pseudo_UPD,
3055 ARM::VST4d32Pseudo_UPD,
3056 ARM::VST1d64QPseudoWB_fixed};
3057 static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
3058 ARM::VST4q16Pseudo_UPD,
3059 ARM::VST4q32Pseudo_UPD };
3060 static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo_UPD,
3061 ARM::VST4q16oddPseudo_UPD,
3062 ARM::VST4q32oddPseudo_UPD };
3063 return SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
3064 }
3066 case ARMISD::VST2LN_UPD: {
3067 static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo_UPD,
3068 ARM::VST2LNd16Pseudo_UPD,
3069 ARM::VST2LNd32Pseudo_UPD };
3070 static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo_UPD,
3071 ARM::VST2LNq32Pseudo_UPD };
3072 return SelectVLDSTLane(N, false, true, 2, DOpcodes, QOpcodes);
3073 }
3075 case ARMISD::VST3LN_UPD: {
3076 static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo_UPD,
3077 ARM::VST3LNd16Pseudo_UPD,
3078 ARM::VST3LNd32Pseudo_UPD };
3079 static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo_UPD,
3080 ARM::VST3LNq32Pseudo_UPD };
3081 return SelectVLDSTLane(N, false, true, 3, DOpcodes, QOpcodes);
3082 }
3084 case ARMISD::VST4LN_UPD: {
3085 static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo_UPD,
3086 ARM::VST4LNd16Pseudo_UPD,
3087 ARM::VST4LNd32Pseudo_UPD };
3088 static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo_UPD,
3089 ARM::VST4LNq32Pseudo_UPD };
3090 return SelectVLDSTLane(N, false, true, 4, DOpcodes, QOpcodes);
3091 }
3093 case ISD::INTRINSIC_VOID:
3094 case ISD::INTRINSIC_W_CHAIN: {
3095 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
3096 switch (IntNo) {
3097 default:
3098 break;
3100 case Intrinsic::arm_ldrexd: {
3101 SDValue MemAddr = N->getOperand(2);
3102 DebugLoc dl = N->getDebugLoc();
3103 SDValue Chain = N->getOperand(0);
3105 unsigned NewOpc = ARM::LDREXD;
3106 if (Subtarget->isThumb() && Subtarget->hasThumb2())
3107 NewOpc = ARM::t2LDREXD;
3109 // arm_ldrexd returns a i64 value in {i32, i32}
3110 std::vector<EVT> ResTys;
3111 ResTys.push_back(MVT::i32);
3112 ResTys.push_back(MVT::i32);
3113 ResTys.push_back(MVT::Other);
3115 // place arguments in the right order
3116 SmallVector<SDValue, 7> Ops;
3117 Ops.push_back(MemAddr);
3118 Ops.push_back(getAL(CurDAG));
3119 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
3120 Ops.push_back(Chain);
3121 SDNode *Ld = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops.data(),
3122 Ops.size());
3123 // Transfer memoperands.
3124 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
3125 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
3126 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
3128 // Until there's support for specifying explicit register constraints
3129 // like the use of even/odd register pair, hardcode ldrexd to always
3130 // use the pair [R0, R1] to hold the load result.
3131 Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ARM::R0,
3132 SDValue(Ld, 0), SDValue(0,0));
3133 Chain = CurDAG->getCopyToReg(Chain, dl, ARM::R1,
3134 SDValue(Ld, 1), Chain.getValue(1));
3136 // Remap uses.
3137 SDValue Glue = Chain.getValue(1);
3138 if (!SDValue(N, 0).use_empty()) {
3139 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
3140 ARM::R0, MVT::i32, Glue);
3141 Glue = Result.getValue(2);
3142 ReplaceUses(SDValue(N, 0), Result);
3143 }
3144 if (!SDValue(N, 1).use_empty()) {
3145 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
3146 ARM::R1, MVT::i32, Glue);
3147 Glue = Result.getValue(2);
3148 ReplaceUses(SDValue(N, 1), Result);
3149 }
3151 ReplaceUses(SDValue(N, 2), SDValue(Ld, 2));
3152 return NULL;
3153 }
3155 case Intrinsic::arm_strexd: {
3156 DebugLoc dl = N->getDebugLoc();
3157 SDValue Chain = N->getOperand(0);
3158 SDValue Val0 = N->getOperand(2);
3159 SDValue Val1 = N->getOperand(3);
3160 SDValue MemAddr = N->getOperand(4);
3162 // Until there's support for specifying explicit register constraints
3163 // like the use of even/odd register pair, hardcode strexd to always
3164 // use the pair [R2, R3] to hold the i64 (i32, i32) value to be stored.
3165 Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ARM::R2, Val0,
3166 SDValue(0, 0));
3167 Chain = CurDAG->getCopyToReg(Chain, dl, ARM::R3, Val1, Chain.getValue(1));
3169 SDValue Glue = Chain.getValue(1);
3170 Val0 = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
3171 ARM::R2, MVT::i32, Glue);
3172 Glue = Val0.getValue(1);
3173 Val1 = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
3174 ARM::R3, MVT::i32, Glue);
3176 // Store exclusive double returns an i32 value which is the return status
3177 // of the issued store.
3178 std::vector<EVT> ResTys;
3179 ResTys.push_back(MVT::i32);
3180 ResTys.push_back(MVT::Other);
3182 // place arguments in the right order
3183 SmallVector<SDValue, 7> Ops;
3184 Ops.push_back(Val0);
3185 Ops.push_back(Val1);
3186 Ops.push_back(MemAddr);
3187 Ops.push_back(getAL(CurDAG));
3188 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
3189 Ops.push_back(Chain);
3191 unsigned NewOpc = ARM::STREXD;
3192 if (Subtarget->isThumb() && Subtarget->hasThumb2())
3193 NewOpc = ARM::t2STREXD;
3195 SDNode *St = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops.data(),
3196 Ops.size());
3197 // Transfer memoperands.
3198 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
3199 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
3200 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
3202 return St;
3203 }
3205 case Intrinsic::arm_neon_vld1: {
3206 static const uint16_t DOpcodes[] = { ARM::VLD1d8, ARM::VLD1d16,
3207 ARM::VLD1d32, ARM::VLD1d64 };
3208 static const uint16_t QOpcodes[] = { ARM::VLD1q8, ARM::VLD1q16,
3209 ARM::VLD1q32, ARM::VLD1q64};
3210 return SelectVLD(N, false, 1, DOpcodes, QOpcodes, 0);
3211 }
3213 case Intrinsic::arm_neon_vld2: {
3214 static const uint16_t DOpcodes[] = { ARM::VLD2d8, ARM::VLD2d16,
3215 ARM::VLD2d32, ARM::VLD1q64 };
3216 static const uint16_t QOpcodes[] = { ARM::VLD2q8Pseudo, ARM::VLD2q16Pseudo,
3217 ARM::VLD2q32Pseudo };
3218 return SelectVLD(N, false, 2, DOpcodes, QOpcodes, 0);
3219 }
3221 case Intrinsic::arm_neon_vld3: {
3222 static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo,
3223 ARM::VLD3d16Pseudo,
3224 ARM::VLD3d32Pseudo,
3225 ARM::VLD1d64TPseudo };
3226 static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
3227 ARM::VLD3q16Pseudo_UPD,
3228 ARM::VLD3q32Pseudo_UPD };
3229 static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo,
3230 ARM::VLD3q16oddPseudo,
3231 ARM::VLD3q32oddPseudo };
3232 return SelectVLD(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
3233 }
3235 case Intrinsic::arm_neon_vld4: {
3236 static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo,
3237 ARM::VLD4d16Pseudo,
3238 ARM::VLD4d32Pseudo,
3239 ARM::VLD1d64QPseudo };
3240 static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
3241 ARM::VLD4q16Pseudo_UPD,
3242 ARM::VLD4q32Pseudo_UPD };
3243 static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo,
3244 ARM::VLD4q16oddPseudo,
3245 ARM::VLD4q32oddPseudo };
3246 return SelectVLD(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
3247 }
3249 case Intrinsic::arm_neon_vld2lane: {
3250 static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo,
3251 ARM::VLD2LNd16Pseudo,
3252 ARM::VLD2LNd32Pseudo };
3253 static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo,
3254 ARM::VLD2LNq32Pseudo };
3255 return SelectVLDSTLane(N, true, false, 2, DOpcodes, QOpcodes);
3256 }
3258 case Intrinsic::arm_neon_vld3lane: {
3259 static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo,
3260 ARM::VLD3LNd16Pseudo,
3261 ARM::VLD3LNd32Pseudo };
3262 static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo,
3263 ARM::VLD3LNq32Pseudo };
3264 return SelectVLDSTLane(N, true, false, 3, DOpcodes, QOpcodes);
3265 }
3267 case Intrinsic::arm_neon_vld4lane: {
3268 static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo,
3269 ARM::VLD4LNd16Pseudo,
3270 ARM::VLD4LNd32Pseudo };
3271 static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo,
3272 ARM::VLD4LNq32Pseudo };
3273 return SelectVLDSTLane(N, true, false, 4, DOpcodes, QOpcodes);
3274 }
3276 case Intrinsic::arm_neon_vst1: {
3277 static const uint16_t DOpcodes[] = { ARM::VST1d8, ARM::VST1d16,
3278 ARM::VST1d32, ARM::VST1d64 };
3279 static const uint16_t QOpcodes[] = { ARM::VST1q8, ARM::VST1q16,
3280 ARM::VST1q32, ARM::VST1q64 };
3281 return SelectVST(N, false, 1, DOpcodes, QOpcodes, 0);
3282 }
3284 case Intrinsic::arm_neon_vst2: {
3285 static const uint16_t DOpcodes[] = { ARM::VST2d8, ARM::VST2d16,
3286 ARM::VST2d32, ARM::VST1q64 };
3287 static const uint16_t QOpcodes[] = { ARM::VST2q8Pseudo, ARM::VST2q16Pseudo,
3288 ARM::VST2q32Pseudo };
3289 return SelectVST(N, false, 2, DOpcodes, QOpcodes, 0);
3290 }
3292 case Intrinsic::arm_neon_vst3: {
3293 static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo,
3294 ARM::VST3d16Pseudo,
3295 ARM::VST3d32Pseudo,
3296 ARM::VST1d64TPseudo };
3297 static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
3298 ARM::VST3q16Pseudo_UPD,
3299 ARM::VST3q32Pseudo_UPD };
3300 static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo,
3301 ARM::VST3q16oddPseudo,
3302 ARM::VST3q32oddPseudo };
3303 return SelectVST(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
3304 }
3306 case Intrinsic::arm_neon_vst4: {
3307 static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo,
3308 ARM::VST4d16Pseudo,
3309 ARM::VST4d32Pseudo,
3310 ARM::VST1d64QPseudo };
3311 static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
3312 ARM::VST4q16Pseudo_UPD,
3313 ARM::VST4q32Pseudo_UPD };
3314 static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo,
3315 ARM::VST4q16oddPseudo,
3316 ARM::VST4q32oddPseudo };
3317 return SelectVST(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
3318 }
3320 case Intrinsic::arm_neon_vst2lane: {
3321 static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo,
3322 ARM::VST2LNd16Pseudo,
3323 ARM::VST2LNd32Pseudo };
3324 static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo,
3325 ARM::VST2LNq32Pseudo };
3326 return SelectVLDSTLane(N, false, false, 2, DOpcodes, QOpcodes);
3327 }
3329 case Intrinsic::arm_neon_vst3lane: {
3330 static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo,
3331 ARM::VST3LNd16Pseudo,
3332 ARM::VST3LNd32Pseudo };
3333 static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo,
3334 ARM::VST3LNq32Pseudo };
3335 return SelectVLDSTLane(N, false, false, 3, DOpcodes, QOpcodes);
3336 }
3338 case Intrinsic::arm_neon_vst4lane: {
3339 static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo,
3340 ARM::VST4LNd16Pseudo,
3341 ARM::VST4LNd32Pseudo };
3342 static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo,
3343 ARM::VST4LNq32Pseudo };
3344 return SelectVLDSTLane(N, false, false, 4, DOpcodes, QOpcodes);
3345 }
3346 }
3347 break;
3348 }
3350 case ISD::INTRINSIC_WO_CHAIN: {
3351 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
3352 switch (IntNo) {
3353 default:
3354 break;
3356 case Intrinsic::arm_neon_vtbl2:
3357 return SelectVTBL(N, false, 2, ARM::VTBL2);
3358 case Intrinsic::arm_neon_vtbl3:
3359 return SelectVTBL(N, false, 3, ARM::VTBL3Pseudo);
3360 case Intrinsic::arm_neon_vtbl4:
3361 return SelectVTBL(N, false, 4, ARM::VTBL4Pseudo);
3363 case Intrinsic::arm_neon_vtbx2:
3364 return SelectVTBL(N, true, 2, ARM::VTBX2);
3365 case Intrinsic::arm_neon_vtbx3:
3366 return SelectVTBL(N, true, 3, ARM::VTBX3Pseudo);
3367 case Intrinsic::arm_neon_vtbx4:
3368 return SelectVTBL(N, true, 4, ARM::VTBX4Pseudo);
3369 }
3370 break;
3371 }
3373 case ARMISD::VTBL1: {
3374 DebugLoc dl = N->getDebugLoc();
3375 EVT VT = N->getValueType(0);
3376 SmallVector<SDValue, 6> Ops;
3378 Ops.push_back(N->getOperand(0));
3379 Ops.push_back(N->getOperand(1));
3380 Ops.push_back(getAL(CurDAG)); // Predicate
3381 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
3382 return CurDAG->getMachineNode(ARM::VTBL1, dl, VT, Ops.data(), Ops.size());
3383 }
3384 case ARMISD::VTBL2: {
3385 DebugLoc dl = N->getDebugLoc();
3386 EVT VT = N->getValueType(0);
3388 // Form a REG_SEQUENCE to force register allocation.
3389 SDValue V0 = N->getOperand(0);
3390 SDValue V1 = N->getOperand(1);
3391 SDValue RegSeq = SDValue(PairDRegs(MVT::v16i8, V0, V1), 0);
3393 SmallVector<SDValue, 6> Ops;
3394 Ops.push_back(RegSeq);
3395 Ops.push_back(N->getOperand(2));
3396 Ops.push_back(getAL(CurDAG)); // Predicate
3397 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
3398 return CurDAG->getMachineNode(ARM::VTBL2, dl, VT,
3399 Ops.data(), Ops.size());
3400 }
3402 case ISD::CONCAT_VECTORS:
3403 return SelectConcatVector(N);
3405 case ARMISD::ATOMOR64_DAG:
3406 return SelectAtomic64(N, ARM::ATOMOR6432);
3407 case ARMISD::ATOMXOR64_DAG:
3408 return SelectAtomic64(N, ARM::ATOMXOR6432);
3409 case ARMISD::ATOMADD64_DAG:
3410 return SelectAtomic64(N, ARM::ATOMADD6432);
3411 case ARMISD::ATOMSUB64_DAG:
3412 return SelectAtomic64(N, ARM::ATOMSUB6432);
3413 case ARMISD::ATOMNAND64_DAG:
3414 return SelectAtomic64(N, ARM::ATOMNAND6432);
3415 case ARMISD::ATOMAND64_DAG:
3416 return SelectAtomic64(N, ARM::ATOMAND6432);
3417 case ARMISD::ATOMSWAP64_DAG:
3418 return SelectAtomic64(N, ARM::ATOMSWAP6432);
3419 case ARMISD::ATOMCMPXCHG64_DAG:
3420 return SelectAtomic64(N, ARM::ATOMCMPXCHG6432);
3421 }
3423 return SelectCode(N);
3424 }
3426 bool ARMDAGToDAGISel::
3427 SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
3428 std::vector<SDValue> &OutOps) {
3429 assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
3430 // Require the address to be in a register. That is safe for all ARM
3431 // variants and it is hard to do anything much smarter without knowing
3432 // how the operand is used.
3433 OutOps.push_back(Op);
3434 return false;
3435 }
3437 /// createARMISelDag - This pass converts a legalized DAG into a
3438 /// ARM-specific DAG, ready for instruction scheduling.
3440 FunctionPass *llvm::createARMISelDag(ARMBaseTargetMachine &TM,
3441 CodeGenOpt::Level OptLevel) {
3442 return new ARMDAGToDAGISel(TM, OptLevel);
3443 }