//===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the ARM target.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-isel"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMAddressingModes.h"
#include "ARMTargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static cl::opt<bool>
DisableShifterOp("disable-shifter-op", cl::Hidden,
  cl::desc("Disable isel of shifter-op"),
  cl::init(false));

static cl::opt<bool>
CheckVMLxHazard("check-vmlx-hazard", cl::Hidden,
  cl::desc("Check fp vmla / vmls hazard at isel time"),
  cl::init(true));

//===--------------------------------------------------------------------===//
/// ARMDAGToDAGISel - ARM specific code to select ARM machine
/// instructions for SelectionDAG operations.
///
namespace {

enum AddrMode2Type {
  AM2_BASE, // Simple AM2 (+-imm12)
  AM2_SHOP  // Shifter-op AM2
};
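
// Illustrative examples (not from the original source) of the two forms:
//   ldr r0, [r1, #-42]        @ AM2_BASE: base register +/- imm12
//   ldr r0, [r1, r2, lsl #2]  @ AM2_SHOP: base register +/- shifted register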

class ARMDAGToDAGISel : public SelectionDAGISel {
  ARMBaseTargetMachine &TM;
  const ARMBaseInstrInfo *TII;

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;

public:
  explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm,
                           CodeGenOpt::Level OptLevel)
    : SelectionDAGISel(tm, OptLevel), TM(tm),
      TII(static_cast<const ARMBaseInstrInfo*>(TM.getInstrInfo())),
      Subtarget(&TM.getSubtarget<ARMSubtarget>()) {
  }

  virtual const char *getPassName() const {
    return "ARM Instruction Selection";
  }

  /// getI32Imm - Return a target constant of type i32 with the specified
  /// value.
  inline SDValue getI32Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i32);
  }

  SDNode *Select(SDNode *N);

  bool hasNoVMLxHazardUse(SDNode *N) const;
  bool isShifterOpProfitable(const SDValue &Shift,
                             ARM_AM::ShiftOpc ShOpcVal, unsigned ShAmt);
  bool SelectShifterOperandReg(SDValue N, SDValue &A,
                               SDValue &B, SDValue &C);
  bool SelectShiftShifterOperandReg(SDValue N, SDValue &A,
                                    SDValue &B, SDValue &C);
  bool SelectAddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset, SDValue &Opc);

  AddrMode2Type SelectAddrMode2Worker(SDValue N, SDValue &Base,
                                      SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2Base(SDValue N, SDValue &Base, SDValue &Offset,
                           SDValue &Opc) {
    return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_BASE;
  }

  bool SelectAddrMode2ShOp(SDValue N, SDValue &Base, SDValue &Offset,
                           SDValue &Opc) {
    return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_SHOP;
  }

  bool SelectAddrMode2(SDValue N, SDValue &Base, SDValue &Offset,
                       SDValue &Opc) {
    SelectAddrMode2Worker(N, Base, Offset, Opc);
//    return SelectAddrMode2ShOp(N, Base, Offset, Opc);
    // This always matches one way or another.
    return true;
  }

  bool SelectAddrMode2Offset(SDNode *Op, SDValue N,
                             SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode3(SDValue N, SDValue &Base,
                       SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode3Offset(SDNode *Op, SDValue N,
                             SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode5(SDValue N, SDValue &Base,
                       SDValue &Offset);
  bool SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
                       SDValue &Align);

  bool SelectAddrModePC(SDValue N, SDValue &Offset,
                        SDValue &Label);

  // Thumb Addressing Modes:
  bool SelectThumbAddrModeRR(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI(SDValue N, SDValue &Base, SDValue &Offset,
                             unsigned Scale);
  bool SelectThumbAddrModeRI5S1(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI5S2(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI5S4(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeImm5S(SDValue N, unsigned Scale, SDValue &Base,
                                SDValue &OffImm);
  bool SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeSP(SDValue N, SDValue &Base, SDValue &OffImm);

  // Thumb 2 Addressing Modes:
  bool SelectT2ShifterOperandReg(SDValue N,
                                 SDValue &BaseReg, SDValue &Opc);
  bool SelectT2AddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectT2AddrModeImm8(SDValue N, SDValue &Base,
                            SDValue &OffImm);
  bool SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
                                  SDValue &OffImm);
  bool SelectT2AddrModeSoReg(SDValue N, SDValue &Base,
                             SDValue &OffReg, SDValue &ShImm);

  inline bool is_so_imm(unsigned Imm) const {
    return ARM_AM::getSOImmVal(Imm) != -1;
  }

  inline bool is_so_imm_not(unsigned Imm) const {
    return ARM_AM::getSOImmVal(~Imm) != -1;
  }

  inline bool is_t2_so_imm(unsigned Imm) const {
    return ARM_AM::getT2SOImmVal(Imm) != -1;
  }

  inline bool is_t2_so_imm_not(unsigned Imm) const {
    return ARM_AM::getT2SOImmVal(~Imm) != -1;
  }

  inline bool Pred_so_imm(SDNode *inN) const {
    ConstantSDNode *N = cast<ConstantSDNode>(inN);
    return is_so_imm(N->getZExtValue());
  }

  inline bool Pred_t2_so_imm(SDNode *inN) const {
    ConstantSDNode *N = cast<ConstantSDNode>(inN);
    return is_t2_so_imm(N->getZExtValue());
  }
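
  // Worked examples for the predicates above (illustrative, not from the
  // original source):
  //   is_so_imm(0x000000FF)    -> true   (0xFF, rotation 0)
  //   is_so_imm(0x00000FF0)    -> true   (0xFF rotated right by 28)
  //   is_so_imm(0x00000101)    -> false  (more than 8 significant bits)
  //   is_t2_so_imm(0x00FF00FF) -> true   (Thumb-2 "0x00XY00XY" splat pattern)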

  // Include the pieces autogenerated from the target description.
#include "ARMGenDAGISel.inc"

private:
  /// SelectARMIndexedLoad - Indexed (pre/post inc/dec) load matching code for
  /// ARM.
  SDNode *SelectARMIndexedLoad(SDNode *N);
  SDNode *SelectT2IndexedLoad(SDNode *N);

  /// SelectVLD - Select NEON load intrinsics.  NumVecs should be
  /// 1, 2, 3 or 4.  The opcode arrays specify the instructions used for
  /// loads of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  SDNode *SelectVLD(SDNode *N, unsigned NumVecs, unsigned *DOpcodes,
                    unsigned *QOpcodes0, unsigned *QOpcodes1);

  /// SelectVST - Select NEON store intrinsics.  NumVecs should
  /// be 1, 2, 3 or 4.  The opcode arrays specify the instructions used for
  /// stores of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  SDNode *SelectVST(SDNode *N, unsigned NumVecs, unsigned *DOpcodes,
                    unsigned *QOpcodes0, unsigned *QOpcodes1);

  /// SelectVLDSTLane - Select NEON load/store lane intrinsics.  NumVecs should
  /// be 2, 3 or 4.  The opcode arrays specify the instructions used for
  /// load/store of D registers and Q registers.
  SDNode *SelectVLDSTLane(SDNode *N, bool IsLoad, unsigned NumVecs,
                          unsigned *DOpcodes, unsigned *QOpcodes);

  /// SelectVLDDup - Select NEON load-duplicate intrinsics.  NumVecs
  /// should be 2, 3 or 4.  The opcode array specifies the instructions used
  /// for loading D registers.  (Q registers are not supported.)
  SDNode *SelectVLDDup(SDNode *N, unsigned NumVecs, unsigned *Opcodes);

  /// SelectVTBL - Select NEON VTBL and VTBX intrinsics.  NumVecs should be 2,
  /// 3 or 4.  These are custom-selected so that a REG_SEQUENCE can be
  /// generated to force the table registers to be consecutive.
  SDNode *SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, unsigned Opc);

  /// SelectV6T2BitfieldExtractOp - Select SBFX/UBFX instructions for ARM.
  SDNode *SelectV6T2BitfieldExtractOp(SDNode *N, bool isSigned);

  /// SelectCMOVOp - Select CMOV instructions for ARM.
  SDNode *SelectCMOVOp(SDNode *N);
  SDNode *SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                              ARMCC::CondCodes CCVal, SDValue CCR,
                              SDValue InFlag);
  SDNode *SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                               ARMCC::CondCodes CCVal, SDValue CCR,
                               SDValue InFlag);
  SDNode *SelectT2CMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                            ARMCC::CondCodes CCVal, SDValue CCR,
                            SDValue InFlag);
  SDNode *SelectARMCMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                             ARMCC::CondCodes CCVal, SDValue CCR,
                             SDValue InFlag);

  SDNode *SelectConcatVector(SDNode *N);

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                            char ConstraintCode,
                                            std::vector<SDValue> &OutOps);

  // Form pairs of consecutive S, D, or Q registers.
  SDNode *PairSRegs(EVT VT, SDValue V0, SDValue V1);
  SDNode *PairDRegs(EVT VT, SDValue V0, SDValue V1);
  SDNode *PairQRegs(EVT VT, SDValue V0, SDValue V1);

  // Form sequences of 4 consecutive S, D, or Q registers.
  SDNode *QuadSRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
  SDNode *QuadDRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
  SDNode *QuadQRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);

  // Get the alignment operand for a NEON VLD or VST instruction.
  SDValue GetVLDSTAlign(SDValue Align, unsigned NumVecs, bool is64BitVector);
};
}

/// isInt32Immediate - This method tests to see if the node is a 32-bit
/// constant operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
  if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
    Imm = cast<ConstantSDNode>(N)->getZExtValue();
    return true;
  }
  return false;
}

// isInt32Immediate - This method tests to see if the value is a 32-bit
// constant operand. If so Imm will receive the 32 bit value.
static bool isInt32Immediate(SDValue N, unsigned &Imm) {
  return isInt32Immediate(N.getNode(), Imm);
}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so Imm will receive the 32 bit value.
static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
  return N->getOpcode() == Opc &&
         isInt32Immediate(N->getOperand(1).getNode(), Imm);
}

/// hasNoVMLxHazardUse - Return true if it's desirable to select a FP MLA / MLS
/// node. VFP / NEON fp VMLA / VMLS instructions have special RAW hazards (at
/// least on current ARM implementations) which should be avoided.
bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
  if (OptLevel == CodeGenOpt::None)
    return true;

  if (!CheckVMLxHazard)
    return true;

  if (!Subtarget->isCortexA8() && !Subtarget->isCortexA9())
    return true;

  if (!N->hasOneUse())
    return false;

  SDNode *Use = *N->use_begin();
  if (Use->getOpcode() == ISD::CopyToReg)
    return true;
  if (Use->isMachineOpcode()) {
    const TargetInstrDesc &TID = TII->get(Use->getMachineOpcode());
    if (TID.mayStore())
      return true;
    unsigned Opcode = TID.getOpcode();
    if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
      return true;
    // vmlx feeding into another vmlx. We actually want to unfold
    // the use later in the MLxExpansion pass. e.g.
    // vmla
    // vmla (stall 8 cycles)
    //
    // vmul (5 cycles)
    // vadd (5 cycles)
    // vmla
    // This adds up to about 18 - 19 cycles.
    //
    // vmla
    // vmul (stall 4 cycles)
    // vadd
    // This adds up to about 14 cycles.
    return TII->isFpMLxInstruction(Opcode);
  }

  return false;
}

bool ARMDAGToDAGISel::isShifterOpProfitable(const SDValue &Shift,
                                            ARM_AM::ShiftOpc ShOpcVal,
                                            unsigned ShAmt) {
  if (!Subtarget->isCortexA9())
    return true;
  if (Shift.hasOneUse())
    return true;
  // R << 2 is free.
  return ShOpcVal == ARM_AM::lsl && ShAmt == 2;
}
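
// For example (illustrative, not from the original source): on Cortex-A9 a
// shifter operand whose shift node has other uses is only treated as free
// for "lsl #2", as in
//   ldr r0, [r1, r2, lsl #2]
// other shared shifts are left as separate shift instructions.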

bool ARMDAGToDAGISel::SelectShifterOperandReg(SDValue N,
                                              SDValue &BaseReg,
                                              SDValue &ShReg,
                                              SDValue &Opc) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    ShReg = CurDAG->getRegister(0, MVT::i32);
    ShImmVal = RHS->getZExtValue() & 31;
  } else {
    ShReg = N.getOperand(1);
    if (!isShifterOpProfitable(N, ShOpcVal, ShImmVal))
      return false;
  }
  Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
                                  MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectShiftShifterOperandReg(SDValue N,
                                                   SDValue &BaseReg,
                                                   SDValue &ShReg,
                                                   SDValue &Opc) {
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  // Do not check isShifterOpProfitable. This must return true.
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    ShReg = CurDAG->getRegister(0, MVT::i32);
    ShImmVal = RHS->getZExtValue() & 31;
  } else {
    ShReg = N.getOperand(1);
  }
  Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
                                  MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
                                          SDValue &Base,
                                          SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index...
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      OffImm = CurDAG->getTargetConstant(0, MVT::i32);
      return true;
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               !(Subtarget->useMovt() &&
                 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    } else
      Base = N;
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}
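
// Illustrative example (not from the original source): this matches the
// "register plus unsigned 12-bit immediate" form, e.g.
//   ldr r0, [r1, #4095]
// out-of-range offsets fall back to the base-only form with a #0 offset.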

bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
                                      SDValue &Opc) {
  if (N.getOpcode() == ISD::MUL &&
      (!Subtarget->isCortexA9() || N.hasOneUse())) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      // X * [3,5,9] -> X + X * [2,4,8] etc.
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC & 1) {
        RHSC = RHSC & ~1;
        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = - RHSC;
        }
        if (isPowerOf2_32(RHSC)) {
          unsigned ShAmt = Log2_32(RHSC);
          Base = Offset = N.getOperand(0);
          Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
                                                            ARM_AM::lsl),
                                          MVT::i32);
          return true;
        }
      }
    }
  }

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB)
    return false;

  // Leave simple R +/- imm12 operands for LDRi12
  if (N.getOpcode() == ISD::ADD) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if ((RHSC >= 0 && RHSC < 0x1000) ||
          (RHSC < 0 && RHSC > -0x1000)) // 12 bits.
        return false;
    }
  }

  if (Subtarget->isCortexA9() && !N.hasOneUse())
    // Compute R +/- (R << N) and reuse it.
    return false;

  // Otherwise this is R +/- [possibly shifted] R.
  ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::ADD ? ARM_AM::add:ARM_AM::sub;
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(1));
  unsigned ShAmt = 0;

  Base   = N.getOperand(0);
  Offset = N.getOperand(1);

  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh =
          dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
        Offset = N.getOperand(1).getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  // Try matching (R shl C) + (R).
  if (N.getOpcode() == ISD::ADD && ShOpcVal == ARM_AM::no_shift &&
      !(Subtarget->isCortexA9() || N.getOperand(0).hasOneUse())) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0));
    if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant, if not, we can't
      // fold it.
      if (ConstantSDNode *Sh =
            dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
        ShAmt = Sh->getZExtValue();
        if (!Subtarget->isCortexA9() ||
            (N.hasOneUse() &&
             isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt))) {
          Offset = N.getOperand(0).getOperand(0);
          Base = N.getOperand(1);
        } else {
          ShAmt = 0;
          ShOpcVal = ARM_AM::no_shift;
        }
      } else {
        ShOpcVal = ARM_AM::no_shift;
      }
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return true;
}

AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
                                                     SDValue &Base,
                                                     SDValue &Offset,
                                                     SDValue &Opc) {
  if (N.getOpcode() == ISD::MUL &&
      (!Subtarget->isCortexA9() || N.hasOneUse())) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      // X * [3,5,9] -> X + X * [2,4,8] etc.
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC & 1) {
        RHSC = RHSC & ~1;
        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = - RHSC;
        }
        if (isPowerOf2_32(RHSC)) {
          unsigned ShAmt = Log2_32(RHSC);
          Base = Offset = N.getOperand(0);
          Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
                                                            ARM_AM::lsl),
                                          MVT::i32);
          return AM2_SHOP;
        }
      }
    }
  }

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               !(Subtarget->useMovt() &&
                 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    }
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
                                                      ARM_AM::no_shift),
                                    MVT::i32);
    return AM2_BASE;
  }

  // Match simple R +/- imm12 operands.
  if (N.getOpcode() == ISD::ADD) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if ((RHSC >= 0 && RHSC < 0x1000) ||
          (RHSC < 0 && RHSC > -0x1000)) { // 12 bits.
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
        }
        Offset = CurDAG->getRegister(0, MVT::i32);

        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = - RHSC;
        }
        Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,
                                                          ARM_AM::no_shift),
                                        MVT::i32);
        return AM2_BASE;
      }
    }
  }

  if (Subtarget->isCortexA9() && !N.hasOneUse()) {
    // Compute R +/- (R << N) and reuse it.
    Base = N;
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
                                                      ARM_AM::no_shift),
                                    MVT::i32);
    return AM2_BASE;
  }

  // Otherwise this is R +/- [possibly shifted] R.
  ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::ADD ? ARM_AM::add:ARM_AM::sub;
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(1));
  unsigned ShAmt = 0;

  Base   = N.getOperand(0);
  Offset = N.getOperand(1);

  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh =
          dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
        Offset = N.getOperand(1).getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  // Try matching (R shl C) + (R).
  if (N.getOpcode() == ISD::ADD && ShOpcVal == ARM_AM::no_shift &&
      !(Subtarget->isCortexA9() || N.getOperand(0).hasOneUse())) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0));
    if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant, if not, we can't
      // fold it.
      if (ConstantSDNode *Sh =
            dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
        ShAmt = Sh->getZExtValue();
        if (!Subtarget->isCortexA9() ||
            (N.hasOneUse() &&
             isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt))) {
          Offset = N.getOperand(0).getOperand(0);
          Base = N.getOperand(1);
        } else {
          ShAmt = 0;
          ShOpcVal = ARM_AM::no_shift;
        }
      } else {
        ShOpcVal = ARM_AM::no_shift;
      }
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return AM2_SHOP;
}

bool ARMDAGToDAGISel::SelectAddrMode2Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
    int Val = (int)C->getZExtValue();
    if (Val >= 0 && Val < 0x1000) { // 12 bits.
      Offset = CurDAG->getRegister(0, MVT::i32);
      Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
                                                        ARM_AM::no_shift),
                                      MVT::i32);
      return true;
    }
    return false;
  }

  Offset = N;
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);
  unsigned ShAmt = 0;
  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(N, ShOpcVal, ShAmt))
        Offset = N.getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
                                      SDValue &Base, SDValue &Offset,
                                      SDValue &Opc) {
  if (N.getOpcode() == ISD::SUB) {
    // X - C is canonicalized to X + -C, no need to handle it here.
    Base = N.getOperand(0);
    Offset = N.getOperand(1);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),MVT::i32);
    return true;
  }

  if (N.getOpcode() != ISD::ADD) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    }
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32);
    return true;
  }

  // If the RHS is +/- imm8, fold into addr mode.
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if ((RHSC >= 0 && RHSC < 256) ||
        (RHSC < 0 && RHSC > -256)) { // note -256 itself isn't allowed.
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      }
      Offset = CurDAG->getRegister(0, MVT::i32);

      ARM_AM::AddrOpc AddSub = ARM_AM::add;
      if (RHSC < 0) {
        AddSub = ARM_AM::sub;
        RHSC = - RHSC;
      }
      Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32);
      return true;
    }
  }

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), MVT::i32);
  return true;
}
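
// Illustrative examples (not from the original source) of addrmode3 forms:
//   ldrh  r0, [r1, #255]   @ base register +/- imm8
//   ldrsb r0, [r1, r2]     @ base register +/- offset register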

bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
    int Val = (int)C->getZExtValue();
    if (Val >= 0 && Val < 256) {
      Offset = CurDAG->getRegister(0, MVT::i32);
      Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);
      return true;
    }
    return false;
  }

  Offset = N;
  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
                                      SDValue &Base, SDValue &Offset) {
  if (N.getOpcode() != ISD::ADD) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               !(Subtarget->useMovt() &&
                 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    }
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
                                       MVT::i32);
    return true;
  }

  // If the RHS is +/- imm8, fold into addr mode.
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if ((RHSC & 3) == 0) { // The constant is implicitly multiplied by 4.
      RHSC >>= 2;
      if ((RHSC >= 0 && RHSC < 256) ||
          (RHSC < 0 && RHSC > -256)) { // note -256 itself isn't allowed.
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
        }

        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = - RHSC;
        }
        Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
                                           MVT::i32);
        return true;
      }
    }
  }

  Base = N;
  Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
                                     MVT::i32);
  return true;
}
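
// Illustrative example (not from the original source): addrmode5 offsets are
// stored in words, so the imm8 operand encodes a byte offset of imm8 * 4:
//   vldr d0, [r1, #1020]   @ encoded as imm8 == 255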

bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
                                      SDValue &Align) {
  Addr = N;

  unsigned Alignment = 0;
  if (LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(Parent)) {
    // This case occurs only for VLD1-lane/dup and VST1-lane instructions.
    // The maximum alignment is equal to the memory size being referenced.
    unsigned LSNAlign = LSN->getAlignment();
    unsigned MemSize = LSN->getMemoryVT().getSizeInBits() / 8;
    if (LSNAlign > MemSize && MemSize > 1)
      Alignment = MemSize;
  } else {
    // All other uses of addrmode6 are for intrinsics.  For now just record
    // the raw alignment value; it will be refined later based on the legal
    // alignment operands for the intrinsic.
    Alignment = cast<MemIntrinsicSDNode>(Parent)->getAlignment();
  }

  Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrModePC(SDValue N,
                                       SDValue &Offset, SDValue &Label) {
  if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
    Offset = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
                                      MVT::i32);
    return true;
  }

  return false;
}

//===----------------------------------------------------------------------===//
// Thumb Addressing Modes
//===----------------------------------------------------------------------===//

bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue N,
                                            SDValue &Base, SDValue &Offset){
  // FIXME: dl should come from the parent load or store, not the address.
  if (N.getOpcode() != ISD::ADD) {
    ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N);
    if (!NC || !NC->isNullValue())
      return false;

    Base = Offset = N;
    return true;
  }

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  return true;
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI(SDValue N, SDValue &Base,
                                       SDValue &Offset, unsigned Scale) {
  if (Scale == 4) {
    SDValue TmpBase, TmpOffImm;
    if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
      return false; // We want to select tLDRspi / tSTRspi instead.

    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
      return false; // We want to select tLDRpci instead.
  }

  if (N.getOpcode() != ISD::ADD)
    return false;

  // Thumb does not have [sp, r] address mode.
  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
  if ((LHSR && LHSR->getReg() == ARM::SP) ||
      (RHSR && RHSR->getReg() == ARM::SP))
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if ((RHSC & (Scale - 1)) == 0) { // The constant is implicitly multiplied.
      RHSC /= Scale;
      if (RHSC >= 0 && RHSC < 32)
        return false; // Let the scaled-immediate form match instead.
    }
  }

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  return true;
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S1(SDValue N,
                                          SDValue &Base, SDValue &Offset) {
  return SelectThumbAddrModeRI(N, Base, Offset, 1);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S2(SDValue N,
                                          SDValue &Base, SDValue &Offset) {
  return SelectThumbAddrModeRI(N, Base, Offset, 2);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S4(SDValue N,
                                          SDValue &Base, SDValue &Offset) {
  return SelectThumbAddrModeRI(N, Base, Offset, 4);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale,
                                          SDValue &Base, SDValue &OffImm) {
  if (Scale == 4) {
    SDValue TmpBase, TmpOffImm;
    if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
      return false; // We want to select tLDRspi / tSTRspi instead.

    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
      return false; // We want to select tLDRpci instead.
  }

  if (N.getOpcode() != ISD::ADD) {
    if (N.getOpcode() == ARMISD::Wrapper &&
        !(Subtarget->useMovt() &&
          N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    } else {
      Base = N;
    }

    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
  if ((LHSR && LHSR->getReg() == ARM::SP) ||
      (RHSR && RHSR->getReg() == ARM::SP)) {
    ConstantSDNode *LHS = dyn_cast<ConstantSDNode>(N.getOperand(0));
    ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
    unsigned LHSC = LHS ? LHS->getZExtValue() : 0;
    unsigned RHSC = RHS ? RHS->getZExtValue() : 0;

    // Thumb does not have [sp, #imm5] address mode for non-zero imm5.
    if (LHSC != 0 || RHSC != 0) return false;

    Base = N;
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  // If the RHS is + imm5 * scale, fold into addr mode.
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if ((RHSC & (Scale - 1)) == 0) { // The constant is implicitly multiplied.
      RHSC /= Scale;
      if (RHSC >= 0 && RHSC < 32) {
        Base = N.getOperand(0);
        OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
        return true;
      }
    }
  }

  Base = N.getOperand(0);
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 4, Base, OffImm);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 2, Base, OffImm);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 1, Base, OffImm);
}

bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (N.getOpcode() != ISD::ADD)
    return false;

  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  if (N.getOperand(0).getOpcode() == ISD::FrameIndex ||
      (LHSR && LHSR->getReg() == ARM::SP)) {
    // If the RHS is + imm8 * scale, fold into addr mode.
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if ((RHSC & 3) == 0) { // The constant is implicitly multiplied.
        RHSC >>= 2;
        if (RHSC >= 0 && RHSC < 256) {
          Base = N.getOperand(0);
          if (Base.getOpcode() == ISD::FrameIndex) {
            int FI = cast<FrameIndexSDNode>(Base)->getIndex();
            Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
          }
          OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
          return true;
        }
      }
    }
  }

  return false;
}
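
// Illustrative example (not from the original source): the SP-relative form
// scales an 8-bit word offset by 4, e.g.
//   ldr r0, [sp, #1020]   @ tLDRspi with offset operand 255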

//===----------------------------------------------------------------------===//
// Thumb 2 Addressing Modes
//===----------------------------------------------------------------------===//

bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDValue N, SDValue &BaseReg,
                                                SDValue &Opc) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    ShImmVal = RHS->getZExtValue() & 31;
    Opc = getI32Imm(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal));
    return true;
  }

  return false;
}

bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index...
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      OffImm = CurDAG->getTargetConstant(0, MVT::i32);
      return true;
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               !(Subtarget->useMovt() &&
                 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::TargetConstantPool)
        return false; // We want to select t2LDRpci instead.
    } else
      Base = N;
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    if (SelectT2AddrModeImm8(N, Base, OffImm))
      // Let t2LDRi8 handle (R - imm8).
      return false;

    int RHSC = (int)RHS->getZExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N,
                                           SDValue &Base, SDValue &OffImm) {
  // Match simple R - imm8 operands.
  if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::SUB) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int RHSC = (int)RHS->getSExtValue();
      if (N.getOpcode() == ISD::SUB)
        RHSC = -RHSC;

      if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative)
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
        }
        OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
        return true;
      }
    }
  }

  return false;
}

bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
                                                 SDValue &OffImm) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N)) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC >= 0 && RHSC < 0x100) { // 8 bits.
      OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
        ? CurDAG->getTargetConstant(RHSC, MVT::i32)
        : CurDAG->getTargetConstant(-RHSC, MVT::i32);
      return true;
    }
  }

  return false;
}

bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
                                            SDValue &Base,
                                            SDValue &OffReg, SDValue &ShImm) {
  // (R - imm8) should be handled by t2LDRi8. The rest are handled by t2LDRi12.
  if (N.getOpcode() != ISD::ADD)
    return false;

  // Leave (R + imm12) for t2LDRi12, (R - imm8) for t2LDRi8.
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC >= 0 && RHSC < 0x1000) // 12 bits (unsigned)
      return false;
    else if (RHSC < 0 && RHSC >= -255) // 8 bits
      return false;
  }

  if (Subtarget->isCortexA9() && !N.hasOneUse()) {
    // Compute R + (R << [1,2,3]) and reuse it.
    Base = N;
    return false;
  }

  // Look for (R + R) or (R + (R << [1,2,3])).
  unsigned ShAmt = 0;
  Base   = N.getOperand(0);
  OffReg = N.getOperand(1);

  // Swap if it is ((R << c) + R).
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg);
  if (ShOpcVal != ARM_AM::lsl) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(Base);
    if (ShOpcVal == ARM_AM::lsl)
      std::swap(Base, OffReg);
  }

  if (ShOpcVal == ARM_AM::lsl) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (ShAmt < 4 && isShifterOpProfitable(OffReg, ShOpcVal, ShAmt))
        OffReg = OffReg.getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  ShImm = CurDAG->getTargetConstant(ShAmt, MVT::i32);

  return true;
}
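
// Illustrative example (not from the original source): the Thumb-2 so_reg
// form accepts a left shift of 0 to 3, e.g.
//   ldr.w r0, [r1, r2, lsl #3]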

//===--------------------------------------------------------------------===//

/// getAL - Returns an ARMCC::AL immediate node.
static inline SDValue getAL(SelectionDAG *CurDAG) {
  return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32);
}

SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  if (AM == ISD::UNINDEXED)
    return NULL;

  EVT LoadedVT = LD->getMemoryVT();
  SDValue Offset, AMOpc;
  bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
  unsigned Opcode = 0;
  bool Match = false;
  if (LoadedVT == MVT::i32 &&
      SelectAddrMode2Offset(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = isPre ? ARM::LDR_PRE : ARM::LDR_POST;
    Match = true;
  } else if (LoadedVT == MVT::i16 &&
             SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
    Match = true;
    Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
      ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
      : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
  } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
    if (LD->getExtensionType() == ISD::SEXTLOAD) {
      if (SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST;
      }
    } else {
      if (SelectAddrMode2Offset(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = isPre ? ARM::LDRB_PRE : ARM::LDRB_POST;
      }
    }
  }

  if (Match) {
    SDValue Chain = LD->getChain();
    SDValue Base = LD->getBasePtr();
    SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG),
                     CurDAG->getRegister(0, MVT::i32), Chain };
    return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32, MVT::i32,
                                  MVT::Other, Ops, 6);
  }

  return NULL;
}
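
// Illustrative examples (not from the original source) of the indexed forms
// selected above:
//   ldr r0, [r1, #4]!   @ pre-indexed  (LDR_PRE):  base updated before access
//   ldr r0, [r1], #4    @ post-indexed (LDR_POST): base updated after access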

SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  if (AM == ISD::UNINDEXED)
    return NULL;

  EVT LoadedVT = LD->getMemoryVT();
  bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
  SDValue Offset;
  bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
  unsigned Opcode = 0;
  bool Match = false;
  if (SelectT2AddrModeImm8Offset(N, LD->getOffset(), Offset)) {
    switch (LoadedVT.getSimpleVT().SimpleTy) {
    case MVT::i32:
      Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST;
      break;
    case MVT::i16:
      if (isSExtLd)
        Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST;
      else
        Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST;
      break;
    case MVT::i8:
    case MVT::i1:
      if (isSExtLd)
        Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST;
      else
        Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST;
      break;
    default:
      return NULL;
    }
    Match = true;
  }

  if (Match) {
    SDValue Chain = LD->getChain();
    SDValue Base = LD->getBasePtr();
    SDValue Ops[]= { Base, Offset, getAL(CurDAG),
                     CurDAG->getRegister(0, MVT::i32), Chain };
    return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32, MVT::i32,
                                  MVT::Other, Ops, 5);
  }

  return NULL;
}

/// PairSRegs - Form a D register from a pair of S registers.
///
SDNode *ARMDAGToDAGISel::PairSRegs(EVT VT, SDValue V0, SDValue V1) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
  const SDValue Ops[] = { V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 4);
}

/// PairDRegs - Form a quad register from a pair of D registers.
///
SDNode *ARMDAGToDAGISel::PairDRegs(EVT VT, SDValue V0, SDValue V1) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
  const SDValue Ops[] = { V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 4);
}

/// PairQRegs - Form 4 consecutive D registers from a pair of Q registers.
///
SDNode *ARMDAGToDAGISel::PairQRegs(EVT VT, SDValue V0, SDValue V1) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
  const SDValue Ops[] = { V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 4);
}

/// QuadSRegs - Form 4 consecutive S registers.
///
SDNode *ARMDAGToDAGISel::QuadSRegs(EVT VT, SDValue V0, SDValue V1,
                                   SDValue V2, SDValue V3) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::ssub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::ssub_3, MVT::i32);
  const SDValue Ops[] = { V0, SubReg0, V1, SubReg1, V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 8);
}

/// QuadDRegs - Form 4 consecutive D registers.
///
SDNode *ARMDAGToDAGISel::QuadDRegs(EVT VT, SDValue V0, SDValue V1,
                                   SDValue V2, SDValue V3) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::dsub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::dsub_3, MVT::i32);
  const SDValue Ops[] = { V0, SubReg0, V1, SubReg1, V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 8);
}

/// QuadQRegs - Form 4 consecutive Q registers.
///
SDNode *ARMDAGToDAGISel::QuadQRegs(EVT VT, SDValue V0, SDValue V1,
                                   SDValue V2, SDValue V3) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::qsub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::qsub_3, MVT::i32);
  const SDValue Ops[] = { V0, SubReg0, V1, SubReg1, V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 8);
}

/// GetVLDSTAlign - Get the alignment (in bytes) for the alignment operand
/// of a NEON VLD or VST instruction.  The supported values depend on the
/// number of registers being loaded.
SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, unsigned NumVecs,
                                       bool is64BitVector) {
  unsigned NumRegs = NumVecs;
  if (!is64BitVector && NumVecs < 3)
    NumRegs *= 2;

  unsigned Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
  if (Alignment >= 32 && NumRegs == 4)
    Alignment = 32;
  else if (Alignment >= 16 && (NumRegs == 2 || NumRegs == 4))
    Alignment = 16;
  else if (Alignment >= 8)
    Alignment = 8;
  else
    Alignment = 0;

  return CurDAG->getTargetConstant(Alignment, MVT::i32);
}
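
// Worked example (illustrative, not from the original source): a VLD2 of two
// Q registers spans four D registers, so an alignment operand of 32 or more
// is kept as 32; a VLD1 of a single D register is clamped to 8.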

SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, unsigned NumVecs,
                                   unsigned *DOpcodes, unsigned *QOpcodes0,
                                   unsigned *QOpcodes1) {
  assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
  DebugLoc dl = N->getDebugLoc();

  SDValue MemAddr, Align;
  if (!SelectAddrMode6(N, N->getOperand(2), MemAddr, Align))
    return NULL;

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getValueType(0);
  bool is64BitVector = VT.is64BitVector();
  Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);

  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vld type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
  case MVT::v1i64: OpcodeIndex = 3; break;
    // Quad-register operations:
  case MVT::v16i8: OpcodeIndex = 0; break;
  case MVT::v8i16: OpcodeIndex = 1; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 2; break;
  case MVT::v2i64: OpcodeIndex = 3;
    assert(NumVecs == 1 && "v2i64 type only supported for VLD1");
    break;
  }

  EVT ResTy;
  if (NumVecs == 1)
    ResTy = VT;
  else {
    unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
    if (!is64BitVector)
      ResTyElts *= 2;
    ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts);
  }

  SDValue Pred = getAL(CurDAG);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
  SDValue SuperReg;
  if (is64BitVector) {
    unsigned Opc = DOpcodes[OpcodeIndex];
    const SDValue Ops[] = { MemAddr, Align, Pred, Reg0, Chain };
    SDNode *VLd = CurDAG->getMachineNode(Opc, dl, ResTy, MVT::Other, Ops, 5);
    if (NumVecs == 1)
      return VLd;

    SuperReg = SDValue(VLd, 0);
    assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
    for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
      SDValue D = CurDAG->getTargetExtractSubreg(ARM::dsub_0+Vec,
                                                 dl, VT, SuperReg);
      ReplaceUses(SDValue(N, Vec), D);
    }
    ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
    return NULL;
  }

  if (NumVecs <= 2) {
    // Quad registers are directly supported for VLD1 and VLD2,
    // loading pairs of D regs.
    unsigned Opc = QOpcodes0[OpcodeIndex];
    const SDValue Ops[] = { MemAddr, Align, Pred, Reg0, Chain };
    SDNode *VLd = CurDAG->getMachineNode(Opc, dl, ResTy, MVT::Other, Ops, 5);
    if (NumVecs == 1)
      return VLd;

    SuperReg = SDValue(VLd, 0);
    Chain = SDValue(VLd, 1);
  } else {
    // Otherwise, quad registers are loaded with two separate instructions,
    // where one loads the even registers and the other loads the odd registers.
    EVT AddrTy = MemAddr.getValueType();

    // Load the even subregs.
    unsigned Opc = QOpcodes0[OpcodeIndex];
    SDValue ImplDef =
      SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, ResTy), 0);
    const SDValue OpsA[] = { MemAddr, Align, Reg0, ImplDef, Pred, Reg0, Chain };
    SDNode *VLdA =
      CurDAG->getMachineNode(Opc, dl, ResTy, AddrTy, MVT::Other, OpsA, 7);
    Chain = SDValue(VLdA, 2);

    // Load the odd subregs.
    Opc = QOpcodes1[OpcodeIndex];
    const SDValue OpsB[] = { SDValue(VLdA, 1), Align, Reg0, SDValue(VLdA, 0),
                             Pred, Reg0, Chain };
    SDNode *VLdB =
      CurDAG->getMachineNode(Opc, dl, ResTy, AddrTy, MVT::Other, OpsB, 7);
    SuperReg = SDValue(VLdB, 0);
    Chain = SDValue(VLdB, 2);
  }

  // Extract out the Q registers.
  assert(ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
  for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
    SDValue Q = CurDAG->getTargetExtractSubreg(ARM::qsub_0+Vec,
                                               dl, VT, SuperReg);
    ReplaceUses(SDValue(N, Vec), Q);
  }
  ReplaceUses(SDValue(N, NumVecs), Chain);
  return NULL;
}

SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, unsigned NumVecs,
                                   unsigned *DOpcodes, unsigned *QOpcodes0,
                                   unsigned *QOpcodes1) {
  assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
  DebugLoc dl = N->getDebugLoc();

  SDValue MemAddr, Align;
  if (!SelectAddrMode6(N, N->getOperand(2), MemAddr, Align))
    return NULL;

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getOperand(3).getValueType();
  bool is64BitVector = VT.is64BitVector();
  Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);

  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vst type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
  case MVT::v1i64: OpcodeIndex = 3; break;
    // Quad-register operations:
  case MVT::v16i8: OpcodeIndex = 0; break;
  case MVT::v8i16: OpcodeIndex = 1; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 2; break;
  case MVT::v2i64: OpcodeIndex = 3;
    assert(NumVecs == 1 && "v2i64 type only supported for VST1");
    break;
  }

  SDValue Pred = getAL(CurDAG);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);

  SmallVector<SDValue, 7> Ops;
  Ops.push_back(MemAddr);
  Ops.push_back(Align);

  if (is64BitVector) {
    if (NumVecs == 1) {
      Ops.push_back(N->getOperand(3));
    } else {
      SDValue RegSeq;
      SDValue V0 = N->getOperand(0+3);
      SDValue V1 = N->getOperand(1+3);

      // Form a REG_SEQUENCE to force register allocation.
      if (NumVecs == 2)
        RegSeq = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
      else {
        SDValue V2 = N->getOperand(2+3);
        // If it's a vst3, form a quad D-register and leave the last part as
        // an undef.
        SDValue V3 = (NumVecs == 3)
          ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
          : N->getOperand(3+3);
        RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
      }
      Ops.push_back(RegSeq);
    }
    Ops.push_back(Pred);
    Ops.push_back(Reg0); // predicate register
    Ops.push_back(Chain);
    unsigned Opc = DOpcodes[OpcodeIndex];
    return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops.data(), 6);
  }

  if (NumVecs <= 2) {
    // Quad registers are directly supported for VST1 and VST2.
    unsigned Opc = QOpcodes0[OpcodeIndex];
    if (NumVecs == 1) {
      Ops.push_back(N->getOperand(3));
    } else {
      // Form a QQ register.
      SDValue Q0 = N->getOperand(3);
      SDValue Q1 = N->getOperand(4);
      Ops.push_back(SDValue(PairQRegs(MVT::v4i64, Q0, Q1), 0));
    }
    Ops.push_back(Pred);
    Ops.push_back(Reg0); // predicate register
    Ops.push_back(Chain);
    return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops.data(), 6);
  }

  // Otherwise, quad registers are stored with two separate instructions,
  // where one stores the even registers and the other stores the odd registers.

  // Form the QQQQ REG_SEQUENCE.
  SDValue V0 = N->getOperand(0+3);
  SDValue V1 = N->getOperand(1+3);
  SDValue V2 = N->getOperand(2+3);
  SDValue V3 = (NumVecs == 3)
    ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
    : N->getOperand(3+3);
  SDValue RegSeq = SDValue(QuadQRegs(MVT::v8i64, V0, V1, V2, V3), 0);

  // Store the even D registers.
  Ops.push_back(Reg0); // post-access address offset
  Ops.push_back(RegSeq);
  Ops.push_back(Pred);
  Ops.push_back(Reg0); // predicate register
  Ops.push_back(Chain);
  unsigned Opc = QOpcodes0[OpcodeIndex];
  SDNode *VStA = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
                                        MVT::Other, Ops.data(), 7);
  Chain = SDValue(VStA, 1);

  // Store the odd D registers.
  Ops[0] = SDValue(VStA, 0); // MemAddr
  Ops[6] = Chain;
  Opc = QOpcodes1[OpcodeIndex];
  SDNode *VStB = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
                                        MVT::Other, Ops.data(), 7);
  Chain = SDValue(VStB, 1);
  ReplaceUses(SDValue(N, 0), Chain);
  return NULL;
}

SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
                                         unsigned NumVecs, unsigned *DOpcodes,
                                         unsigned *QOpcodes) {
  assert(NumVecs >=2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
  DebugLoc dl = N->getDebugLoc();

  SDValue MemAddr, Align;
  if (!SelectAddrMode6(N, N->getOperand(2), MemAddr, Align))
    return NULL;

  SDValue Chain = N->getOperand(0);
  unsigned Lane =
    cast<ConstantSDNode>(N->getOperand(NumVecs+3))->getZExtValue();
  EVT VT = IsLoad ? N->getValueType(0) : N->getOperand(3).getValueType();
  bool is64BitVector = VT.is64BitVector();

  unsigned Alignment = 0;
  if (NumVecs != 3) {
    Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
    unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
    if (Alignment > NumBytes)
      Alignment = NumBytes;
    if (Alignment < 8 && Alignment < NumBytes)
      Alignment = 0;
    // Alignment must be a power of two; make sure of that.
    Alignment = (Alignment & -Alignment);
    if (Alignment == 1)
      Alignment = 0;
  }
  Align = CurDAG->getTargetConstant(Alignment, MVT::i32);

  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vld/vst lane type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
    // Quad-register operations:
  case MVT::v8i16: OpcodeIndex = 0; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 1; break;
  }

  SDValue Pred = getAL(CurDAG);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);

  SmallVector<SDValue, 7> Ops;
  Ops.push_back(MemAddr);
  Ops.push_back(Align);

  unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
                                  QOpcodes[OpcodeIndex]);

  SDValue SuperReg;
  SDValue V0 = N->getOperand(0+3);
  SDValue V1 = N->getOperand(1+3);
  if (NumVecs == 2) {
    if (is64BitVector)
      SuperReg = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
    else
      SuperReg = SDValue(PairQRegs(MVT::v4i64, V0, V1), 0);
  } else {
    SDValue V2 = N->getOperand(2+3);
    SDValue V3 = (NumVecs == 3)
      ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
      : N->getOperand(3+3);
    if (is64BitVector)
      SuperReg = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
    else
      SuperReg = SDValue(QuadQRegs(MVT::v8i64, V0, V1, V2, V3), 0);
  }
  Ops.push_back(SuperReg);
  Ops.push_back(getI32Imm(Lane));
  Ops.push_back(Pred);
  Ops.push_back(Reg0);
  Ops.push_back(Chain);

  if (!IsLoad)
    return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops.data(), 7);

  EVT ResTy;
  unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
  if (!is64BitVector)
    ResTyElts *= 2;
  ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts);

  SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTy, MVT::Other,
                                         Ops.data(), 7);
  SuperReg = SDValue(VLdLn, 0);
  Chain = SDValue(VLdLn, 1);

  // Extract the subregisters.
  assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
  assert(ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
  unsigned SubIdx = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
  for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
    ReplaceUses(SDValue(N, Vec),
                CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, SuperReg));
  ReplaceUses(SDValue(N, NumVecs), Chain);
  return NULL;
}

SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, unsigned NumVecs,
                                      unsigned *Opcodes) {
  assert(NumVecs >=2 && NumVecs <= 4 && "VLDDup NumVecs out-of-range");
  DebugLoc dl = N->getDebugLoc();

  SDValue MemAddr, Align;
  if (!SelectAddrMode6(N, N->getOperand(1), MemAddr, Align))
    return NULL;

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getValueType(0);

  unsigned Alignment = 0;
  if (NumVecs != 3) {
    Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
    unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
    if (Alignment > NumBytes)
      Alignment = NumBytes;
    if (Alignment < 8 && Alignment < NumBytes)
      Alignment = 0;
    // Alignment must be a power of two; make sure of that.
    Alignment = (Alignment & -Alignment);
    if (Alignment == 1)
      Alignment = 0;
  }
  Align = CurDAG->getTargetConstant(Alignment, MVT::i32);

  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vld-dup type");
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
  }

  SDValue Pred = getAL(CurDAG);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
  SDValue SuperReg;
  unsigned Opc = Opcodes[OpcodeIndex];
  const SDValue Ops[] = { MemAddr, Align, Pred, Reg0, Chain };

  unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
  EVT ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts);
  SDNode *VLdDup = CurDAG->getMachineNode(Opc, dl, ResTy, MVT::Other, Ops, 5);
  SuperReg = SDValue(VLdDup, 0);
  Chain = SDValue(VLdDup, 1);

  // Extract the subregisters.
  assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
  unsigned SubIdx = ARM::dsub_0;
  for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
    ReplaceUses(SDValue(N, Vec),
                CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, SuperReg));
  ReplaceUses(SDValue(N, NumVecs), Chain);
  return NULL;
}

SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs,
                                    unsigned Opc) {
  assert(NumVecs >= 2 && NumVecs <= 4 && "VTBL NumVecs out-of-range");
  DebugLoc dl = N->getDebugLoc();
  EVT VT = N->getValueType(0);
  unsigned FirstTblReg = IsExt ? 2 : 1;

  // Form a REG_SEQUENCE to force register allocation.
  SDValue RegSeq;
  SDValue V0 = N->getOperand(FirstTblReg + 0);
  SDValue V1 = N->getOperand(FirstTblReg + 1);
  if (NumVecs == 2)
    RegSeq = SDValue(PairDRegs(MVT::v16i8, V0, V1), 0);
  else {
    SDValue V2 = N->getOperand(FirstTblReg + 2);
    // If it's a vtbl3, form a quad D-register and leave the last part as
    // an undef.
    SDValue V3 = (NumVecs == 3)
      ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
      : N->getOperand(FirstTblReg + 3);
    RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
  }

  SmallVector<SDValue, 6> Ops;
  if (IsExt)
    Ops.push_back(N->getOperand(1));
  Ops.push_back(RegSeq);
  Ops.push_back(N->getOperand(FirstTblReg + NumVecs));
  Ops.push_back(getAL(CurDAG)); // predicate
  Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // predicate register
  return CurDAG->getMachineNode(Opc, dl, VT, Ops.data(), Ops.size());
}
1883 SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
1885 if (!Subtarget->hasV6T2Ops())
1888 unsigned Opc = isSigned ? (Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX)
1889 : (Subtarget->isThumb() ? ARM::t2UBFX : ARM::UBFX);
  // For unsigned extracts, check for a shift right and mask.
  unsigned And_imm = 0;
  if (N->getOpcode() == ISD::AND) {
    if (isOpcWithIntImmediate(N, ISD::AND, And_imm)) {

      // The immediate is a mask of the low bits iff imm & (imm+1) == 0.
      if (And_imm & (And_imm + 1))
        return NULL;

      unsigned Srl_imm = 0;
      if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL,
                                Srl_imm)) {
        assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");

        unsigned Width = CountTrailingOnes_32(And_imm);
        unsigned LSB = Srl_imm;
        SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
        SDValue Ops[] = { N->getOperand(0).getOperand(0),
                          CurDAG->getTargetConstant(LSB, MVT::i32),
                          CurDAG->getTargetConstant(Width, MVT::i32),
                          getAL(CurDAG), Reg0 };
        return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
      }
    }
    return NULL;
  }

  // Otherwise, we're looking for a shift of a shift:
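  //   (srl/sra (shl x, c2), c1) extracts (32 - c1) bits of x starting at
  //   bit (c1 - c2), so c1 must be at least c2 for the field to be in range.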
  unsigned Shl_imm = 0;
  if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
    assert(Shl_imm > 0 && Shl_imm < 32 && "bad amount in shift node!");
    unsigned Srl_imm = 0;
    if (isInt32Immediate(N->getOperand(1), Srl_imm)) {
      assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
      unsigned Width = 32 - Srl_imm;
      int LSB = Srl_imm - Shl_imm;
      if (LSB < 0)
        return NULL;
      SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
      SDValue Ops[] = { N->getOperand(0).getOperand(0),
                        CurDAG->getTargetConstant(LSB, MVT::i32),
                        CurDAG->getTargetConstant(Width, MVT::i32),
                        getAL(CurDAG), Reg0 };
      return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
    }
  }
  return NULL;
}

SDNode *ARMDAGToDAGISel::
SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                    ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
  SDValue CPTmp0;
  SDValue CPTmp1;
  if (SelectT2ShifterOperandReg(TrueVal, CPTmp0, CPTmp1)) {
    unsigned SOVal = cast<ConstantSDNode>(CPTmp1)->getZExtValue();
    unsigned SOShOp = ARM_AM::getSORegShOp(SOVal);
    unsigned Opc = 0;
    switch (SOShOp) {
    case ARM_AM::lsl: Opc = ARM::t2MOVCClsl; break;
    case ARM_AM::lsr: Opc = ARM::t2MOVCClsr; break;
    case ARM_AM::asr: Opc = ARM::t2MOVCCasr; break;
    case ARM_AM::ror: Opc = ARM::t2MOVCCror; break;
    default:
      llvm_unreachable("Unknown so_reg opcode!");
      break;
    }
    SDValue SOShImm =
      CurDAG->getTargetConstant(ARM_AM::getSORegOffset(SOVal), MVT::i32);
    SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
    SDValue Ops[] = { FalseVal, CPTmp0, SOShImm, CC, CCR, InFlag };
    return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 6);
  }
  return 0;
}

SDNode *ARMDAGToDAGISel::
SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                     ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
  SDValue CPTmp0;
  SDValue CPTmp1;
  SDValue CPTmp2;
  if (SelectShifterOperandReg(TrueVal, CPTmp0, CPTmp1, CPTmp2)) {
    SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
    SDValue Ops[] = { FalseVal, CPTmp0, CPTmp1, CPTmp2, CC, CCR, InFlag };
    return CurDAG->SelectNodeTo(N, ARM::MOVCCs, MVT::i32, Ops, 7);
  }
  return 0;
}

SDNode *ARMDAGToDAGISel::
SelectT2CMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                  ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
  ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
  if (!T)
    return 0;

  unsigned Opc = 0;
  unsigned TrueImm = T->getZExtValue();
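  // Try the cheapest encodings first: a Thumb-2 modified immediate, then a
  // 16-bit MOVW-style immediate, then MVN of the complemented immediate,
  // and finally a two-instruction pseudo when the constant has no other use.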
  if (is_t2_so_imm(TrueImm)) {
    Opc = ARM::t2MOVCCi;
  } else if (TrueImm <= 0xffff) {
    Opc = ARM::t2MOVCCi16;
  } else if (is_t2_so_imm_not(TrueImm)) {
    TrueImm = ~TrueImm;
    Opc = ARM::t2MVNCCi;
  } else if (TrueVal.getNode()->hasOneUse() && Subtarget->hasV6T2Ops()) {
    // Large immediate.
    Opc = ARM::t2MOVCCi32imm;
  }

  if (Opc) {
    SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
    SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
    SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag };
    return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
  }
  return 0;
}

SDNode *ARMDAGToDAGISel::
SelectARMCMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                   ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
  ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
  if (!T)
    return 0;

  unsigned Opc = 0;
  unsigned TrueImm = T->getZExtValue();
  bool isSoImm = is_so_imm(TrueImm);
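  // Same cascade as the Thumb-2 version: a rotated 8-bit so_imm, then MOVW
  // on v6T2+, then MVN of the complemented value, then a two-instruction
  // sequence when the constant is only used here.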
  if (isSoImm) {
    Opc = ARM::MOVCCi;
  } else if (Subtarget->hasV6T2Ops() && TrueImm <= 0xffff) {
    Opc = ARM::MOVCCi16;
  } else if (is_so_imm_not(TrueImm)) {
    TrueImm = ~TrueImm;
    Opc = ARM::MVNCCi;
  } else if (TrueVal.getNode()->hasOneUse() &&
             (Subtarget->hasV6T2Ops() || ARM_AM::isSOImmTwoPartVal(TrueImm))) {
    // Large immediate.
    Opc = ARM::MOVCCi32imm;
  }

  if (Opc) {
    SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
    SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
    SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag };
    return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
  }
  return 0;
}

SDNode *ARMDAGToDAGISel::SelectCMOVOp(SDNode *N) {
  EVT VT = N->getValueType(0);
  SDValue FalseVal = N->getOperand(0);
  SDValue TrueVal  = N->getOperand(1);
  SDValue CC = N->getOperand(2);
  SDValue CCR = N->getOperand(3);
  SDValue InFlag = N->getOperand(4);
  assert(CC.getOpcode() == ISD::Constant);
  assert(CCR.getOpcode() == ISD::Register);
  ARMCC::CondCodes CCVal =
    (ARMCC::CondCodes)cast<ConstantSDNode>(CC)->getZExtValue();

  if (!Subtarget->isThumb1Only() && VT == MVT::i32) {
    // Pattern: (ARMcmov:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
    // Emits: (MOVCCs:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
    // Pattern complexity = 18  cost = 1  size = 0
    SDValue CPTmp0;
    SDValue CPTmp1;
    SDValue CPTmp2;
    if (Subtarget->isThumb()) {
      SDNode *Res = SelectT2CMOVShiftOp(N, FalseVal, TrueVal,
                                        CCVal, CCR, InFlag);
      if (!Res)
        Res = SelectT2CMOVShiftOp(N, TrueVal, FalseVal,
                               ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
      if (Res)
        return Res;
    } else {
      SDNode *Res = SelectARMCMOVShiftOp(N, FalseVal, TrueVal,
                                         CCVal, CCR, InFlag);
      if (!Res)
        Res = SelectARMCMOVShiftOp(N, TrueVal, FalseVal,
                               ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
      if (Res)
        return Res;
    }

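    // If only the true value has a profitable shifter-operand form, the
    // selection is retried above with the operands swapped and the
    // condition inverted; the same trick is used for immediates below.
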
    // Pattern: (ARMcmov:i32 GPR:i32:$false,
    //             (imm:i32)<<P:Pred_so_imm>>:$true,
    //             (imm:i32):$cc)
    // Emits: (MOVCCi:i32 GPR:i32:$false,
    //           (so_imm:i32 (imm:i32):$true), (imm:i32):$cc)
    // Pattern complexity = 10  cost = 1  size = 0
    if (Subtarget->isThumb()) {
      SDNode *Res = SelectT2CMOVImmOp(N, FalseVal, TrueVal,
                                      CCVal, CCR, InFlag);
      if (!Res)
        Res = SelectT2CMOVImmOp(N, TrueVal, FalseVal,
                               ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
      if (Res)
        return Res;
    } else {
      SDNode *Res = SelectARMCMOVImmOp(N, FalseVal, TrueVal,
                                       CCVal, CCR, InFlag);
      if (!Res)
        Res = SelectARMCMOVImmOp(N, TrueVal, FalseVal,
                               ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
      if (Res)
        return Res;
    }
  }

  // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
  // Emits: (MOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
  // Pattern complexity = 6  cost = 1  size = 0
  //
  // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
  // Emits: (tMOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
  // Pattern complexity = 6  cost = 11  size = 0
  //
  // Also FCPYScc and FCPYDcc.
  SDValue Tmp2 = CurDAG->getTargetConstant(CCVal, MVT::i32);
  SDValue Ops[] = { FalseVal, TrueVal, Tmp2, CCR, InFlag };
  unsigned Opc = 0;
  switch (VT.getSimpleVT().SimpleTy) {
  default: assert(false && "Illegal conditional move type!");
    break;
  case MVT::i32:
    Opc = Subtarget->isThumb()
      ? (Subtarget->hasThumb2() ? ARM::t2MOVCCr : ARM::tMOVCCr_pseudo)
      : ARM::MOVCCr;
    break;
  case MVT::f32:
    Opc = ARM::VMOVScc;
    break;
  case MVT::f64:
    Opc = ARM::VMOVDcc;
    break;
  }
  return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 5);
}

SDNode *ARMDAGToDAGISel::SelectConcatVector(SDNode *N) {
  // The only time a CONCAT_VECTORS operation can have legal types is when
  // two 64-bit vectors are concatenated to a 128-bit vector.
  EVT VT = N->getValueType(0);
  if (!VT.is128BitVector() || N->getNumOperands() != 2)
    llvm_unreachable("unexpected CONCAT_VECTORS");
  return PairDRegs(VT, N->getOperand(0), N->getOperand(1));
}

SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
  DebugLoc dl = N->getDebugLoc();

  if (N->isMachineOpcode())
    return NULL;   // Already selected.

  switch (N->getOpcode()) {
  default: break;
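  // Materialize i32 constants inline when they fit in at most two
  // instructions (MOV/MVN, possibly plus a shift or a second instruction);
  // otherwise load them from the constant pool.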
  case ISD::Constant: {
    unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
    bool UseCP = true;
    if (Subtarget->hasThumb2())
      // Thumb2-aware targets have the MOVT instruction, so all immediates can
      // be done with MOV + MOVT, at worst.
      UseCP = false;
    else {
      if (Subtarget->isThumb()) {
        UseCP = (Val > 255 &&                          // MOV
                 ~Val > 255 &&                         // MOV + MVN
                 !ARM_AM::isThumbImmShiftedVal(Val));  // MOV + LSL
      } else
        UseCP = (ARM_AM::getSOImmVal(Val) == -1 &&     // MOV
                 ARM_AM::getSOImmVal(~Val) == -1 &&    // MVN
                 !ARM_AM::isSOImmTwoPartVal(Val));     // two instrs.
    }

    if (UseCP) {
      SDValue CPIdx =
        CurDAG->getTargetConstantPool(ConstantInt::get(
                                  Type::getInt32Ty(*CurDAG->getContext()), Val),
                                      TLI.getPointerTy());

      SDNode *ResNode;
      if (Subtarget->isThumb1Only()) {
        SDValue Pred = getAL(CurDAG);
        SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
        SDValue Ops[] = { CPIdx, Pred, PredReg, CurDAG->getEntryNode() };
        ResNode = CurDAG->getMachineNode(ARM::tLDRpci, dl, MVT::i32, MVT::Other,
                                         Ops, 4);
      } else {
        SDValue Ops[] = {
          CPIdx,
          CurDAG->getTargetConstant(0, MVT::i32),
          getAL(CurDAG),
          CurDAG->getRegister(0, MVT::i32),
          CurDAG->getEntryNode()
        };
        ResNode = CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
                                         Ops, 5);
      }
      ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0));
      return NULL;
    }

    // Other cases are autogenerated.
    break;
  }
  case ISD::FrameIndex: {
    // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    if (Subtarget->isThumb1Only()) {
      return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, TFI,
                                  CurDAG->getTargetConstant(0, MVT::i32));
    } else {
      unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
                      ARM::t2ADDri : ARM::ADDri);
      SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
                        CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
    }
  }
  case ISD::SRL:
    if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
      return I;
    break;
  case ISD::SRA:
    if (SDNode *I = SelectV6T2BitfieldExtractOp(N, true))
      return I;
    break;
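  // Multiplies by 2^n plus or minus one are strength-reduced below to an
  // add or reverse-subtract with a shifted operand, e.g.
  //   x * 9 => add r0, r0, r0, lsl #3
  //   x * 7 => rsb r0, r0, r0, lsl #3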
  case ISD::MUL:
    if (Subtarget->isThumb1Only())
      break;
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
      unsigned RHSV = C->getZExtValue();
      if (!RHSV) break;
      if (isPowerOf2_32(RHSV-1)) {  // 2^n+1?
        unsigned ShImm = Log2_32(RHSV-1);
        if (ShImm >= 32)
          break;
        SDValue V = N->getOperand(0);
        ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
        SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
        SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
        if (Subtarget->isThumb()) {
          SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
          return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops, 6);
        } else {
          SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
          return CurDAG->SelectNodeTo(N, ARM::ADDrs, MVT::i32, Ops, 7);
        }
      }
      if (isPowerOf2_32(RHSV+1)) {  // 2^n-1?
        unsigned ShImm = Log2_32(RHSV+1);
        if (ShImm >= 32)
          break;
        SDValue V = N->getOperand(0);
        ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
        SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
        SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
        if (Subtarget->isThumb()) {
          SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
          return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops, 6);
        } else {
          SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
          return CurDAG->SelectNodeTo(N, ARM::RSBrs, MVT::i32, Ops, 7);
        }
      }
    }
    break;
  case ISD::AND: {
    // Check for unsigned bitfield extract.
    if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
      return I;

    // (and (or x, c2), c1), where the top 16 bits of c1 and c2 match, the
    // lower 16 bits of c1 are 0xffff, and the lower 16 bits of c2 are 0.
    // The top 16 bits of the result then come entirely from c2 and the lower
    // 16 bits entirely from x, i.e. (or (and x, 0xffff), (and c2, 0xffff0000)).
    // Select it to: "movt x, ((c2 & 0xffff0000) >> 16)".
    EVT VT = N->getValueType(0);
    if (VT != MVT::i32)
      break;
    unsigned Opc = (Subtarget->isThumb() && Subtarget->hasThumb2())
      ? ARM::t2MOVTi16
      : (Subtarget->hasV6T2Ops() ? ARM::MOVTi16 : 0);
    if (!Opc)
      break;
    SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    if (!N1C)
      break;
    if (N0.getOpcode() == ISD::OR && N0.getNode()->hasOneUse()) {
      SDValue N2 = N0.getOperand(1);
      ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
      if (!N2C)
        break;
      unsigned N1CVal = N1C->getZExtValue();
      unsigned N2CVal = N2C->getZExtValue();
      if ((N1CVal & 0xffff0000U) == (N2CVal & 0xffff0000U) &&
          (N1CVal & 0xffffU) == 0xffffU &&
          (N2CVal & 0xffffU) == 0x0U) {
        SDValue Imm16 = CurDAG->getTargetConstant((N2CVal & 0xFFFF0000U) >> 16,
                                                  MVT::i32);
        SDValue Ops[] = { N0.getOperand(0), Imm16,
                          getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
        return CurDAG->getMachineNode(Opc, dl, VT, Ops, 4);
      }
    }
    break;
  }
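  // ARMISD::VMOVRRD moves a 64-bit VFP/NEON D register into a pair of
  // 32-bit core registers.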
  case ARMISD::VMOVRRD:
    return CurDAG->getMachineNode(ARM::VMOVRRD, dl, MVT::i32, MVT::i32,
                                  N->getOperand(0), getAL(CurDAG),
                                  CurDAG->getRegister(0, MVT::i32));
  case ISD::UMUL_LOHI: {
    if (Subtarget->isThumb1Only())
      break;
    if (Subtarget->isThumb()) {
      SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32,
                                    Ops, 4);
    } else {
      SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
                        CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->getMachineNode(ARM::UMULL, dl, MVT::i32, MVT::i32, Ops, 5);
    }
  }
  case ISD::SMUL_LOHI: {
    if (Subtarget->isThumb1Only())
      break;
    if (Subtarget->isThumb()) {
      SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32,
                                    Ops, 4);
    } else {
      SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
                        CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->getMachineNode(ARM::SMULL, dl, MVT::i32, MVT::i32, Ops, 5);
    }
  }
  case ISD::LOAD: {
    SDNode *ResNode = 0;
    if (Subtarget->isThumb() && Subtarget->hasThumb2())
      ResNode = SelectT2IndexedLoad(N);
    else
      ResNode = SelectARMIndexedLoad(N);
    if (ResNode)
      return ResNode;
    // Other cases are autogenerated.
    break;
  }
  case ARMISD::BRCOND: {
    // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
    // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc)
    // Pattern complexity = 6  cost = 1  size = 0
    //
    // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
    // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc)
    // Pattern complexity = 6  cost = 1  size = 0
    //
    // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
    // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc)
    // Pattern complexity = 6  cost = 1  size = 0

    unsigned Opc = Subtarget->isThumb() ?
      ((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
    SDValue Chain = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    SDValue N3 = N->getOperand(3);
    SDValue InFlag = N->getOperand(4);
    assert(N1.getOpcode() == ISD::BasicBlock);
    assert(N2.getOpcode() == ISD::Constant);
    assert(N3.getOpcode() == ISD::Register);

    SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
                               cast<ConstantSDNode>(N2)->getZExtValue()),
                                             MVT::i32);
    SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
    SDNode *ResNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
                                             MVT::Glue, Ops, 5);
    Chain = SDValue(ResNode, 0);
    if (N->getNumValues() == 2) {
      InFlag = SDValue(ResNode, 1);
      ReplaceUses(SDValue(N, 1), InFlag);
    }
    ReplaceUses(SDValue(N, 0),
                SDValue(Chain.getNode(), Chain.getResNo()));
    return NULL;
  }
  case ARMISD::CMOV:
    return SelectCMOVOp(N);
  case ARMISD::CNEG: {
    EVT VT = N->getValueType(0);
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    SDValue N3 = N->getOperand(3);
    SDValue InFlag = N->getOperand(4);
    assert(N2.getOpcode() == ISD::Constant);
    assert(N3.getOpcode() == ISD::Register);

    SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
                               cast<ConstantSDNode>(N2)->getZExtValue()),
                                             MVT::i32);
    SDValue Ops[] = { N0, N1, Tmp2, N3, InFlag };
    unsigned Opc = 0;
    switch (VT.getSimpleVT().SimpleTy) {
    default: assert(false && "Illegal conditional move type!");
      break;
    case MVT::f32:
      Opc = ARM::VNEGScc;
      break;
    case MVT::f64:
      Opc = ARM::VNEGDcc;
      break;
    }
    return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 5);
  }

  case ARMISD::VZIP: {
    unsigned Opc = 0;
    EVT VT = N->getValueType(0);
    switch (VT.getSimpleVT().SimpleTy) {
    default: return NULL;
    case MVT::v8i8:  Opc = ARM::VZIPd8; break;
    case MVT::v4i16: Opc = ARM::VZIPd16; break;
    case MVT::v2f32:
    case MVT::v2i32: Opc = ARM::VZIPd32; break;
    case MVT::v16i8: Opc = ARM::VZIPq8; break;
    case MVT::v8i16: Opc = ARM::VZIPq16; break;
    case MVT::v4f32:
    case MVT::v4i32: Opc = ARM::VZIPq32; break;
    }
    SDValue Pred = getAL(CurDAG);
    SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
    SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
    return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4);
  }
  case ARMISD::VUZP: {
    unsigned Opc = 0;
    EVT VT = N->getValueType(0);
    switch (VT.getSimpleVT().SimpleTy) {
    default: return NULL;
    case MVT::v8i8:  Opc = ARM::VUZPd8; break;
    case MVT::v4i16: Opc = ARM::VUZPd16; break;
    case MVT::v2f32:
    case MVT::v2i32: Opc = ARM::VUZPd32; break;
    case MVT::v16i8: Opc = ARM::VUZPq8; break;
    case MVT::v8i16: Opc = ARM::VUZPq16; break;
    case MVT::v4f32:
    case MVT::v4i32: Opc = ARM::VUZPq32; break;
    }
    SDValue Pred = getAL(CurDAG);
    SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
    SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
    return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4);
  }
  case ARMISD::VTRN: {
    unsigned Opc = 0;
    EVT VT = N->getValueType(0);
    switch (VT.getSimpleVT().SimpleTy) {
    default: return NULL;
    case MVT::v8i8:  Opc = ARM::VTRNd8; break;
    case MVT::v4i16: Opc = ARM::VTRNd16; break;
    case MVT::v2f32:
    case MVT::v2i32: Opc = ARM::VTRNd32; break;
    case MVT::v16i8: Opc = ARM::VTRNq8; break;
    case MVT::v8i16: Opc = ARM::VTRNq16; break;
    case MVT::v4f32:
    case MVT::v4i32: Opc = ARM::VTRNq32; break;
    }
    SDValue Pred = getAL(CurDAG);
    SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
    SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
    return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4);
  }
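  // ARMISD::BUILD_VECTOR only reaches instruction selection for vectors of
  // floating-point elements already living in S or D registers; it is
  // selected to a REG_SEQUENCE of those registers.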
  case ARMISD::BUILD_VECTOR: {
    EVT VecVT = N->getValueType(0);
    EVT EltVT = VecVT.getVectorElementType();
    unsigned NumElts = VecVT.getVectorNumElements();
    if (EltVT == MVT::f64) {
      assert(NumElts == 2 && "unexpected type for BUILD_VECTOR");
      return PairDRegs(VecVT, N->getOperand(0), N->getOperand(1));
    }
    assert(EltVT == MVT::f32 && "unexpected type for BUILD_VECTOR");
    if (NumElts == 2)
      return PairSRegs(VecVT, N->getOperand(0), N->getOperand(1));
    assert(NumElts == 4 && "unexpected type for BUILD_VECTOR");
    return QuadSRegs(VecVT, N->getOperand(0), N->getOperand(1),
                     N->getOperand(2), N->getOperand(3));
  }
  case ARMISD::VLD2DUP: {
    unsigned Opcodes[] = { ARM::VLD2DUPd8Pseudo, ARM::VLD2DUPd16Pseudo,
                           ARM::VLD2DUPd32Pseudo };
    return SelectVLDDup(N, 2, Opcodes);
  }

  case ARMISD::VLD3DUP: {
    unsigned Opcodes[] = { ARM::VLD3DUPd8Pseudo, ARM::VLD3DUPd16Pseudo,
                           ARM::VLD3DUPd32Pseudo };
    return SelectVLDDup(N, 3, Opcodes);
  }

  case ARMISD::VLD4DUP: {
    unsigned Opcodes[] = { ARM::VLD4DUPd8Pseudo, ARM::VLD4DUPd16Pseudo,
                           ARM::VLD4DUPd32Pseudo };
    return SelectVLDDup(N, 4, Opcodes);
  }
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
    switch (IntNo) {
    default:
      break;
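
    // Each NEON load/store intrinsic below supplies small opcode tables
    // indexed by element size; the D tables cover 64-bit vectors and the
    // Q tables cover 128-bit vectors (split into even/odd halves for the
    // wide vld3/vld4 and vst3/vst4 forms).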
    case Intrinsic::arm_neon_vld1: {
      unsigned DOpcodes[] = { ARM::VLD1d8, ARM::VLD1d16,
                              ARM::VLD1d32, ARM::VLD1d64 };
      unsigned QOpcodes[] = { ARM::VLD1q8Pseudo, ARM::VLD1q16Pseudo,
                              ARM::VLD1q32Pseudo, ARM::VLD1q64Pseudo };
      return SelectVLD(N, 1, DOpcodes, QOpcodes, 0);
    }

    case Intrinsic::arm_neon_vld2: {
      unsigned DOpcodes[] = { ARM::VLD2d8Pseudo, ARM::VLD2d16Pseudo,
                              ARM::VLD2d32Pseudo, ARM::VLD1q64Pseudo };
      unsigned QOpcodes[] = { ARM::VLD2q8Pseudo, ARM::VLD2q16Pseudo,
                              ARM::VLD2q32Pseudo };
      return SelectVLD(N, 2, DOpcodes, QOpcodes, 0);
    }

    case Intrinsic::arm_neon_vld3: {
      unsigned DOpcodes[] = { ARM::VLD3d8Pseudo, ARM::VLD3d16Pseudo,
                              ARM::VLD3d32Pseudo, ARM::VLD1d64TPseudo };
      unsigned QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
                               ARM::VLD3q16Pseudo_UPD,
                               ARM::VLD3q32Pseudo_UPD };
      unsigned QOpcodes1[] = { ARM::VLD3q8oddPseudo_UPD,
                               ARM::VLD3q16oddPseudo_UPD,
                               ARM::VLD3q32oddPseudo_UPD };
      return SelectVLD(N, 3, DOpcodes, QOpcodes0, QOpcodes1);
    }

    case Intrinsic::arm_neon_vld4: {
      unsigned DOpcodes[] = { ARM::VLD4d8Pseudo, ARM::VLD4d16Pseudo,
                              ARM::VLD4d32Pseudo, ARM::VLD1d64QPseudo };
      unsigned QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
                               ARM::VLD4q16Pseudo_UPD,
                               ARM::VLD4q32Pseudo_UPD };
      unsigned QOpcodes1[] = { ARM::VLD4q8oddPseudo_UPD,
                               ARM::VLD4q16oddPseudo_UPD,
                               ARM::VLD4q32oddPseudo_UPD };
      return SelectVLD(N, 4, DOpcodes, QOpcodes0, QOpcodes1);
    }

    case Intrinsic::arm_neon_vld2lane: {
      unsigned DOpcodes[] = { ARM::VLD2LNd8Pseudo, ARM::VLD2LNd16Pseudo,
                              ARM::VLD2LNd32Pseudo };
      unsigned QOpcodes[] = { ARM::VLD2LNq16Pseudo, ARM::VLD2LNq32Pseudo };
      return SelectVLDSTLane(N, true, 2, DOpcodes, QOpcodes);
    }

    case Intrinsic::arm_neon_vld3lane: {
      unsigned DOpcodes[] = { ARM::VLD3LNd8Pseudo, ARM::VLD3LNd16Pseudo,
                              ARM::VLD3LNd32Pseudo };
      unsigned QOpcodes[] = { ARM::VLD3LNq16Pseudo, ARM::VLD3LNq32Pseudo };
      return SelectVLDSTLane(N, true, 3, DOpcodes, QOpcodes);
    }

    case Intrinsic::arm_neon_vld4lane: {
      unsigned DOpcodes[] = { ARM::VLD4LNd8Pseudo, ARM::VLD4LNd16Pseudo,
                              ARM::VLD4LNd32Pseudo };
      unsigned QOpcodes[] = { ARM::VLD4LNq16Pseudo, ARM::VLD4LNq32Pseudo };
      return SelectVLDSTLane(N, true, 4, DOpcodes, QOpcodes);
    }

    case Intrinsic::arm_neon_vst1: {
      unsigned DOpcodes[] = { ARM::VST1d8, ARM::VST1d16,
                              ARM::VST1d32, ARM::VST1d64 };
      unsigned QOpcodes[] = { ARM::VST1q8Pseudo, ARM::VST1q16Pseudo,
                              ARM::VST1q32Pseudo, ARM::VST1q64Pseudo };
      return SelectVST(N, 1, DOpcodes, QOpcodes, 0);
    }

    case Intrinsic::arm_neon_vst2: {
      unsigned DOpcodes[] = { ARM::VST2d8Pseudo, ARM::VST2d16Pseudo,
                              ARM::VST2d32Pseudo, ARM::VST1q64Pseudo };
      unsigned QOpcodes[] = { ARM::VST2q8Pseudo, ARM::VST2q16Pseudo,
                              ARM::VST2q32Pseudo };
      return SelectVST(N, 2, DOpcodes, QOpcodes, 0);
    }

    case Intrinsic::arm_neon_vst3: {
      unsigned DOpcodes[] = { ARM::VST3d8Pseudo, ARM::VST3d16Pseudo,
                              ARM::VST3d32Pseudo, ARM::VST1d64TPseudo };
      unsigned QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
                               ARM::VST3q16Pseudo_UPD,
                               ARM::VST3q32Pseudo_UPD };
      unsigned QOpcodes1[] = { ARM::VST3q8oddPseudo_UPD,
                               ARM::VST3q16oddPseudo_UPD,
                               ARM::VST3q32oddPseudo_UPD };
      return SelectVST(N, 3, DOpcodes, QOpcodes0, QOpcodes1);
    }

    case Intrinsic::arm_neon_vst4: {
      unsigned DOpcodes[] = { ARM::VST4d8Pseudo, ARM::VST4d16Pseudo,
                              ARM::VST4d32Pseudo, ARM::VST1d64QPseudo };
      unsigned QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
                               ARM::VST4q16Pseudo_UPD,
                               ARM::VST4q32Pseudo_UPD };
      unsigned QOpcodes1[] = { ARM::VST4q8oddPseudo_UPD,
                               ARM::VST4q16oddPseudo_UPD,
                               ARM::VST4q32oddPseudo_UPD };
      return SelectVST(N, 4, DOpcodes, QOpcodes0, QOpcodes1);
    }

    case Intrinsic::arm_neon_vst2lane: {
      unsigned DOpcodes[] = { ARM::VST2LNd8Pseudo, ARM::VST2LNd16Pseudo,
                              ARM::VST2LNd32Pseudo };
      unsigned QOpcodes[] = { ARM::VST2LNq16Pseudo, ARM::VST2LNq32Pseudo };
      return SelectVLDSTLane(N, false, 2, DOpcodes, QOpcodes);
    }

    case Intrinsic::arm_neon_vst3lane: {
      unsigned DOpcodes[] = { ARM::VST3LNd8Pseudo, ARM::VST3LNd16Pseudo,
                              ARM::VST3LNd32Pseudo };
      unsigned QOpcodes[] = { ARM::VST3LNq16Pseudo, ARM::VST3LNq32Pseudo };
      return SelectVLDSTLane(N, false, 3, DOpcodes, QOpcodes);
    }

    case Intrinsic::arm_neon_vst4lane: {
      unsigned DOpcodes[] = { ARM::VST4LNd8Pseudo, ARM::VST4LNd16Pseudo,
                              ARM::VST4LNd32Pseudo };
      unsigned QOpcodes[] = { ARM::VST4LNq16Pseudo, ARM::VST4LNq32Pseudo };
      return SelectVLDSTLane(N, false, 4, DOpcodes, QOpcodes);
    }
    }
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    switch (IntNo) {
    default:
      break;

    case Intrinsic::arm_neon_vtbl2:
      return SelectVTBL(N, false, 2, ARM::VTBL2Pseudo);
    case Intrinsic::arm_neon_vtbl3:
      return SelectVTBL(N, false, 3, ARM::VTBL3Pseudo);
    case Intrinsic::arm_neon_vtbl4:
      return SelectVTBL(N, false, 4, ARM::VTBL4Pseudo);

    case Intrinsic::arm_neon_vtbx2:
      return SelectVTBL(N, true, 2, ARM::VTBX2Pseudo);
    case Intrinsic::arm_neon_vtbx3:
      return SelectVTBL(N, true, 3, ARM::VTBX3Pseudo);
    case Intrinsic::arm_neon_vtbx4:
      return SelectVTBL(N, true, 4, ARM::VTBX4Pseudo);
    }
    break;
  }
  case ISD::CONCAT_VECTORS:
    return SelectConcatVector(N);
  }

  return SelectCode(N);
}

bool ARMDAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
  // Require the address to be in a register. That is safe for all ARM
  // variants and it is hard to do anything much smarter without knowing
  // how the operand is used.
  OutOps.push_back(Op);
  return false;
}

/// createARMISelDag - This pass converts a legalized DAG into a
/// ARM-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createARMISelDag(ARMBaseTargetMachine &TM,
                                     CodeGenOpt::Level OptLevel) {
  return new ARMDAGToDAGISel(TM, OptLevel);
}