1 //===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines an instruction selector for the ARM target.
12 //===----------------------------------------------------------------------===//
15 #include "ARMAddressingModes.h"
16 #include "ARMConstantPoolValue.h"
17 #include "ARMISelLowering.h"
18 #include "ARMTargetMachine.h"
19 #include "llvm/CallingConv.h"
20 #include "llvm/Constants.h"
21 #include "llvm/DerivedTypes.h"
22 #include "llvm/Function.h"
23 #include "llvm/Intrinsics.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/SelectionDAG.h"
28 #include "llvm/CodeGen/SelectionDAGISel.h"
29 #include "llvm/Target/TargetLowering.h"
30 #include "llvm/Target/TargetOptions.h"
31 #include "llvm/Support/Compiler.h"
32 #include "llvm/Support/Debug.h"
// Hard-coded register-subclass indices for the low (dsubreg_0) and high
// (dsubreg_1) 64-bit D halves of a 128-bit Q register, used below when
// building INSERT_SUBREG / EXTRACT_SUBREG nodes.
// NOTE(review): values 5 and 6 must stay in sync with the subregister
// indices generated from the .td files — TODO confirm against
// ARMRegisterInfo.td.
35 static const unsigned arm_dsubreg_0 = 5;
36 static const unsigned arm_dsubreg_1 = 6;
38 //===--------------------------------------------------------------------===//
39 /// ARMDAGToDAGISel - ARM specific code to select ARM machine
40 /// instructions for SelectionDAG operations.
// ARMDAGToDAGISel - SelectionDAGISel subclass that performs DAG-to-DAG
// instruction selection for ARM and Thumb.  The SelectAddrMode* /
// SelectThumbAddrMode* predicates are complex-pattern helpers referenced
// by the autogenerated matcher included from ARMGenDAGISel.inc.
43 class ARMDAGToDAGISel : public SelectionDAGISel {
46 /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
47 /// make the right decision when generating code for different targets.
48 const ARMSubtarget *Subtarget;
51 explicit ARMDAGToDAGISel(ARMTargetMachine &tm)
52 : SelectionDAGISel(tm), TM(tm),
53 Subtarget(&TM.getSubtarget<ARMSubtarget>()) {
56 virtual const char *getPassName() const {
57 return "ARM Instruction Selection";
60 /// getI32Imm - Return a target constant with the specified value, of type i32.
61 inline SDValue getI32Imm(unsigned Imm) {
62 return CurDAG->getTargetConstant(Imm, MVT::i32);
// Select - Main entry point: convert one legalized DAG node into a
// machine node (or return it unchanged / NULL if already selected).
65 SDNode *Select(SDValue Op);
66 virtual void InstructionSelect();
// Addressing-mode matchers.  Each fills its reference out-parameters and
// returns true when N matches the corresponding ARM addressing mode.
// The *Offset variants match only the pre/post-indexed offset operand.
67 bool SelectAddrMode2(SDValue Op, SDValue N, SDValue &Base,
68 SDValue &Offset, SDValue &Opc);
69 bool SelectAddrMode2Offset(SDValue Op, SDValue N,
70 SDValue &Offset, SDValue &Opc);
71 bool SelectAddrMode3(SDValue Op, SDValue N, SDValue &Base,
72 SDValue &Offset, SDValue &Opc);
73 bool SelectAddrMode3Offset(SDValue Op, SDValue N,
74 SDValue &Offset, SDValue &Opc);
75 bool SelectAddrMode5(SDValue Op, SDValue N, SDValue &Base,
78 bool SelectAddrModePC(SDValue Op, SDValue N, SDValue &Offset,
// Thumb (16-bit) addressing-mode matchers; the S1/S2/S4 forms wrap
// SelectThumbAddrModeRI5 with scale factors 1, 2 and 4.
81 bool SelectThumbAddrModeRR(SDValue Op, SDValue N, SDValue &Base,
83 bool SelectThumbAddrModeRI5(SDValue Op, SDValue N, unsigned Scale,
84 SDValue &Base, SDValue &OffImm,
86 bool SelectThumbAddrModeS1(SDValue Op, SDValue N, SDValue &Base,
87 SDValue &OffImm, SDValue &Offset);
88 bool SelectThumbAddrModeS2(SDValue Op, SDValue N, SDValue &Base,
89 SDValue &OffImm, SDValue &Offset);
90 bool SelectThumbAddrModeS4(SDValue Op, SDValue N, SDValue &Base,
91 SDValue &OffImm, SDValue &Offset);
92 bool SelectThumbAddrModeSP(SDValue Op, SDValue N, SDValue &Base,
// Shifter-operand matchers for Thumb2 and classic ARM so_reg operands.
95 bool SelectThumb2ShifterOperandReg(SDValue Op, SDValue N,
96 SDValue &BaseReg, SDValue &Opc);
98 bool SelectShifterOperandReg(SDValue Op, SDValue N, SDValue &A,
99 SDValue &B, SDValue &C);
101 // Include the pieces autogenerated from the target description.
102 #include "ARMGenDAGISel.inc"
105 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
106 /// inline asm expressions.
107 virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
109 std::vector<SDValue> &OutOps);
// InstructionSelect - Run instruction selection over the whole current DAG,
// then prune any nodes left dead by selection.
113 void ARMDAGToDAGISel::InstructionSelect() {
117 CurDAG->RemoveDeadNodes();
// SelectAddrMode2 - Match an ARM addressing-mode-2 operand: base register
// plus either a +/- 12-bit immediate or a +/- (possibly shifted) register
// offset.  On success, Base and Offset are set and the add/sub flag, shift
// amount and shift opcode are packed into Opc via ARM_AM::getAM2Opc.
120 bool ARMDAGToDAGISel::SelectAddrMode2(SDValue Op, SDValue N,
121 SDValue &Base, SDValue &Offset,
// X * power-of-2-plus/minus-one is turned into reg + shifted-reg form so a
// single address computation covers the multiply.
123 if (N.getOpcode() == ISD::MUL) {
124 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
125 // X * [3,5,9] -> X + X * [2,4,8] etc.
126 int RHSC = (int)RHS->getZExtValue();
129 ARM_AM::AddrOpc AddSub = ARM_AM::add;
131 AddSub = ARM_AM::sub;
134 if (isPowerOf2_32(RHSC)) {
135 unsigned ShAmt = Log2_32(RHSC);
// Base and Offset are the same register: X +/- (X << ShAmt).
136 Base = Offset = N.getOperand(0);
137 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
// Not an add/sub at all: treat the whole node as the base with a zero
// offset (frame indices and ARMISD::Wrapper get unwrapped first).
146 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB) {
148 if (N.getOpcode() == ISD::FrameIndex) {
149 int FI = cast<FrameIndexSDNode>(N)->getIndex();
150 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
151 } else if (N.getOpcode() == ARMISD::Wrapper) {
152 Base = N.getOperand(0);
154 Offset = CurDAG->getRegister(0, MVT::i32);
155 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
161 // Match simple R +/- imm12 operands.
162 if (N.getOpcode() == ISD::ADD)
163 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
164 int RHSC = (int)RHS->getZExtValue();
165 if ((RHSC >= 0 && RHSC < 0x1000) ||
166 (RHSC < 0 && RHSC > -0x1000)) { // 12 bits.
167 Base = N.getOperand(0);
168 if (Base.getOpcode() == ISD::FrameIndex) {
169 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
170 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
172 Offset = CurDAG->getRegister(0, MVT::i32);
174 ARM_AM::AddrOpc AddSub = ARM_AM::add;
176 AddSub = ARM_AM::sub;
179 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,
186 // Otherwise this is R +/- [possibly shifted] R
187 ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::ADD ? ARM_AM::add:ARM_AM::sub;
188 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(1));
191 Base = N.getOperand(0);
192 Offset = N.getOperand(1);
194 if (ShOpcVal != ARM_AM::no_shift) {
195 // Check to see if the RHS of the shift is a constant, if not, we can't fold
197 if (ConstantSDNode *Sh =
198 dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
199 ShAmt = Sh->getZExtValue();
200 Offset = N.getOperand(1).getOperand(0);
202 ShOpcVal = ARM_AM::no_shift;
206 // Try matching (R shl C) + (R).
207 if (N.getOpcode() == ISD::ADD && ShOpcVal == ARM_AM::no_shift) {
208 ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0));
209 if (ShOpcVal != ARM_AM::no_shift) {
210 // Check to see if the RHS of the shift is a constant, if not, we can't
212 if (ConstantSDNode *Sh =
213 dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
214 ShAmt = Sh->getZExtValue();
// Shift is on the LHS, so swap roles: LHS feeds the shifted offset,
// RHS becomes the base register.
215 Offset = N.getOperand(0).getOperand(0);
216 Base = N.getOperand(1);
218 ShOpcVal = ARM_AM::no_shift;
223 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
// SelectAddrMode2Offset - Match the offset operand of a pre/post-indexed
// load or store against addressing mode 2 (imm12 or shifted register).
// The add/sub direction comes from the parent memory node's
// MemIndexedMode, not from the offset expression itself.
228 bool ARMDAGToDAGISel::SelectAddrMode2Offset(SDValue Op, SDValue N,
229 SDValue &Offset, SDValue &Opc) {
230 unsigned Opcode = Op.getOpcode();
231 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
232 ? cast<LoadSDNode>(Op)->getAddressingMode()
233 : cast<StoreSDNode>(Op)->getAddressingMode();
234 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
235 ? ARM_AM::add : ARM_AM::sub;
// Constant offset: encode directly as imm12 with register 0 as the
// (unused) offset register.
236 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
237 int Val = (int)C->getZExtValue();
238 if (Val >= 0 && Val < 0x1000) { // 12 bits.
239 Offset = CurDAG->getRegister(0, MVT::i32);
240 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
// Register offset, possibly with a constant shift folded in.
248 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);
250 if (ShOpcVal != ARM_AM::no_shift) {
251 // Check to see if the RHS of the shift is a constant, if not, we can't fold
253 if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
254 ShAmt = Sh->getZExtValue();
255 Offset = N.getOperand(0);
257 ShOpcVal = ARM_AM::no_shift;
261 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
// SelectAddrMode3 - Match an ARM addressing-mode-3 operand (halfword /
// signed-byte loads and stores): base register plus either a +/- 8-bit
// immediate or a +/- register offset.  Opc is packed via ARM_AM::getAM3Opc.
267 bool ARMDAGToDAGISel::SelectAddrMode3(SDValue Op, SDValue N,
268 SDValue &Base, SDValue &Offset,
270 if (N.getOpcode() == ISD::SUB) {
271 // X - C is canonicalize to X + -C, no need to handle it here.
272 Base = N.getOperand(0);
273 Offset = N.getOperand(1);
274 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),MVT::i32);
// Anything other than an add is a bare base with a zero offset (frame
// indices become target frame indices).
278 if (N.getOpcode() != ISD::ADD) {
280 if (N.getOpcode() == ISD::FrameIndex) {
281 int FI = cast<FrameIndexSDNode>(N)->getIndex();
282 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
284 Offset = CurDAG->getRegister(0, MVT::i32);
285 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32);
289 // If the RHS is +/- imm8, fold into addr mode.
290 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
291 int RHSC = (int)RHS->getZExtValue();
292 if ((RHSC >= 0 && RHSC < 256) ||
293 (RHSC < 0 && RHSC > -256)) { // note -256 itself isn't allowed.
294 Base = N.getOperand(0);
295 if (Base.getOpcode() == ISD::FrameIndex) {
296 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
297 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
299 Offset = CurDAG->getRegister(0, MVT::i32);
301 ARM_AM::AddrOpc AddSub = ARM_AM::add;
303 AddSub = ARM_AM::sub;
306 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32);
// Fallback: plain register + register form.
311 Base = N.getOperand(0);
312 Offset = N.getOperand(1);
313 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), MVT::i32);
// SelectAddrMode3Offset - Match the offset operand of a pre/post-indexed
// halfword/signed-byte load or store: an 8-bit immediate or a plain
// register.  The add/sub direction is taken from the parent memory node.
317 bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDValue Op, SDValue N,
318 SDValue &Offset, SDValue &Opc) {
319 unsigned Opcode = Op.getOpcode();
320 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
321 ? cast<LoadSDNode>(Op)->getAddressingMode()
322 : cast<StoreSDNode>(Op)->getAddressingMode();
323 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
324 ? ARM_AM::add : ARM_AM::sub;
325 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
326 int Val = (int)C->getZExtValue();
327 if (Val >= 0 && Val < 256) {
328 Offset = CurDAG->getRegister(0, MVT::i32);
329 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);
// Non-constant: use N itself as the register offset (assignment to
// Offset is in an elided line of this listing — TODO confirm).
335 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), MVT::i32);
// SelectAddrMode5 - Match an ARM addressing-mode-5 operand (VFP load/store):
// base register plus an 8-bit immediate that is implicitly scaled by 4.
// The packed add/sub + imm8 goes into Offset via ARM_AM::getAM5Opc; there
// is no register-offset form.
340 bool ARMDAGToDAGISel::SelectAddrMode5(SDValue Op, SDValue N,
341 SDValue &Base, SDValue &Offset) {
342 if (N.getOpcode() != ISD::ADD) {
344 if (N.getOpcode() == ISD::FrameIndex) {
345 int FI = cast<FrameIndexSDNode>(N)->getIndex();
346 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
347 } else if (N.getOpcode() == ARMISD::Wrapper) {
348 Base = N.getOperand(0);
350 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
355 // If the RHS is +/- imm8, fold into addr mode.
356 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
357 int RHSC = (int)RHS->getZExtValue();
358 if ((RHSC & 3) == 0) { // The constant is implicitly multiplied by 4.
// After the (elided) divide-by-4, the word offset must fit in 8 bits.
360 if ((RHSC >= 0 && RHSC < 256) ||
361 (RHSC < 0 && RHSC > -256)) { // note -256 itself isn't allowed.
362 Base = N.getOperand(0);
363 if (Base.getOpcode() == ISD::FrameIndex) {
364 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
365 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
368 ARM_AM::AddrOpc AddSub = ARM_AM::add;
370 AddSub = ARM_AM::sub;
373 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
// Fallback: whole expression as base, zero offset.
381 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
// SelectAddrModePC - Match a PC-relative address produced by ARMISD::PIC_ADD:
// operand 0 becomes the offset value and the constant operand 1 becomes the
// pic label id.  Only matches when this is the PIC_ADD's sole use.
386 bool ARMDAGToDAGISel::SelectAddrModePC(SDValue Op, SDValue N,
387 SDValue &Offset, SDValue &Label) {
388 if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
389 Offset = N.getOperand(0);
390 SDValue N1 = N.getOperand(1);
391 Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
// SelectThumbAddrModeRR - Match a Thumb register+register address.  For a
// non-ADD node the whole expression is the base and a zero register must be
// materialized for the offset.
398 bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue Op, SDValue N,
399 SDValue &Base, SDValue &Offset){
400 // FIXME dl should come from the parent load or store, not the address
401 DebugLoc dl = Op.getDebugLoc();
402 if (N.getOpcode() != ISD::ADD) {
404 // We must materialize a zero in a reg! Returning a constant here
405 // wouldn't work without additional code to position the node within
406 // ISel's topological ordering in a place where ISel will process it
407 // normally. Instead, just explicitly issue a tMOVri8 node!
408 Offset = SDValue(CurDAG->getTargetNode(ARM::tMOVi8, dl, MVT::i32,
409 CurDAG->getTargetConstant(0, MVT::i32)), 0);
413 Base = N.getOperand(0);
414 Offset = N.getOperand(1);
// SelectThumbAddrModeRI5 - Match a Thumb reg + (imm5 * Scale) address,
// where Scale is 1, 2 or 4 depending on the access size.  Exactly one of
// Offset (register form) or OffImm (immediate form) is meaningful; the
// other is set to register 0 / constant 0.
419 ARMDAGToDAGISel::SelectThumbAddrModeRI5(SDValue Op, SDValue N,
420 unsigned Scale, SDValue &Base,
421 SDValue &OffImm, SDValue &Offset) {
// Prefer sp-relative and constant-pool forms, which have their own
// instructions (tLDRspi/tSTRspi and tLDRpci).
423 SDValue TmpBase, TmpOffImm;
424 if (SelectThumbAddrModeSP(Op, N, TmpBase, TmpOffImm))
425 return false; // We want to select tLDRspi / tSTRspi instead.
426 if (N.getOpcode() == ARMISD::Wrapper &&
427 N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
428 return false; // We want to select tLDRpci instead.
431 if (N.getOpcode() != ISD::ADD) {
432 Base = (N.getOpcode() == ARMISD::Wrapper) ? N.getOperand(0) : N;
433 Offset = CurDAG->getRegister(0, MVT::i32);
434 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
438 // Thumb does not have [sp, r] address mode.
439 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
440 RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
441 if ((LHSR && LHSR->getReg() == ARM::SP) ||
442 (RHSR && RHSR->getReg() == ARM::SP)) {
444 Offset = CurDAG->getRegister(0, MVT::i32);
445 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
449 // If the RHS is + imm5 * scale, fold into addr mode.
450 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
451 int RHSC = (int)RHS->getZExtValue();
452 if ((RHSC & (Scale-1)) == 0) { // The constant is implicitly multiplied.
// After the (elided) divide by Scale, the value must fit in 5 bits.
454 if (RHSC >= 0 && RHSC < 32) {
455 Base = N.getOperand(0);
456 Offset = CurDAG->getRegister(0, MVT::i32);
457 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
// Fallback: register + register form with a zero immediate.
463 Base = N.getOperand(0);
464 Offset = N.getOperand(1);
465 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
// SelectThumbAddrModeS1 - Byte-sized Thumb address: RI5 with scale 1.
469 bool ARMDAGToDAGISel::SelectThumbAddrModeS1(SDValue Op, SDValue N,
470 SDValue &Base, SDValue &OffImm,
472 return SelectThumbAddrModeRI5(Op, N, 1, Base, OffImm, Offset);
// SelectThumbAddrModeS2 - Halfword-sized Thumb address: RI5 with scale 2.
475 bool ARMDAGToDAGISel::SelectThumbAddrModeS2(SDValue Op, SDValue N,
476 SDValue &Base, SDValue &OffImm,
478 return SelectThumbAddrModeRI5(Op, N, 2, Base, OffImm, Offset);
// SelectThumbAddrModeS4 - Word-sized Thumb address: RI5 with scale 4.
481 bool ARMDAGToDAGISel::SelectThumbAddrModeS4(SDValue Op, SDValue N,
482 SDValue &Base, SDValue &OffImm,
484 return SelectThumbAddrModeRI5(Op, N, 4, Base, OffImm, Offset);
// SelectThumbAddrModeSP - Match a Thumb sp-relative (or frame-index)
// address: FI, or (FI/SP + imm8*4).  Used for tLDRspi / tSTRspi.
487 bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue Op, SDValue N,
488 SDValue &Base, SDValue &OffImm) {
489 if (N.getOpcode() == ISD::FrameIndex) {
490 int FI = cast<FrameIndexSDNode>(N)->getIndex();
491 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
492 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
496 if (N.getOpcode() != ISD::ADD)
// Only fold when the LHS is a frame index or the stack pointer itself.
499 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
500 if (N.getOperand(0).getOpcode() == ISD::FrameIndex ||
501 (LHSR && LHSR->getReg() == ARM::SP)) {
502 // If the RHS is + imm8 * scale, fold into addr mode.
503 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
504 int RHSC = (int)RHS->getZExtValue();
505 if ((RHSC & 3) == 0) { // The constant is implicitly multiplied.
// After the (elided) divide by 4, the word offset must fit in 8 bits.
507 if (RHSC >= 0 && RHSC < 256) {
508 Base = N.getOperand(0);
509 if (Base.getOpcode() == ISD::FrameIndex) {
510 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
511 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
513 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
// SelectThumb2ShifterOperandReg - Match a Thumb2 shifted-register operand
// (t2_so_reg): a register shifted by a constant amount.  Fails on a bare
// register so the simpler register-only pattern wins instead.
523 bool ARMDAGToDAGISel::SelectThumb2ShifterOperandReg(SDValue Op,
527 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);
529 // Don't match base register only case. That is matched to a separate
530 // lower complexity pattern with explicit register operand.
531 if (ShOpcVal == ARM_AM::no_shift) return false;
533 BaseReg = N.getOperand(0);
534 unsigned ShImmVal = 0;
535 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
// Shift amounts are encoded modulo 32.
536 ShImmVal = RHS->getZExtValue() & 31;
537 Opc = getI32Imm(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal));
// SelectShifterOperandReg - Match a classic ARM so_reg operand: a register
// shifted by either a constant (ShReg is register 0) or another register.
// Fails on a bare register; that case belongs to the plain-register pattern.
544 bool ARMDAGToDAGISel::SelectShifterOperandReg(SDValue Op,
549 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);
551 // Don't match base register only case. That is matched to a separate
552 // lower complexity pattern with explicit register operand.
553 if (ShOpcVal == ARM_AM::no_shift) return false;
555 BaseReg = N.getOperand(0);
556 unsigned ShImmVal = 0;
557 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
558 ShReg = CurDAG->getRegister(0, MVT::i32);
559 ShImmVal = RHS->getZExtValue() & 31;
// Variable shift: the shift amount lives in a register.
561 ShReg = N.getOperand(1);
563 Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
568 /// getAL - Returns a ARMCC::AL immediate node.
/// AL is the "always" condition code; this i32 target constant is used as
/// the predicate operand on unconditional (predicable) instructions.
569 static inline SDValue getAL(SelectionDAG *CurDAG) {
570 return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32);
// Select - Main DAG-to-DAG selection entry point.  Handles the node kinds
// with target-specific lowering (constants, frame indices, multiplies,
// indexed loads, branches, conditional moves, NEON concat/shuffle) and
// defers everything else to the autogenerated SelectCode matcher.
// NOTE(review): several case labels and closing braces are elided in this
// listing; region comments below mark where a missing label is inferred.
574 SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
575 SDNode *N = Op.getNode();
576 DebugLoc dl = N->getDebugLoc();
578 if (N->isMachineOpcode())
579 return NULL; // Already selected.
581 switch (N->getOpcode()) {
// Constants that no single (or pair of) move instruction can materialize
// are spilled to the constant pool and reloaded with tLDRcp / LDRcp.
583 case ISD::Constant: {
584 unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
586 if (Subtarget->isThumb()) {
587 if (Subtarget->hasThumb2())
588 // Thumb2 has the MOVT instruction, so all immediates can
589 // be done with MOV + MOVT, at worst.
592 UseCP = (Val > 255 && // MOV
593 ~Val > 255 && // MOV + MVN
594 !ARM_AM::isThumbImmShiftedVal(Val)); // MOV + LSL
596 UseCP = (ARM_AM::getSOImmVal(Val) == -1 && // MOV
597 ARM_AM::getSOImmVal(~Val) == -1 && // MVN
598 !ARM_AM::isSOImmTwoPartVal(Val)); // two instrs.
601 CurDAG->getTargetConstantPool(ConstantInt::get(Type::Int32Ty, Val),
605 if (Subtarget->isThumb())
606 ResNode = CurDAG->getTargetNode(ARM::tLDRcp, dl, MVT::i32, MVT::Other,
607 CPIdx, CurDAG->getEntryNode());
611 CurDAG->getRegister(0, MVT::i32),
612 CurDAG->getTargetConstant(0, MVT::i32),
614 CurDAG->getRegister(0, MVT::i32),
615 CurDAG->getEntryNode()
617 ResNode=CurDAG->getTargetNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
620 ReplaceUses(Op, SDValue(ResNode, 0));
624 // Other cases are autogenerated.
627 case ISD::FrameIndex: {
628 // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
629 int FI = cast<FrameIndexSDNode>(N)->getIndex();
630 SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
631 if (Subtarget->isThumb()) {
632 return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, TFI,
633 CurDAG->getTargetConstant(0, MVT::i32));
// ARM mode: predicated ADDri (pred + pred-reg + cc-out operands).
635 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
636 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
637 CurDAG->getRegister(0, MVT::i32) };
638 return CurDAG->SelectNodeTo(N, ARM::ADDri, MVT::i32, Ops, 5);
// Region below handles an ADD of the stack pointer in Thumb mode
// (case label elided in this listing — TODO confirm it is ISD::ADD).
642 if (!Subtarget->isThumb())
644 // Select add sp, c to tADDhirr.
645 SDValue N0 = Op.getOperand(0);
646 SDValue N1 = Op.getOperand(1);
647 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(Op.getOperand(0));
648 RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(Op.getOperand(1));
649 if (LHSR && LHSR->getReg() == ARM::SP) {
651 std::swap(LHSR, RHSR);
653 if (RHSR && RHSR->getReg() == ARM::SP) {
654 SDValue Val = SDValue(CurDAG->getTargetNode(ARM::tMOVlor2hir, dl,
655 Op.getValueType(), N0, N0), 0);
656 return CurDAG->SelectNodeTo(N, ARM::tADDhirr, Op.getValueType(), Val, N1);
// Region below strength-reduces multiply by (2^n +/- 1) into a
// shifted add/reverse-subtract; ARM mode only (label elided,
// presumably ISD::MUL — TODO confirm).
661 if (Subtarget->isThumb())
663 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
664 unsigned RHSV = C->getZExtValue();
666 if (isPowerOf2_32(RHSV-1)) { // 2^n+1?
667 SDValue V = Op.getOperand(0);
668 unsigned ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, Log2_32(RHSV-1));
669 SDValue Ops[] = { V, V, CurDAG->getRegister(0, MVT::i32),
670 CurDAG->getTargetConstant(ShImm, MVT::i32),
671 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
672 CurDAG->getRegister(0, MVT::i32) };
673 return CurDAG->SelectNodeTo(N, ARM::ADDrs, MVT::i32, Ops, 7);
675 if (isPowerOf2_32(RHSV+1)) { // 2^n-1?
676 SDValue V = Op.getOperand(0);
677 unsigned ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, Log2_32(RHSV+1));
678 SDValue Ops[] = { V, V, CurDAG->getRegister(0, MVT::i32),
679 CurDAG->getTargetConstant(ShImm, MVT::i32),
680 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
681 CurDAG->getRegister(0, MVT::i32) };
682 return CurDAG->SelectNodeTo(N, ARM::RSBrs, MVT::i32, Ops, 7);
// FMRRD moves a double-precision VFP register into a GPR pair
// (case label elided — presumably ARMISD::FMRRD).
687 return CurDAG->getTargetNode(ARM::FMRRD, dl, MVT::i32, MVT::i32,
688 Op.getOperand(0), getAL(CurDAG),
689 CurDAG->getRegister(0, MVT::i32));
// 64-bit multiply results: UMULL/SMULL produce lo and hi halves.
690 case ISD::UMUL_LOHI: {
691 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
692 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
693 CurDAG->getRegister(0, MVT::i32) };
694 return CurDAG->getTargetNode(ARM::UMULL, dl, MVT::i32, MVT::i32, Ops, 5);
696 case ISD::SMUL_LOHI: {
697 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
698 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
699 CurDAG->getRegister(0, MVT::i32) };
700 return CurDAG->getTargetNode(ARM::SMULL, dl, MVT::i32, MVT::i32, Ops, 5);
// Pre/post-indexed loads: pick LDR/LDRB/LDRH/LDRSB/LDRSH _PRE/_POST by
// memory VT, extension kind and matching addressing mode
// (case label elided — presumably ISD::LOAD).
703 LoadSDNode *LD = cast<LoadSDNode>(Op);
704 ISD::MemIndexedMode AM = LD->getAddressingMode();
705 MVT LoadedVT = LD->getMemoryVT();
706 if (AM != ISD::UNINDEXED) {
707 SDValue Offset, AMOpc;
708 bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
711 if (LoadedVT == MVT::i32 &&
712 SelectAddrMode2Offset(Op, LD->getOffset(), Offset, AMOpc)) {
713 Opcode = isPre ? ARM::LDR_PRE : ARM::LDR_POST;
715 } else if (LoadedVT == MVT::i16 &&
716 SelectAddrMode3Offset(Op, LD->getOffset(), Offset, AMOpc)) {
718 Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
719 ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
720 : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
721 } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
722 if (LD->getExtensionType() == ISD::SEXTLOAD) {
723 if (SelectAddrMode3Offset(Op, LD->getOffset(), Offset, AMOpc)) {
725 Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST;
728 if (SelectAddrMode2Offset(Op, LD->getOffset(), Offset, AMOpc)) {
730 Opcode = isPre ? ARM::LDRB_PRE : ARM::LDRB_POST;
736 SDValue Chain = LD->getChain();
737 SDValue Base = LD->getBasePtr();
738 SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG),
739 CurDAG->getRegister(0, MVT::i32), Chain };
740 return CurDAG->getTargetNode(Opcode, dl, MVT::i32, MVT::i32,
744 // Other cases are autogenerated.
747 case ARMISD::BRCOND: {
748 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
749 // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc)
750 // Pattern complexity = 6 cost = 1 size = 0
752 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
753 // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc)
754 // Pattern complexity = 6 cost = 1 size = 0
756 unsigned Opc = Subtarget->isThumb() ? ARM::tBcc : ARM::Bcc;
757 SDValue Chain = Op.getOperand(0);
758 SDValue N1 = Op.getOperand(1);
759 SDValue N2 = Op.getOperand(2);
760 SDValue N3 = Op.getOperand(3);
761 SDValue InFlag = Op.getOperand(4);
762 assert(N1.getOpcode() == ISD::BasicBlock);
763 assert(N2.getOpcode() == ISD::Constant);
764 assert(N3.getOpcode() == ISD::Register);
766 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
767 cast<ConstantSDNode>(N2)->getZExtValue()),
769 SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
770 SDNode *ResNode = CurDAG->getTargetNode(Opc, dl, MVT::Other,
772 Chain = SDValue(ResNode, 0);
773 if (Op.getNode()->getNumValues() == 2) {
774 InFlag = SDValue(ResNode, 1);
775 ReplaceUses(SDValue(Op.getNode(), 1), InFlag);
777 ReplaceUses(SDValue(Op.getNode(), 0), SDValue(Chain.getNode(), Chain.getResNo()));
// Conditional-move selection: shifted-operand, immediate and register
// forms, in decreasing pattern complexity
// (case label elided — presumably ARMISD::CMOV).
781 bool isThumb = Subtarget->isThumb();
782 MVT VT = Op.getValueType();
783 SDValue N0 = Op.getOperand(0);
784 SDValue N1 = Op.getOperand(1);
785 SDValue N2 = Op.getOperand(2);
786 SDValue N3 = Op.getOperand(3);
787 SDValue InFlag = Op.getOperand(4);
788 assert(N2.getOpcode() == ISD::Constant);
789 assert(N3.getOpcode() == ISD::Register);
791 // Pattern: (ARMcmov:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
792 // Emits: (MOVCCs:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
793 // Pattern complexity = 18 cost = 1 size = 0
797 if (!isThumb && VT == MVT::i32 &&
798 SelectShifterOperandReg(Op, N1, CPTmp0, CPTmp1, CPTmp2)) {
799 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
800 cast<ConstantSDNode>(N2)->getZExtValue()),
802 SDValue Ops[] = { N0, CPTmp0, CPTmp1, CPTmp2, Tmp2, N3, InFlag };
803 return CurDAG->SelectNodeTo(Op.getNode(), ARM::MOVCCs, MVT::i32, Ops, 7);
806 // Pattern: (ARMcmov:i32 GPR:i32:$false,
807 // (imm:i32)<<P:Predicate_so_imm>><<X:so_imm_XFORM>>:$true,
809 // Emits: (MOVCCi:i32 GPR:i32:$false,
810 // (so_imm_XFORM:i32 (imm:i32):$true), (imm:i32):$cc)
811 // Pattern complexity = 10 cost = 1 size = 0
812 if (VT == MVT::i32 &&
813 N3.getOpcode() == ISD::Constant &&
814 Predicate_so_imm(N3.getNode())) {
815 SDValue Tmp1 = CurDAG->getTargetConstant(((unsigned)
816 cast<ConstantSDNode>(N1)->getZExtValue()),
818 Tmp1 = Transform_so_imm_XFORM(Tmp1.getNode());
819 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
820 cast<ConstantSDNode>(N2)->getZExtValue()),
822 SDValue Ops[] = { N0, Tmp1, Tmp2, N3, InFlag };
823 return CurDAG->SelectNodeTo(Op.getNode(), ARM::MOVCCi, MVT::i32, Ops, 5);
826 // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
827 // Emits: (MOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
828 // Pattern complexity = 6 cost = 1 size = 0
830 // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
831 // Emits: (tMOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
832 // Pattern complexity = 6 cost = 11 size = 0
834 // Also FCPYScc and FCPYDcc.
835 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
836 cast<ConstantSDNode>(N2)->getZExtValue()),
838 SDValue Ops[] = { N0, N1, Tmp2, N3, InFlag };
840 switch (VT.getSimpleVT()) {
841 default: assert(false && "Illegal conditional move type!");
844 Opc = isThumb ? ARM::tMOVCCr : ARM::MOVCCr;
853 return CurDAG->SelectNodeTo(Op.getNode(), Opc, VT, Ops, 5);
// Second conditional-select region with the same operand layout
// (case label elided — possibly ARMISD::CNEG; TODO confirm).
856 MVT VT = Op.getValueType();
857 SDValue N0 = Op.getOperand(0);
858 SDValue N1 = Op.getOperand(1);
859 SDValue N2 = Op.getOperand(2);
860 SDValue N3 = Op.getOperand(3);
861 SDValue InFlag = Op.getOperand(4);
862 assert(N2.getOpcode() == ISD::Constant);
863 assert(N3.getOpcode() == ISD::Register);
865 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
866 cast<ConstantSDNode>(N2)->getZExtValue()),
868 SDValue Ops[] = { N0, N1, Tmp2, N3, InFlag };
870 switch (VT.getSimpleVT()) {
871 default: assert(false && "Illegal conditional move type!");
880 return CurDAG->SelectNodeTo(Op.getNode(), Opc, VT, Ops, 5);
// Debug-info DECLARE: resolve the variable's frame index and the
// constant-pool global it describes into a DECLARE target node
// (case label elided — presumably ISD::DECLARE).
884 SDValue Chain = Op.getOperand(0);
885 SDValue N1 = Op.getOperand(1);
886 SDValue N2 = Op.getOperand(2);
887 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(N1);
888 // FIXME: handle VLAs.
// Bail out (keeping the chain) whenever the expected shape is absent.
890 ReplaceUses(Op.getValue(0), Chain);
893 if (N2.getOpcode() == ARMISD::PIC_ADD && isa<LoadSDNode>(N2.getOperand(0)))
894 N2 = N2.getOperand(0);
895 LoadSDNode *Ld = dyn_cast<LoadSDNode>(N2);
897 ReplaceUses(Op.getValue(0), Chain);
900 SDValue BasePtr = Ld->getBasePtr();
901 assert(BasePtr.getOpcode() == ARMISD::Wrapper &&
902 isa<ConstantPoolSDNode>(BasePtr.getOperand(0)) &&
903 "llvm.dbg.variable should be a constantpool node");
904 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(BasePtr.getOperand(0));
906 if (CP->isMachineConstantPoolEntry()) {
907 ARMConstantPoolValue *ACPV = (ARMConstantPoolValue*)CP->getMachineCPVal();
910 GV = dyn_cast<GlobalValue>(CP->getConstVal());
912 ReplaceUses(Op.getValue(0), Chain);
916 SDValue Tmp1 = CurDAG->getTargetFrameIndex(FINode->getIndex(),
918 SDValue Tmp2 = CurDAG->getTargetGlobalAddress(GV, TLI.getPointerTy());
919 SDValue Ops[] = { Tmp1, Tmp2, Chain };
920 return CurDAG->getTargetNode(TargetInstrInfo::DECLARE, dl,
// NEON: build a 128-bit vector from two 64-bit halves by inserting each
// defined half into the corresponding D subregister of an IMPLICIT_DEF.
924 case ISD::CONCAT_VECTORS: {
925 MVT VT = Op.getValueType();
926 assert(VT.is128BitVector() && Op.getNumOperands() == 2 &&
927 "unexpected CONCAT_VECTORS");
928 SDValue N0 = Op.getOperand(0);
929 SDValue N1 = Op.getOperand(1);
931 CurDAG->getTargetNode(TargetInstrInfo::IMPLICIT_DEF, dl, VT);
932 if (N0.getOpcode() != ISD::UNDEF)
933 Result = CurDAG->getTargetNode(TargetInstrInfo::INSERT_SUBREG, dl, VT,
934 SDValue(Result, 0), N0,
935 CurDAG->getTargetConstant(arm_dsubreg_0,
937 if (N1.getOpcode() != ISD::UNDEF)
938 Result = CurDAG->getTargetNode(TargetInstrInfo::INSERT_SUBREG, dl, VT,
939 SDValue(Result, 0), N1,
940 CurDAG->getTargetConstant(arm_dsubreg_1,
945 case ISD::VECTOR_SHUFFLE: {
946 MVT VT = Op.getValueType();
948 // Match 128-bit splat to VDUPLANEQ. (This could be done with a Pat in
949 // ARMInstrNEON.td but it is awkward because the shuffle mask needs to be
950 // transformed first into a lane number and then to both a subregister
951 // index and an adjusted lane number.) If the source operand is a
952 // SCALAR_TO_VECTOR, leave it so it will be matched later as a VDUP.
953 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
954 if (VT.is128BitVector() && SVOp->isSplat() &&
955 Op.getOperand(0).getOpcode() != ISD::SCALAR_TO_VECTOR &&
956 Op.getOperand(1).getOpcode() == ISD::UNDEF) {
957 unsigned LaneVal = SVOp->getSplatIndex();
961 switch (VT.getVectorElementType().getSimpleVT()) {
962 default: assert(false && "unhandled VDUP splat type");
963 case MVT::i8: Opc = ARM::VDUPLN8q; HalfVT = MVT::v8i8; break;
964 case MVT::i16: Opc = ARM::VDUPLN16q; HalfVT = MVT::v4i16; break;
965 case MVT::i32: Opc = ARM::VDUPLN32q; HalfVT = MVT::v2i32; break;
966 case MVT::f32: Opc = ARM::VDUPLNfq; HalfVT = MVT::v2f32; break;
969 // The source operand needs to be changed to a subreg of the original
970 // 128-bit operand, and the lane number needs to be adjusted accordingly.
971 unsigned NumElts = VT.getVectorNumElements() / 2;
972 unsigned SRVal = (LaneVal < NumElts ? arm_dsubreg_0 : arm_dsubreg_1);
973 SDValue SR = CurDAG->getTargetConstant(SRVal, MVT::i32);
974 SDValue NewLane = CurDAG->getTargetConstant(LaneVal % NumElts, MVT::i32);
975 SDNode *SubReg = CurDAG->getTargetNode(TargetInstrInfo::EXTRACT_SUBREG,
976 dl, HalfVT, N->getOperand(0), SR);
977 return CurDAG->SelectNodeTo(N, Opc, VT, SDValue(SubReg, 0), NewLane);
// Everything else: fall through to the autogenerated matcher.
984 return SelectCode(Op);
// SelectInlineAsmMemoryOperand - Resolve an inline-asm 'm' (memory)
// constraint by matching the address against addressing mode 2 and pushing
// the resulting Base/Offset/Opc operands into OutOps.
987 bool ARMDAGToDAGISel::
988 SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
989 std::vector<SDValue> &OutOps) {
990 assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
992 SDValue Base, Offset, Opc;
993 if (!SelectAddrMode2(Op, Op, Base, Offset, Opc))
996 OutOps.push_back(Base);
997 OutOps.push_back(Offset);
998 OutOps.push_back(Opc);
1002 /// createARMISelDag - This pass converts a legalized DAG into a
1003 /// ARM-specific DAG, ready for instruction scheduling.
/// The caller (the pass manager) takes ownership of the returned pass.
1005 FunctionPass *llvm::createARMISelDag(ARMTargetMachine &TM) {
1006 return new ARMDAGToDAGISel(TM);