1 //===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines an instruction selector for the ARM target.
12 //===----------------------------------------------------------------------===//
15 #include "ARMAddressingModes.h"
16 #include "ARMConstantPoolValue.h"
17 #include "ARMISelLowering.h"
18 #include "ARMTargetMachine.h"
19 #include "llvm/CallingConv.h"
20 #include "llvm/Constants.h"
21 #include "llvm/DerivedTypes.h"
22 #include "llvm/Function.h"
23 #include "llvm/Intrinsics.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/SelectionDAG.h"
28 #include "llvm/CodeGen/SelectionDAGISel.h"
29 #include "llvm/Target/TargetLowering.h"
30 #include "llvm/Target/TargetOptions.h"
31 #include "llvm/Support/Compiler.h"
32 #include "llvm/Support/Debug.h"
// Subregister indices naming the low (dsubreg_0) and high (dsubreg_1) 64-bit
// D halves of a 128-bit Q register; used below when building
// EXTRACT_SUBREG / INSERT_SUBREG nodes for CONCAT_VECTORS and VECTOR_SHUFFLE.
// NOTE(review): the literal values 5/6 presumably mirror the target's
// register-info subreg numbering — confirm they stay in sync with it.
35 static const unsigned arm_dsubreg_0 = 5;
36 static const unsigned arm_dsubreg_1 = 6;
38 //===--------------------------------------------------------------------===//
39 /// ARMDAGToDAGISel - ARM specific code to select ARM machine
40 /// instructions for SelectionDAG operations.
// NOTE(review): this listing elides some original lines of the class body
// (access specifiers, closing braces, some parameter lines); the visible
// tokens are kept byte-identical.
43 class ARMDAGToDAGISel : public SelectionDAGISel {
// Target machine handle; also used to query the subtarget below.
44 ARMBaseTargetMachine &TM;
46 /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
47 /// make the right decision when generating code for different targets.
48 const ARMSubtarget *Subtarget;
51 explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm)
52 : SelectionDAGISel(tm), TM(tm),
53 Subtarget(&TM.getSubtarget<ARMSubtarget>()) {
// Pass name reported by the pass manager.
56 virtual const char *getPassName() const {
57 return "ARM Instruction Selection";
60 /// getI32Imm - Return a target constant with the specified value, of type i32.
61 inline SDValue getI32Imm(unsigned Imm) {
62 return CurDAG->getTargetConstant(Imm, MVT::i32);
// Main per-node selection entry point (definition below).
65 SDNode *Select(SDValue Op);
66 virtual void InstructionSelect();
// Complex-pattern matchers: each tries to decompose address/operand node N
// (Op is the consuming load/store/instruction) into the listed result
// operands, returning true on a match. Defined later in this file.
67 bool SelectShifterOperandReg(SDValue Op, SDValue N, SDValue &A,
68 SDValue &B, SDValue &C);
69 bool SelectAddrMode2(SDValue Op, SDValue N, SDValue &Base,
70 SDValue &Offset, SDValue &Opc);
71 bool SelectAddrMode2Offset(SDValue Op, SDValue N,
72 SDValue &Offset, SDValue &Opc);
73 bool SelectAddrMode3(SDValue Op, SDValue N, SDValue &Base,
74 SDValue &Offset, SDValue &Opc);
75 bool SelectAddrMode3Offset(SDValue Op, SDValue N,
76 SDValue &Offset, SDValue &Opc);
77 bool SelectAddrMode5(SDValue Op, SDValue N, SDValue &Base,
80 bool SelectAddrModePC(SDValue Op, SDValue N, SDValue &Offset,
// Thumb-1 addressing-mode matchers.
83 bool SelectThumbAddrModeRR(SDValue Op, SDValue N, SDValue &Base,
85 bool SelectThumbAddrModeRI5(SDValue Op, SDValue N, unsigned Scale,
86 SDValue &Base, SDValue &OffImm,
88 bool SelectThumbAddrModeS1(SDValue Op, SDValue N, SDValue &Base,
89 SDValue &OffImm, SDValue &Offset);
90 bool SelectThumbAddrModeS2(SDValue Op, SDValue N, SDValue &Base,
91 SDValue &OffImm, SDValue &Offset);
92 bool SelectThumbAddrModeS4(SDValue Op, SDValue N, SDValue &Base,
93 SDValue &OffImm, SDValue &Offset);
94 bool SelectThumbAddrModeSP(SDValue Op, SDValue N, SDValue &Base,
// Thumb-2 addressing-mode matchers.
97 bool SelectT2ShifterOperandReg(SDValue Op, SDValue N,
98 SDValue &BaseReg, SDValue &Opc);
99 bool SelectT2AddrModeImm12(SDValue Op, SDValue N, SDValue &Base,
101 bool SelectT2AddrModeImm8(SDValue Op, SDValue N, SDValue &Base,
103 bool SelectT2AddrModeImm8s4(SDValue Op, SDValue N, SDValue &Base,
105 bool SelectT2AddrModeSoReg(SDValue Op, SDValue N, SDValue &Base,
106 SDValue &OffReg, SDValue &ShImm);
109 // Include the pieces autogenerated from the target description.
110 #include "ARMGenDAGISel.inc"
113 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
114 /// inline asm expressions.
115 virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
117 std::vector<SDValue> &OutOps);
/// InstructionSelect - Run instruction selection over the whole DAG, then
/// drop any nodes left dead by selection.
/// NOTE(review): the listing elides the lines between the function header and
/// the cleanup call (presumably the SelectRoot/selection driver invocation).
121 void ARMDAGToDAGISel::InstructionSelect() {
// Selection can orphan nodes; prune them so later passes see a clean DAG.
125 CurDAG->RemoveDeadNodes();
/// SelectShifterOperandReg - Match N as an ARM "so_reg" shifter operand:
/// a base register shifted by either an immediate or a register amount.
/// On success fills BaseReg/ShReg and packs the shift opcode + immediate
/// amount into Opc via ARM_AM::getSORegOpc.
128 bool ARMDAGToDAGISel::SelectShifterOperandReg(SDValue Op,
// Classify the node's shift kind (lsl/lsr/asr/ror/rrx or no_shift).
133 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);
135 // Don't match base register only case. That is matched to a separate
136 // lower complexity pattern with explicit register operand.
137 if (ShOpcVal == ARM_AM::no_shift) return false;
139 BaseReg = N.getOperand(0);
140 unsigned ShImmVal = 0;
// Immediate shift amount: register field is left as reg0 and the amount
// (masked to 5 bits) is encoded into the opcode constant instead.
141 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
142 ShReg = CurDAG->getRegister(0, MVT::i32);
143 ShImmVal = RHS->getZExtValue() & 31;
// (else branch elided in this listing) Register shift amount.
145 ShReg = N.getOperand(1);
147 Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
/// SelectAddrMode2 - Match N as an ARM addressing-mode-2 address
/// (base register +/- imm12, or base +/- optionally-shifted register),
/// producing Base, Offset and an AM2 opcode constant Opc.
/// NOTE(review): several structural lines (closing braces, else branches,
/// `return true;`) are elided in this listing; code kept verbatim.
152 bool ARMDAGToDAGISel::SelectAddrMode2(SDValue Op, SDValue N,
153 SDValue &Base, SDValue &Offset,
// Strength-reduced multiply: fold X * (2^n +/- 1) as X +/- (X << n).
155 if (N.getOpcode() == ISD::MUL) {
156 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
157 // X * [3,5,9] -> X + X * [2,4,8] etc.
158 int RHSC = (int)RHS->getZExtValue();
161 ARM_AM::AddrOpc AddSub = ARM_AM::add;
163 AddSub = ARM_AM::sub;
166 if (isPowerOf2_32(RHSC)) {
167 unsigned ShAmt = Log2_32(RHSC);
// Base and offset are the same register; shift amount carries the scale.
168 Base = Offset = N.getOperand(0);
169 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
// Not an add/sub: treat the whole node as the base with a zero offset,
// unwrapping frame indices and ARMISD::Wrapper-ed globals first.
178 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB) {
180 if (N.getOpcode() == ISD::FrameIndex) {
181 int FI = cast<FrameIndexSDNode>(N)->getIndex();
182 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
183 } else if (N.getOpcode() == ARMISD::Wrapper) {
184 Base = N.getOperand(0);
186 Offset = CurDAG->getRegister(0, MVT::i32);
187 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
193 // Match simple R +/- imm12 operands.
194 if (N.getOpcode() == ISD::ADD)
195 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
196 int RHSC = (int)RHS->getZExtValue();
197 if ((RHSC >= 0 && RHSC < 0x1000) ||
198 (RHSC < 0 && RHSC > -0x1000)) { // 12 bits.
199 Base = N.getOperand(0);
200 if (Base.getOpcode() == ISD::FrameIndex) {
201 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
202 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
204 Offset = CurDAG->getRegister(0, MVT::i32);
// Negative immediates become a `sub` with the magnitude (negation elided
// in this listing, presumably on the missing lines).
206 ARM_AM::AddrOpc AddSub = ARM_AM::add;
208 AddSub = ARM_AM::sub;
211 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,
218 // Otherwise this is R +/- [possibly shifted] R
219 ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::ADD ? ARM_AM::add:ARM_AM::sub;
220 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(1));
223 Base = N.getOperand(0);
224 Offset = N.getOperand(1);
226 if (ShOpcVal != ARM_AM::no_shift) {
227 // Check to see if the RHS of the shift is a constant, if not, we can't fold
229 if (ConstantSDNode *Sh =
230 dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
231 ShAmt = Sh->getZExtValue();
232 Offset = N.getOperand(1).getOperand(0);
// Variable shift amount: cannot be folded into the addressing mode.
234 ShOpcVal = ARM_AM::no_shift;
238 // Try matching (R shl C) + (R).
239 if (N.getOpcode() == ISD::ADD && ShOpcVal == ARM_AM::no_shift) {
240 ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0));
241 if (ShOpcVal != ARM_AM::no_shift) {
242 // Check to see if the RHS of the shift is a constant, if not, we can't
244 if (ConstantSDNode *Sh =
245 dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
246 ShAmt = Sh->getZExtValue();
// Shift is on the LHS, so operands swap roles: LHS feeds Offset.
247 Offset = N.getOperand(0).getOperand(0);
248 Base = N.getOperand(1);
250 ShOpcVal = ARM_AM::no_shift;
255 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
/// SelectAddrMode2Offset - Match the offset operand N of a pre/post-indexed
/// load or store Op as an addressing-mode-2 offset (imm12 or optionally
/// shifted register). Whether it is add or sub comes from Op's indexed mode.
260 bool ARMDAGToDAGISel::SelectAddrMode2Offset(SDValue Op, SDValue N,
261 SDValue &Offset, SDValue &Opc) {
262 unsigned Opcode = Op.getOpcode();
// Op is the indexed memory op itself; pull its addressing mode.
263 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
264 ? cast<LoadSDNode>(Op)->getAddressingMode()
265 : cast<StoreSDNode>(Op)->getAddressingMode();
266 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
267 ? ARM_AM::add : ARM_AM::sub;
// Constant offset: must fit in 12 bits; register field stays reg0.
268 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
269 int Val = (int)C->getZExtValue();
270 if (Val >= 0 && Val < 0x1000) { // 12 bits.
271 Offset = CurDAG->getRegister(0, MVT::i32);
272 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
// Otherwise a (possibly shifted) register offset.
280 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);
282 if (ShOpcVal != ARM_AM::no_shift) {
283 // Check to see if the RHS of the shift is a constant, if not, we can't fold
285 if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
286 ShAmt = Sh->getZExtValue();
287 Offset = N.getOperand(0);
289 ShOpcVal = ARM_AM::no_shift;
293 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
/// SelectAddrMode3 - Match N as an ARM addressing-mode-3 address
/// (halfword/signed-byte loads: base +/- imm8 or base +/- register).
299 bool ARMDAGToDAGISel::SelectAddrMode3(SDValue Op, SDValue N,
300 SDValue &Base, SDValue &Offset,
// Explicit SUB: register-register subtract form.
302 if (N.getOpcode() == ISD::SUB) {
303 // X - C is canonicalize to X + -C, no need to handle it here.
304 Base = N.getOperand(0);
305 Offset = N.getOperand(1);
306 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),MVT::i32);
// Not an ADD: whole node is the base, zero offset (frame index unwrapped).
310 if (N.getOpcode() != ISD::ADD) {
312 if (N.getOpcode() == ISD::FrameIndex) {
313 int FI = cast<FrameIndexSDNode>(N)->getIndex();
314 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
316 Offset = CurDAG->getRegister(0, MVT::i32);
317 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32);
321 // If the RHS is +/- imm8, fold into addr mode.
322 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
323 int RHSC = (int)RHS->getZExtValue();
324 if ((RHSC >= 0 && RHSC < 256) ||
325 (RHSC < 0 && RHSC > -256)) { // note -256 itself isn't allowed.
326 Base = N.getOperand(0);
327 if (Base.getOpcode() == ISD::FrameIndex) {
328 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
329 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
331 Offset = CurDAG->getRegister(0, MVT::i32);
333 ARM_AM::AddrOpc AddSub = ARM_AM::add;
// (negation of RHSC for the sub case elided in this listing)
335 AddSub = ARM_AM::sub;
338 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32);
// Fallback: plain register + register.
343 Base = N.getOperand(0);
344 Offset = N.getOperand(1);
345 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), MVT::i32);
/// SelectAddrMode3Offset - Match the offset operand N of a pre/post-indexed
/// halfword/signed-byte load or store Op (imm8 or plain register offset).
349 bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDValue Op, SDValue N,
350 SDValue &Offset, SDValue &Opc) {
351 unsigned Opcode = Op.getOpcode();
352 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
353 ? cast<LoadSDNode>(Op)->getAddressingMode()
354 : cast<StoreSDNode>(Op)->getAddressingMode();
// Direction (add vs sub) comes from whether the indexed mode increments.
355 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
356 ? ARM_AM::add : ARM_AM::sub;
// Constant offset: AM3 only allows 8 bits.
357 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
358 int Val = (int)C->getZExtValue();
359 if (Val >= 0 && Val < 256) {
360 Offset = CurDAG->getRegister(0, MVT::i32);
361 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);
// Register offset (no shift support in addressing mode 3).
367 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), MVT::i32);
/// SelectAddrMode5 - Match N as an ARM addressing-mode-5 address
/// (VFP load/store: base +/- imm8 scaled by 4). The AM5 opcode/immediate
/// pair is packed into the Offset result, not a separate Opc.
372 bool ARMDAGToDAGISel::SelectAddrMode5(SDValue Op, SDValue N,
373 SDValue &Base, SDValue &Offset) {
// Not an ADD: whole node is the base with a zero offset.
374 if (N.getOpcode() != ISD::ADD) {
376 if (N.getOpcode() == ISD::FrameIndex) {
377 int FI = cast<FrameIndexSDNode>(N)->getIndex();
378 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
379 } else if (N.getOpcode() == ARMISD::Wrapper) {
380 Base = N.getOperand(0);
382 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
387 // If the RHS is +/- imm8, fold into addr mode.
388 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
389 int RHSC = (int)RHS->getZExtValue();
390 if ((RHSC & 3) == 0) { // The constant is implicitly multiplied by 4.
// (the >>= 2 scaling of RHSC is elided in this listing)
392 if ((RHSC >= 0 && RHSC < 256) ||
393 (RHSC < 0 && RHSC > -256)) { // note -256 itself isn't allowed.
394 Base = N.getOperand(0);
395 if (Base.getOpcode() == ISD::FrameIndex) {
396 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
397 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
400 ARM_AM::AddrOpc AddSub = ARM_AM::add;
402 AddSub = ARM_AM::sub;
405 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
// Fallback: base only, zero offset.
413 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
/// SelectAddrModePC - Match N as a PC-relative address formed by an
/// ARMISD::PIC_ADD with a single use; extracts the offset operand and the
/// constant-pool label id.
418 bool ARMDAGToDAGISel::SelectAddrModePC(SDValue Op, SDValue N,
419 SDValue &Offset, SDValue &Label) {
// hasOneUse: folding a multiply-used PIC_ADD would duplicate it.
420 if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
421 Offset = N.getOperand(0);
422 SDValue N1 = N.getOperand(1);
423 Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
/// SelectThumbAddrModeRR - Match N as a Thumb register+register address.
/// A non-ADD node becomes base + an explicitly materialized zero register.
430 bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue Op, SDValue N,
431 SDValue &Base, SDValue &Offset){
432 // FIXME dl should come from the parent load or store, not the address
433 DebugLoc dl = Op.getDebugLoc();
434 if (N.getOpcode() != ISD::ADD) {
436 // We must materialize a zero in a reg! Returning a constant here
437 // wouldn't work without additional code to position the node within
438 // ISel's topological ordering in a place where ISel will process it
439 // normally. Instead, just explicitly issue a tMOVri8 node!
440 Offset = SDValue(CurDAG->getTargetNode(ARM::tMOVi8, dl, MVT::i32,
441 CurDAG->getTargetConstant(0, MVT::i32)), 0);
// ADD: use its two operands directly.
445 Base = N.getOperand(0);
446 Offset = N.getOperand(1);
/// SelectThumbAddrModeRI5 - Shared matcher for the Thumb reg + imm5*Scale
/// addressing modes (Scale = 1/2/4 for byte/half/word accesses). Declines
/// addresses that are better served by SP-relative or PC-relative loads.
451 ARMDAGToDAGISel::SelectThumbAddrModeRI5(SDValue Op, SDValue N,
452 unsigned Scale, SDValue &Base,
453 SDValue &OffImm, SDValue &Offset) {
455 SDValue TmpBase, TmpOffImm;
456 if (SelectThumbAddrModeSP(Op, N, TmpBase, TmpOffImm))
457 return false; // We want to select tLDRspi / tSTRspi instead.
458 if (N.getOpcode() == ARMISD::Wrapper &&
459 N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
460 return false; // We want to select tLDRpci instead.
// Not an ADD: whole node (unwrapped) is the base, zero register offset and
// zero immediate.
463 if (N.getOpcode() != ISD::ADD) {
464 Base = (N.getOpcode() == ARMISD::Wrapper) ? N.getOperand(0) : N;
465 Offset = CurDAG->getRegister(0, MVT::i32);
466 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
470 // Thumb does not have [sp, r] address mode.
471 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
472 RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
473 if ((LHSR && LHSR->getReg() == ARM::SP) ||
474 (RHSR && RHSR->getReg() == ARM::SP)) {
476 Offset = CurDAG->getRegister(0, MVT::i32);
477 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
481 // If the RHS is + imm5 * scale, fold into addr mode.
482 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
483 int RHSC = (int)RHS->getZExtValue();
484 if ((RHSC & (Scale-1)) == 0) { // The constant is implicitly multiplied.
// (the /= Scale division of RHSC is elided in this listing)
486 if (RHSC >= 0 && RHSC < 32) {
487 Base = N.getOperand(0);
488 Offset = CurDAG->getRegister(0, MVT::i32);
489 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
// Fallback: register + register form.
495 Base = N.getOperand(0);
496 Offset = N.getOperand(1);
497 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
/// SelectThumbAddrModeS1 - Byte access: RI5 with scale 1.
501 bool ARMDAGToDAGISel::SelectThumbAddrModeS1(SDValue Op, SDValue N,
502 SDValue &Base, SDValue &OffImm,
504 return SelectThumbAddrModeRI5(Op, N, 1, Base, OffImm, Offset);
/// SelectThumbAddrModeS2 - Halfword access: RI5 with scale 2.
507 bool ARMDAGToDAGISel::SelectThumbAddrModeS2(SDValue Op, SDValue N,
508 SDValue &Base, SDValue &OffImm,
510 return SelectThumbAddrModeRI5(Op, N, 2, Base, OffImm, Offset);
/// SelectThumbAddrModeS4 - Word access: RI5 with scale 4.
513 bool ARMDAGToDAGISel::SelectThumbAddrModeS4(SDValue Op, SDValue N,
514 SDValue &Base, SDValue &OffImm,
516 return SelectThumbAddrModeRI5(Op, N, 4, Base, OffImm, Offset);
/// SelectThumbAddrModeSP - Match N as a Thumb SP-relative (or frame-index-
/// relative) address: [sp/fi + imm8*4].
519 bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue Op, SDValue N,
520 SDValue &Base, SDValue &OffImm) {
// Bare frame index: offset zero.
521 if (N.getOpcode() == ISD::FrameIndex) {
522 int FI = cast<FrameIndexSDNode>(N)->getIndex();
523 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
524 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
528 if (N.getOpcode() != ISD::ADD)
// Only ADDs whose LHS is a frame index or the SP register qualify.
531 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
532 if (N.getOperand(0).getOpcode() == ISD::FrameIndex ||
533 (LHSR && LHSR->getReg() == ARM::SP)) {
534 // If the RHS is + imm8 * scale, fold into addr mode.
535 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
536 int RHSC = (int)RHS->getZExtValue();
537 if ((RHSC & 3) == 0) { // The constant is implicitly multiplied.
// (the /= 4 scaling of RHSC is elided in this listing)
539 if (RHSC >= 0 && RHSC < 256) {
540 Base = N.getOperand(0);
541 if (Base.getOpcode() == ISD::FrameIndex) {
542 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
543 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
545 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
/// SelectT2ShifterOperandReg - Thumb-2 shifter operand: base register
/// shifted by an immediate amount only (no register-shift form in Thumb-2).
555 bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDValue Op, SDValue N,
558 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);
560 // Don't match base register only case. That is matched to a separate
561 // lower complexity pattern with explicit register operand.
562 if (ShOpcVal == ARM_AM::no_shift) return false;
564 BaseReg = N.getOperand(0);
565 unsigned ShImmVal = 0;
// Only a constant shift amount can be encoded; pack kind+amount into Opc.
566 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
567 ShImmVal = RHS->getZExtValue() & 31;
568 Opc = getI32Imm(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal));
/// SelectT2AddrModeImm12 - Thumb-2 [reg + imm12] address (positive offsets
/// only).
575 bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue Op, SDValue N,
576 SDValue &Base, SDValue &OffImm) {
577 // Match simple R + imm12 operands.
578 if (N.getOpcode() != ISD::ADD)
581 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
582 int RHSC = (int)RHS->getZExtValue();
583 if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits.
584 Base = N.getOperand(0);
585 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
/// SelectT2AddrModeImm8 - Thumb-2 [reg - imm8] address. Matches negative
/// offsets only, from either ADD of a negative constant or SUB of a
/// positive one; positive offsets go to the imm12 form instead.
593 bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue Op, SDValue N,
594 SDValue &Base, SDValue &OffImm) {
595 if (N.getOpcode() == ISD::ADD) {
596 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
597 int RHSC = (int)RHS->getZExtValue();
598 if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
599 Base = N.getOperand(0);
600 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
604 } else if (N.getOpcode() == ISD::SUB) {
605 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
606 int RHSC = (int)RHS->getZExtValue();
607 if (RHSC >= 0 && RHSC < 0x100) { // 8 bits.
608 Base = N.getOperand(0);
// SUB of +C is the same address as ADD of -C: negate for the encoding.
609 OffImm = CurDAG->getTargetConstant(-RHSC, MVT::i32);
/// SelectT2AddrModeImm8s4 - Thumb-2 [reg +/- imm8*4] address (doubleword
/// loads/stores); the constant must be 4-byte aligned.
618 bool ARMDAGToDAGISel::SelectT2AddrModeImm8s4(SDValue Op, SDValue N,
619 SDValue &Base, SDValue &OffImm) {
620 if (N.getOpcode() == ISD::ADD) {
621 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
622 int RHSC = (int)RHS->getZExtValue();
// -0x400..0: imm8 range after the implicit *4 scaling.
623 if (((RHSC & 0x3) == 0) && (RHSC < 0 && RHSC > -0x400)) { // 8 bits.
624 Base = N.getOperand(0);
625 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
629 } else if (N.getOpcode() == ISD::SUB) {
630 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
631 int RHSC = (int)RHS->getZExtValue();
632 if (((RHSC & 0x3) == 0) && (RHSC >= 0 && RHSC < 0x400)) { // 8 bits.
633 Base = N.getOperand(0);
// SUB of +C encodes as the negated immediate.
634 OffImm = CurDAG->getTargetConstant(-RHSC, MVT::i32);
/// SelectT2AddrModeSoReg - Thumb-2 [reg + reg << {0-3}] address. Declines
/// when a simpler reg +/- imm form would match.
643 bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue Op, SDValue N,
645 SDValue &OffReg, SDValue &ShImm) {
// Not add/sub: whole node is the base, zero offset register/shift.
647 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB) {
649 if (N.getOpcode() == ISD::FrameIndex) {
650 int FI = cast<FrameIndexSDNode>(N)->getIndex();
651 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
652 } else if (N.getOpcode() == ARMISD::Wrapper) {
653 Base = N.getOperand(0);
654 if (Base.getOpcode() == ISD::TargetConstantPool)
655 return false; // We want to select t2LDRpci instead.
657 OffReg = CurDAG->getRegister(0, MVT::i32);
658 ShImm = CurDAG->getTargetConstant(0, MVT::i32);
662 // Look for (R + R) or (R + (R << [1,2,3])).
664 Base = N.getOperand(0);
665 OffReg = N.getOperand(1);
667 // Swap if it is ((R << c) + R).
668 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg);
669 if (ShOpcVal != ARM_AM::lsl) {
670 ShOpcVal = ARM_AM::getShiftOpcForNode(Base);
671 if (ShOpcVal == ARM_AM::lsl)
672 std::swap(Base, OffReg);
675 if (ShOpcVal == ARM_AM::lsl) {
676 // Check to see if the RHS of the shift is a constant, if not, we can't fold
678 if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) {
679 ShAmt = Sh->getZExtValue();
// (the range check on ShAmt is elided in this listing; out-of-range
// amounts fall through to no_shift below)
682 ShOpcVal = ARM_AM::no_shift;
684 OffReg = OffReg.getOperand(0);
686 ShOpcVal = ARM_AM::no_shift;
688 } else if (SelectT2AddrModeImm12(Op, N, Base, ShImm) ||
689 SelectT2AddrModeImm8 (Op, N, Base, ShImm))
690 // Don't match if it's possible to match to one of the r +/- imm cases.
693 ShImm = CurDAG->getTargetConstant(ShAmt, MVT::i32);
698 //===--------------------------------------------------------------------===//
700 /// getAL - Returns a ARMCC::AL immediate node.
// Used throughout Select() as the "always" predicate operand for
// predicated ARM instructions.
701 static inline SDValue getAL(SelectionDAG *CurDAG) {
702 return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32);
/// Select - Main per-node instruction-selection hook. Handles the ARM
/// special cases by hand and defers everything else to the TableGen'd
/// SelectCode at the end.
/// NOTE(review): this listing elides many original lines (case labels,
/// closing braces, variable declarations such as `bool UseCP`, `SDValue
/// CPIdx`, `unsigned Opcode`); visible code is kept byte-identical.
706 SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
707 SDNode *N = Op.getNode();
708 DebugLoc dl = N->getDebugLoc();
710 if (N->isMachineOpcode())
711 return NULL; // Already selected.
713 switch (N->getOpcode()) {
// --- ISD::Constant: decide whether the immediate needs a constant-pool
// load (tLDRcp / LDRcp) instead of MOV/MVN-style materialization.
715 case ISD::Constant: {
716 unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
718 if (Subtarget->isThumb()) {
719 if (Subtarget->hasThumb2())
720 // Thumb2 has the MOVT instruction, so all immediates can
721 // be done with MOV + MOVT, at worst.
724 UseCP = (Val > 255 && // MOV
725 ~Val > 255 && // MOV + MVN
726 !ARM_AM::isThumbImmShiftedVal(Val)); // MOV + LSL
728 UseCP = (ARM_AM::getSOImmVal(Val) == -1 && // MOV
729 ARM_AM::getSOImmVal(~Val) == -1 && // MVN
730 !ARM_AM::isSOImmTwoPartVal(Val)); // two instrs.
// Build a constant-pool entry holding the 32-bit value.
733 CurDAG->getTargetConstantPool(ConstantInt::get(Type::Int32Ty, Val),
737 if (Subtarget->isThumb())
738 ResNode = CurDAG->getTargetNode(ARM::tLDRcp, dl, MVT::i32, MVT::Other,
739 CPIdx, CurDAG->getEntryNode());
// ARM-mode operand list for LDRcp (offset reg, AM2 opcode, predicate,
// predicate register, chain); some surrounding lines elided.
743 CurDAG->getRegister(0, MVT::i32),
744 CurDAG->getTargetConstant(0, MVT::i32),
746 CurDAG->getRegister(0, MVT::i32),
747 CurDAG->getEntryNode()
749 ResNode=CurDAG->getTargetNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
752 ReplaceUses(Op, SDValue(ResNode, 0));
756 // Other cases are autogenerated.
759 case ISD::FrameIndex: {
760 // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
761 int FI = cast<FrameIndexSDNode>(N)->getIndex();
762 SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
763 if (Subtarget->isThumb()) {
764 return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, TFI,
765 CurDAG->getTargetConstant(0, MVT::i32));
767 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
768 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
769 CurDAG->getRegister(0, MVT::i32) };
770 return CurDAG->SelectNodeTo(N, ARM::ADDri, MVT::i32, Ops, 5);
// --- (case label elided) Thumb ADD involving SP.
774 if (!Subtarget->isThumb())
776 // Select add sp, c to tADDhirr.
777 SDValue N0 = Op.getOperand(0);
778 SDValue N1 = Op.getOperand(1);
779 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(Op.getOperand(0));
780 RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(Op.getOperand(1));
// Canonicalize so SP (if present) ends up on the RHS.
781 if (LHSR && LHSR->getReg() == ARM::SP) {
783 std::swap(LHSR, RHSR);
785 if (RHSR && RHSR->getReg() == ARM::SP) {
786 SDValue Val = SDValue(CurDAG->getTargetNode(ARM::tMOVlor2hir, dl,
787 Op.getValueType(), N0, N0), 0);
788 return CurDAG->SelectNodeTo(N, ARM::tADDhirr, Op.getValueType(), Val, N1);
// --- (case label elided, presumably ISD::MUL) Strength-reduce
// multiply by 2^n +/- 1 to a shifted add (ADDrs) / reverse-sub (RSBrs).
793 if (Subtarget->isThumb())
795 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
796 unsigned RHSV = C->getZExtValue();
798 if (isPowerOf2_32(RHSV-1)) { // 2^n+1?
799 SDValue V = Op.getOperand(0);
800 unsigned ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, Log2_32(RHSV-1));
801 SDValue Ops[] = { V, V, CurDAG->getRegister(0, MVT::i32),
802 CurDAG->getTargetConstant(ShImm, MVT::i32),
803 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
804 CurDAG->getRegister(0, MVT::i32) };
805 return CurDAG->SelectNodeTo(N, ARM::ADDrs, MVT::i32, Ops, 7);
807 if (isPowerOf2_32(RHSV+1)) { // 2^n-1?
808 SDValue V = Op.getOperand(0);
809 unsigned ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, Log2_32(RHSV+1));
810 SDValue Ops[] = { V, V, CurDAG->getRegister(0, MVT::i32),
811 CurDAG->getTargetConstant(ShImm, MVT::i32),
812 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
813 CurDAG->getRegister(0, MVT::i32) };
814 return CurDAG->SelectNodeTo(N, ARM::RSBrs, MVT::i32, Ops, 7);
// --- (case label elided) f64 bit-pattern move to a GPR pair via FMRRD.
819 return CurDAG->getTargetNode(ARM::FMRRD, dl, MVT::i32, MVT::i32,
820 Op.getOperand(0), getAL(CurDAG),
821 CurDAG->getRegister(0, MVT::i32));
// --- 32x32->64 multiplies select directly to UMULL / SMULL.
822 case ISD::UMUL_LOHI: {
823 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
824 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
825 CurDAG->getRegister(0, MVT::i32) };
826 return CurDAG->getTargetNode(ARM::UMULL, dl, MVT::i32, MVT::i32, Ops, 5);
828 case ISD::SMUL_LOHI: {
829 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
830 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
831 CurDAG->getRegister(0, MVT::i32) };
832 return CurDAG->getTargetNode(ARM::SMULL, dl, MVT::i32, MVT::i32, Ops, 5);
// --- (case label elided, presumably ISD::LOAD) Pre/post-indexed loads:
// pick the AM2/AM3 _PRE/_POST opcode matching the loaded type.
835 LoadSDNode *LD = cast<LoadSDNode>(Op);
836 ISD::MemIndexedMode AM = LD->getAddressingMode();
837 MVT LoadedVT = LD->getMemoryVT();
838 if (AM != ISD::UNINDEXED) {
839 SDValue Offset, AMOpc;
840 bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
843 if (LoadedVT == MVT::i32 &&
844 SelectAddrMode2Offset(Op, LD->getOffset(), Offset, AMOpc)) {
845 Opcode = isPre ? ARM::LDR_PRE : ARM::LDR_POST;
847 } else if (LoadedVT == MVT::i16 &&
848 SelectAddrMode3Offset(Op, LD->getOffset(), Offset, AMOpc)) {
850 Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
851 ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
852 : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
853 } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
// Sign-extending byte loads use AM3 (LDRSB); zero-extending use AM2.
854 if (LD->getExtensionType() == ISD::SEXTLOAD) {
855 if (SelectAddrMode3Offset(Op, LD->getOffset(), Offset, AMOpc)) {
857 Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST;
860 if (SelectAddrMode2Offset(Op, LD->getOffset(), Offset, AMOpc)) {
862 Opcode = isPre ? ARM::LDRB_PRE : ARM::LDRB_POST;
// Emit the indexed load: results are the loaded value and the written-
// back base (plus chain).
868 SDValue Chain = LD->getChain();
869 SDValue Base = LD->getBasePtr();
870 SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG),
871 CurDAG->getRegister(0, MVT::i32), Chain };
872 return CurDAG->getTargetNode(Opcode, dl, MVT::i32, MVT::i32,
876 // Other cases are autogenerated.
879 case ARMISD::BRCOND: {
880 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
881 // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc)
882 // Pattern complexity = 6 cost = 1 size = 0
884 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
885 // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc)
886 // Pattern complexity = 6 cost = 1 size = 0
888 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
889 // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc)
890 // Pattern complexity = 6 cost = 1 size = 0
892 unsigned Opc = Subtarget->isThumb() ?
893 ((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
894 SDValue Chain = Op.getOperand(0);
895 SDValue N1 = Op.getOperand(1);
896 SDValue N2 = Op.getOperand(2);
897 SDValue N3 = Op.getOperand(3);
898 SDValue InFlag = Op.getOperand(4);
899 assert(N1.getOpcode() == ISD::BasicBlock);
900 assert(N2.getOpcode() == ISD::Constant);
901 assert(N3.getOpcode() == ISD::Register);
// Condition code travels as an immediate operand.
903 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
904 cast<ConstantSDNode>(N2)->getZExtValue()),
906 SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
907 SDNode *ResNode = CurDAG->getTargetNode(Opc, dl, MVT::Other,
909 Chain = SDValue(ResNode, 0);
910 if (Op.getNode()->getNumValues() == 2) {
911 InFlag = SDValue(ResNode, 1);
912 ReplaceUses(SDValue(Op.getNode(), 1), InFlag);
914 ReplaceUses(SDValue(Op.getNode(), 0), SDValue(Chain.getNode(), Chain.getResNo()));
// --- (case label elided, presumably ARMISD::CMOV) Integer conditional
// move: try shifter-operand and so_imm forms before plain register.
918 bool isThumb = Subtarget->isThumb();
919 MVT VT = Op.getValueType();
920 SDValue N0 = Op.getOperand(0);
921 SDValue N1 = Op.getOperand(1);
922 SDValue N2 = Op.getOperand(2);
923 SDValue N3 = Op.getOperand(3);
924 SDValue InFlag = Op.getOperand(4);
925 assert(N2.getOpcode() == ISD::Constant);
926 assert(N3.getOpcode() == ISD::Register);
928 // Pattern: (ARMcmov:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
929 // Emits: (MOVCCs:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
930 // Pattern complexity = 18 cost = 1 size = 0
934 if (!isThumb && VT == MVT::i32 &&
935 SelectShifterOperandReg(Op, N1, CPTmp0, CPTmp1, CPTmp2)) {
936 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
937 cast<ConstantSDNode>(N2)->getZExtValue()),
939 SDValue Ops[] = { N0, CPTmp0, CPTmp1, CPTmp2, Tmp2, N3, InFlag };
940 return CurDAG->SelectNodeTo(Op.getNode(), ARM::MOVCCs, MVT::i32, Ops, 7);
943 // Pattern: (ARMcmov:i32 GPR:i32:$false,
944 // (imm:i32)<<P:Predicate_so_imm>><<X:so_imm_XFORM>>:$true,
946 // Emits: (MOVCCi:i32 GPR:i32:$false,
947 // (so_imm_XFORM:i32 (imm:i32):$true), (imm:i32):$cc)
948 // Pattern complexity = 10 cost = 1 size = 0
// NOTE(review): the predicate/transform here are applied to N3 but the
// immediate is read from N1 — looks inconsistent (N3 is asserted to be a
// Register above); confirm against the original file before relying on it.
949 if (VT == MVT::i32 &&
950 N3.getOpcode() == ISD::Constant &&
951 Predicate_so_imm(N3.getNode())) {
952 SDValue Tmp1 = CurDAG->getTargetConstant(((unsigned)
953 cast<ConstantSDNode>(N1)->getZExtValue()),
955 Tmp1 = Transform_so_imm_XFORM(Tmp1.getNode());
956 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
957 cast<ConstantSDNode>(N2)->getZExtValue()),
959 SDValue Ops[] = { N0, Tmp1, Tmp2, N3, InFlag };
960 return CurDAG->SelectNodeTo(Op.getNode(), ARM::MOVCCi, MVT::i32, Ops, 5);
963 // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
964 // Emits: (MOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
965 // Pattern complexity = 6 cost = 1 size = 0
967 // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
968 // Emits: (tMOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
969 // Pattern complexity = 6 cost = 11 size = 0
971 // Also FCPYScc and FCPYDcc.
972 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
973 cast<ConstantSDNode>(N2)->getZExtValue()),
975 SDValue Ops[] = { N0, N1, Tmp2, N3, InFlag };
977 switch (VT.getSimpleVT()) {
978 default: assert(false && "Illegal conditional move type!");
981 Opc = isThumb ? ARM::tMOVCCr : ARM::MOVCCr;
990 return CurDAG->SelectNodeTo(Op.getNode(), Opc, VT, Ops, 5);
// --- (case label elided, presumably ARMISD::CNEG or similar) Same
// operand layout as the cmov case above, dispatching on value type.
993 MVT VT = Op.getValueType();
994 SDValue N0 = Op.getOperand(0);
995 SDValue N1 = Op.getOperand(1);
996 SDValue N2 = Op.getOperand(2);
997 SDValue N3 = Op.getOperand(3);
998 SDValue InFlag = Op.getOperand(4);
999 assert(N2.getOpcode() == ISD::Constant);
1000 assert(N3.getOpcode() == ISD::Register);
1002 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
1003 cast<ConstantSDNode>(N2)->getZExtValue()),
1005 SDValue Ops[] = { N0, N1, Tmp2, N3, InFlag };
1007 switch (VT.getSimpleVT()) {
1008 default: assert(false && "Illegal conditional move type!");
1017 return CurDAG->SelectNodeTo(Op.getNode(), Opc, VT, Ops, 5);
// --- ISD::DECLARE: lower llvm.dbg.declare to a DECLARE pseudo with the
// frame index and the global it describes; bail out (dropping to just the
// chain) for shapes it cannot handle.
1020 case ISD::DECLARE: {
1021 SDValue Chain = Op.getOperand(0);
1022 SDValue N1 = Op.getOperand(1);
1023 SDValue N2 = Op.getOperand(2);
1024 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(N1);
1025 // FIXME: handle VLAs.
1027 ReplaceUses(Op.getValue(0), Chain);
1030 if (N2.getOpcode() == ARMISD::PIC_ADD && isa<LoadSDNode>(N2.getOperand(0)))
1031 N2 = N2.getOperand(0);
1032 LoadSDNode *Ld = dyn_cast<LoadSDNode>(N2);
1034 ReplaceUses(Op.getValue(0), Chain);
1037 SDValue BasePtr = Ld->getBasePtr();
1038 assert(BasePtr.getOpcode() == ARMISD::Wrapper &&
1039 isa<ConstantPoolSDNode>(BasePtr.getOperand(0)) &&
1040 "llvm.dbg.variable should be a constantpool node");
1041 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(BasePtr.getOperand(0));
1042 GlobalValue *GV = 0;
1043 if (CP->isMachineConstantPoolEntry()) {
1044 ARMConstantPoolValue *ACPV = (ARMConstantPoolValue*)CP->getMachineCPVal();
1047 GV = dyn_cast<GlobalValue>(CP->getConstVal());
1049 ReplaceUses(Op.getValue(0), Chain);
1053 SDValue Tmp1 = CurDAG->getTargetFrameIndex(FINode->getIndex(),
1054 TLI.getPointerTy());
1055 SDValue Tmp2 = CurDAG->getTargetGlobalAddress(GV, TLI.getPointerTy());
1056 SDValue Ops[] = { Tmp1, Tmp2, Chain };
1057 return CurDAG->getTargetNode(TargetInstrInfo::DECLARE, dl,
1058 MVT::Other, Ops, 3);
// --- ISD::CONCAT_VECTORS: build the 128-bit value by inserting each
// 64-bit half into an IMPLICIT_DEF via INSERT_SUBREG; undef halves are
// simply skipped.
1061 case ISD::CONCAT_VECTORS: {
1062 MVT VT = Op.getValueType();
1063 assert(VT.is128BitVector() && Op.getNumOperands() == 2 &&
1064 "unexpected CONCAT_VECTORS");
1065 SDValue N0 = Op.getOperand(0);
1066 SDValue N1 = Op.getOperand(1);
1068 CurDAG->getTargetNode(TargetInstrInfo::IMPLICIT_DEF, dl, VT);
1069 if (N0.getOpcode() != ISD::UNDEF)
1070 Result = CurDAG->getTargetNode(TargetInstrInfo::INSERT_SUBREG, dl, VT,
1071 SDValue(Result, 0), N0,
1072 CurDAG->getTargetConstant(arm_dsubreg_0,
1074 if (N1.getOpcode() != ISD::UNDEF)
1075 Result = CurDAG->getTargetNode(TargetInstrInfo::INSERT_SUBREG, dl, VT,
1076 SDValue(Result, 0), N1,
1077 CurDAG->getTargetConstant(arm_dsubreg_1,
1082 case ISD::VECTOR_SHUFFLE: {
1083 MVT VT = Op.getValueType();
1085 // Match 128-bit splat to VDUPLANEQ. (This could be done with a Pat in
1086 // ARMInstrNEON.td but it is awkward because the shuffle mask needs to be
1087 // transformed first into a lane number and then to both a subregister
1088 // index and an adjusted lane number.) If the source operand is a
1089 // SCALAR_TO_VECTOR, leave it so it will be matched later as a VDUP.
1090 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1091 if (VT.is128BitVector() && SVOp->isSplat() &&
1092 Op.getOperand(0).getOpcode() != ISD::SCALAR_TO_VECTOR &&
1093 Op.getOperand(1).getOpcode() == ISD::UNDEF) {
1094 unsigned LaneVal = SVOp->getSplatIndex();
1098 switch (VT.getVectorElementType().getSimpleVT()) {
1099 default: assert(false && "unhandled VDUP splat type");
1100 case MVT::i8: Opc = ARM::VDUPLN8q; HalfVT = MVT::v8i8; break;
1101 case MVT::i16: Opc = ARM::VDUPLN16q; HalfVT = MVT::v4i16; break;
1102 case MVT::i32: Opc = ARM::VDUPLN32q; HalfVT = MVT::v2i32; break;
1103 case MVT::f32: Opc = ARM::VDUPLNfq; HalfVT = MVT::v2f32; break;
1106 // The source operand needs to be changed to a subreg of the original
1107 // 128-bit operand, and the lane number needs to be adjusted accordingly.
1108 unsigned NumElts = VT.getVectorNumElements() / 2;
1109 unsigned SRVal = (LaneVal < NumElts ? arm_dsubreg_0 : arm_dsubreg_1);
1110 SDValue SR = CurDAG->getTargetConstant(SRVal, MVT::i32);
1111 SDValue NewLane = CurDAG->getTargetConstant(LaneVal % NumElts, MVT::i32);
1112 SDNode *SubReg = CurDAG->getTargetNode(TargetInstrInfo::EXTRACT_SUBREG,
1113 dl, HalfVT, N->getOperand(0), SR);
1114 return CurDAG->SelectNodeTo(N, Opc, VT, SDValue(SubReg, 0), NewLane);
// Everything not handled above goes to the TableGen-generated matcher.
1121 return SelectCode(Op);
/// SelectInlineAsmMemoryOperand - Resolve an inline-asm 'm' memory
/// constraint by matching the address as addressing-mode 2 and pushing its
/// three component operands.
1124 bool ARMDAGToDAGISel::
1125 SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
1126 std::vector<SDValue> &OutOps) {
1127 assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
1129 SDValue Base, Offset, Opc;
// NOTE: returns (failure handling on the elided line) when the address
// does not fit addressing-mode 2.
1130 if (!SelectAddrMode2(Op, Op, Base, Offset, Opc))
1133 OutOps.push_back(Base);
1134 OutOps.push_back(Offset);
1135 OutOps.push_back(Opc);
1139 /// createARMISelDag - This pass converts a legalized DAG into a
1140 /// ARM-specific DAG, ready for instruction scheduling.
// Factory entry point used by the ARM target machine's pass setup.
1142 FunctionPass *llvm::createARMISelDag(ARMBaseTargetMachine &TM) {
1143 return new ARMDAGToDAGISel(TM);