1 //===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines an instruction selector for the ARM target.
12 //===----------------------------------------------------------------------===//
15 #include "ARMAddressingModes.h"
16 #include "ARMConstantPoolValue.h"
17 #include "ARMISelLowering.h"
18 #include "ARMTargetMachine.h"
19 #include "llvm/CallingConv.h"
20 #include "llvm/Constants.h"
21 #include "llvm/DerivedTypes.h"
22 #include "llvm/Function.h"
23 #include "llvm/Intrinsics.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/SelectionDAG.h"
28 #include "llvm/CodeGen/SelectionDAGISel.h"
29 #include "llvm/Target/TargetLowering.h"
30 #include "llvm/Target/TargetOptions.h"
31 #include "llvm/Support/Compiler.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Support/raw_ostream.h"
// Numeric sub-register indices for the two D (double-word) halves of a
// quad register.
// NOTE(review): the literal values 5/6 presumably mirror the dsub_0/dsub_1
// indices emitted in the ARM register description (.td) — confirm against
// ARMGenRegisterInfo before relying on them.
38 static const unsigned arm_dsubreg_0 = 5;
39 static const unsigned arm_dsubreg_1 = 6;
41 //===--------------------------------------------------------------------===//
42 /// ARMDAGToDAGISel - ARM specific code to select ARM machine
43 /// instructions for SelectionDAG operations.
// ARMDAGToDAGISel: the SelectionDAG instruction selector for the ARM family
// (classic ARM, Thumb-1, Thumb-2).  The Select* member functions below are
// "ComplexPattern" matchers: each one tries to decompose an address/operand
// SDValue into the register/immediate pieces an ARM addressing mode needs,
// returning true on success and filling the by-reference out-parameters.
// NOTE(review): this listing omits some original lines (gaps in the embedded
// numbering), e.g. access specifiers and several parameter lines.
46 class ARMDAGToDAGISel : public SelectionDAGISel {
47 ARMBaseTargetMachine &TM;
49 /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
50 /// make the right decision when generating code for different targets.
51 const ARMSubtarget *Subtarget;
54 explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm)
55 : SelectionDAGISel(tm), TM(tm),
56 Subtarget(&TM.getSubtarget<ARMSubtarget>()) {
59 virtual const char *getPassName() const {
60 return "ARM Instruction Selection";
63 /// getI32Imm - Return a target constant with the specified value, of type i32.
64 inline SDValue getI32Imm(unsigned Imm) {
65 return CurDAG->getTargetConstant(Imm, MVT::i32);
// Main entry point: selects one DAG node into a machine node.
68 SDNode *Select(SDValue Op);
69 virtual void InstructionSelect();
// ARM-mode operand/addressing-mode matchers (addrmode 2/3/5/6, PC-relative).
70 bool SelectShifterOperandReg(SDValue Op, SDValue N, SDValue &A,
71 SDValue &B, SDValue &C);
72 bool SelectAddrMode2(SDValue Op, SDValue N, SDValue &Base,
73 SDValue &Offset, SDValue &Opc);
74 bool SelectAddrMode2Offset(SDValue Op, SDValue N,
75 SDValue &Offset, SDValue &Opc);
76 bool SelectAddrMode3(SDValue Op, SDValue N, SDValue &Base,
77 SDValue &Offset, SDValue &Opc);
78 bool SelectAddrMode3Offset(SDValue Op, SDValue N,
79 SDValue &Offset, SDValue &Opc);
80 bool SelectAddrMode5(SDValue Op, SDValue N, SDValue &Base,
82 bool SelectAddrMode6(SDValue Op, SDValue N, SDValue &Addr, SDValue &Update,
85 bool SelectAddrModePC(SDValue Op, SDValue N, SDValue &Offset,
// Thumb-1 addressing-mode matchers (reg+reg, scaled imm5, SP-relative).
88 bool SelectThumbAddrModeRR(SDValue Op, SDValue N, SDValue &Base,
90 bool SelectThumbAddrModeRI5(SDValue Op, SDValue N, unsigned Scale,
91 SDValue &Base, SDValue &OffImm,
93 bool SelectThumbAddrModeS1(SDValue Op, SDValue N, SDValue &Base,
94 SDValue &OffImm, SDValue &Offset);
95 bool SelectThumbAddrModeS2(SDValue Op, SDValue N, SDValue &Base,
96 SDValue &OffImm, SDValue &Offset);
97 bool SelectThumbAddrModeS4(SDValue Op, SDValue N, SDValue &Base,
98 SDValue &OffImm, SDValue &Offset);
99 bool SelectThumbAddrModeSP(SDValue Op, SDValue N, SDValue &Base,
// Thumb-2 matchers (shifted-register operand, imm12/imm8/imm8s4, so_reg).
102 bool SelectT2ShifterOperandReg(SDValue Op, SDValue N,
103 SDValue &BaseReg, SDValue &Opc);
104 bool SelectT2AddrModeImm12(SDValue Op, SDValue N, SDValue &Base,
106 bool SelectT2AddrModeImm8(SDValue Op, SDValue N, SDValue &Base,
108 bool SelectT2AddrModeImm8Offset(SDValue Op, SDValue N,
110 bool SelectT2AddrModeImm8s4(SDValue Op, SDValue N, SDValue &Base,
112 bool SelectT2AddrModeSoReg(SDValue Op, SDValue N, SDValue &Base,
113 SDValue &OffReg, SDValue &ShImm);
115 // Include the pieces autogenerated from the target description.
116 #include "ARMGenDAGISel.inc"
119 /// SelectARMIndexedLoad - Indexed (pre/post inc/dec) load matching code for
121 SDNode *SelectARMIndexedLoad(SDValue Op);
122 SDNode *SelectT2IndexedLoad(SDValue Op);
125 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
126 /// inline asm expressions.
127 virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
129 std::vector<SDValue> &OutOps);
// Top-level per-function selection driver.  Presumably invokes the
// tablegen-generated matcher before the visible cleanup (the lines between
// 133 and 137 are omitted in this listing — TODO confirm).  Finally prunes
// nodes left dead by selection.
133 void ARMDAGToDAGISel::InstructionSelect() {
137 CurDAG->RemoveDeadNodes();
// SelectShifterOperandReg - Match an ARM "so_reg" operand: a register shifted
// by either an immediate (ShReg becomes reg 0, amount packed into Opc) or by
// another register.  Fails for a plain register so the cheaper
// register-only pattern can match instead.
140 bool ARMDAGToDAGISel::SelectShifterOperandReg(SDValue Op,
145 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);
147 // Don't match base register only case. That is matched to a separate
148 // lower complexity pattern with explicit register operand.
149 if (ShOpcVal == ARM_AM::no_shift) return false;
151 BaseReg = N.getOperand(0);
152 unsigned ShImmVal = 0;
153 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
// Immediate shift: no shift register needed; keep only the low 5 bits.
154 ShReg = CurDAG->getRegister(0, MVT::i32);
155 ShImmVal = RHS->getZExtValue() & 31;
// Register shift (else-branch; the 'else' line itself is omitted here).
157 ShReg = N.getOperand(1);
// Pack shift kind + amount into the single immediate operand.
159 Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
// SelectAddrMode2 - Match ARM addressing mode 2 (word/byte load-store):
// [base, +/- offset-reg, shift] or [base, +/- imm12].  Handles, in order:
// a MUL by (2^n +/- 1) rewritten as reg + (reg << n); a bare FrameIndex /
// ARMISD::Wrapper base with zero offset; base + signed 12-bit immediate;
// and finally base +/- (possibly shifted) register, trying the shift on
// either operand of an ADD.
// NOTE(review): several original lines (conditions, braces, returns) are
// omitted from this listing — the control flow shown here is partial.
164 bool ARMDAGToDAGISel::SelectAddrMode2(SDValue Op, SDValue N,
165 SDValue &Base, SDValue &Offset,
167 if (N.getOpcode() == ISD::MUL) {
168 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
169 // X * [3,5,9] -> X + X * [2,4,8] etc.
170 int RHSC = (int)RHS->getZExtValue();
173 ARM_AM::AddrOpc AddSub = ARM_AM::add;
175 AddSub = ARM_AM::sub;
178 if (isPowerOf2_32(RHSC)) {
179 unsigned ShAmt = Log2_32(RHSC);
// Base and offset are the same register: X + (X << ShAmt).
180 Base = Offset = N.getOperand(0);
181 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
// Not an ADD/SUB: use N itself as the base with a zero register offset.
190 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB) {
192 if (N.getOpcode() == ISD::FrameIndex) {
193 int FI = cast<FrameIndexSDNode>(N)->getIndex();
194 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
195 } else if (N.getOpcode() == ARMISD::Wrapper) {
196 Base = N.getOperand(0);
198 Offset = CurDAG->getRegister(0, MVT::i32);
199 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
205 // Match simple R +/- imm12 operands.
206 if (N.getOpcode() == ISD::ADD)
207 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
208 int RHSC = (int)RHS->getZExtValue();
209 if ((RHSC >= 0 && RHSC < 0x1000) ||
210 (RHSC < 0 && RHSC > -0x1000)) { // 12 bits.
211 Base = N.getOperand(0);
212 if (Base.getOpcode() == ISD::FrameIndex) {
213 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
214 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
216 Offset = CurDAG->getRegister(0, MVT::i32);
218 ARM_AM::AddrOpc AddSub = ARM_AM::add;
220 AddSub = ARM_AM::sub;
223 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,
230 // Otherwise this is R +/- [possibly shifted] R
231 ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::ADD ? ARM_AM::add:ARM_AM::sub;
232 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(1));
235 Base = N.getOperand(0);
236 Offset = N.getOperand(1);
238 if (ShOpcVal != ARM_AM::no_shift) {
239 // Check to see if the RHS of the shift is a constant, if not, we can't fold
241 if (ConstantSDNode *Sh =
242 dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
243 ShAmt = Sh->getZExtValue();
244 Offset = N.getOperand(1).getOperand(0);
// Variable shift amount: give up on folding the shift.
246 ShOpcVal = ARM_AM::no_shift;
250 // Try matching (R shl C) + (R).
251 if (N.getOpcode() == ISD::ADD && ShOpcVal == ARM_AM::no_shift) {
252 ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0));
253 if (ShOpcVal != ARM_AM::no_shift) {
254 // Check to see if the RHS of the shift is a constant, if not, we can't
256 if (ConstantSDNode *Sh =
257 dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
258 ShAmt = Sh->getZExtValue();
// The shift is on the LHS, so swap the roles of the two operands.
259 Offset = N.getOperand(0).getOperand(0);
260 Base = N.getOperand(1);
262 ShOpcVal = ARM_AM::no_shift;
267 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
// SelectAddrMode2Offset - Match the offset operand of a pre/post-indexed
// mode-2 load/store.  Direction (add vs. sub) comes from the parent
// load/store node's indexed mode; the offset itself is either an unsigned
// 12-bit immediate or a (possibly immediate-shifted) register.
272 bool ARMDAGToDAGISel::SelectAddrMode2Offset(SDValue Op, SDValue N,
273 SDValue &Offset, SDValue &Opc) {
274 unsigned Opcode = Op.getOpcode();
275 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
276 ? cast<LoadSDNode>(Op)->getAddressingMode()
277 : cast<StoreSDNode>(Op)->getAddressingMode();
278 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
279 ? ARM_AM::add : ARM_AM::sub;
280 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
281 int Val = (int)C->getZExtValue();
282 if (Val >= 0 && Val < 0x1000) { // 12 bits.
283 Offset = CurDAG->getRegister(0, MVT::i32);
284 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
// Register offset, optionally with a constant shift folded in.
292 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);
294 if (ShOpcVal != ARM_AM::no_shift) {
295 // Check to see if the RHS of the shift is a constant, if not, we can't fold
297 if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
298 ShAmt = Sh->getZExtValue();
299 Offset = N.getOperand(0);
301 ShOpcVal = ARM_AM::no_shift;
305 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
// SelectAddrMode3 - Match ARM addressing mode 3 (halfword / signed-byte
// load-store): [base, +/- offset-reg] or [base, +/- imm8].  SUB keeps the
// full register offset; a non-ADD node becomes a bare base (FrameIndex is
// rewritten to a TargetFrameIndex) with zero offset; ADD folds a signed
// 8-bit constant, else falls back to base + offset register.
// NOTE(review): some original lines (braces/returns) are omitted here.
311 bool ARMDAGToDAGISel::SelectAddrMode3(SDValue Op, SDValue N,
312 SDValue &Base, SDValue &Offset,
314 if (N.getOpcode() == ISD::SUB) {
315 // X - C is canonicalize to X + -C, no need to handle it here.
316 Base = N.getOperand(0);
317 Offset = N.getOperand(1);
318 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),MVT::i32);
322 if (N.getOpcode() != ISD::ADD) {
324 if (N.getOpcode() == ISD::FrameIndex) {
325 int FI = cast<FrameIndexSDNode>(N)->getIndex();
326 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
328 Offset = CurDAG->getRegister(0, MVT::i32);
329 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32);
333 // If the RHS is +/- imm8, fold into addr mode.
334 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
335 int RHSC = (int)RHS->getZExtValue();
336 if ((RHSC >= 0 && RHSC < 256) ||
337 (RHSC < 0 && RHSC > -256)) { // note -256 itself isn't allowed.
338 Base = N.getOperand(0);
339 if (Base.getOpcode() == ISD::FrameIndex) {
340 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
341 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
343 Offset = CurDAG->getRegister(0, MVT::i32);
345 ARM_AM::AddrOpc AddSub = ARM_AM::add;
347 AddSub = ARM_AM::sub;
350 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32);
// Fallback: base register + offset register, adding.
355 Base = N.getOperand(0);
356 Offset = N.getOperand(1);
357 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), MVT::i32);
// SelectAddrMode3Offset - Match the offset of a pre/post-indexed mode-3
// load/store: an unsigned 8-bit immediate, otherwise a register offset.
// Direction comes from the parent node's indexed addressing mode.
361 bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDValue Op, SDValue N,
362 SDValue &Offset, SDValue &Opc) {
363 unsigned Opcode = Op.getOpcode();
364 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
365 ? cast<LoadSDNode>(Op)->getAddressingMode()
366 : cast<StoreSDNode>(Op)->getAddressingMode();
367 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
368 ? ARM_AM::add : ARM_AM::sub;
369 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
370 int Val = (int)C->getZExtValue();
371 if (Val >= 0 && Val < 256) {
372 Offset = CurDAG->getRegister(0, MVT::i32);
373 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);
// Not a small constant: use N itself as the register offset (omitted lines
// presumably assign Offset = N — TODO confirm against the full source).
379 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), MVT::i32);
// SelectAddrMode5 - Match ARM addressing mode 5 (VFP load/store):
// [base, +/- imm8*4].  The constant must be a multiple of 4; the packed
// offset encodes the word count.  Non-ADD nodes become a bare base with a
// zero offset.
// NOTE(review): the lines scaling RHSC by 4 (between 402 and 404) are
// omitted from this listing.
384 bool ARMDAGToDAGISel::SelectAddrMode5(SDValue Op, SDValue N,
385 SDValue &Base, SDValue &Offset) {
386 if (N.getOpcode() != ISD::ADD) {
388 if (N.getOpcode() == ISD::FrameIndex) {
389 int FI = cast<FrameIndexSDNode>(N)->getIndex();
390 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
391 } else if (N.getOpcode() == ARMISD::Wrapper) {
392 Base = N.getOperand(0);
394 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
399 // If the RHS is +/- imm8, fold into addr mode.
400 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
401 int RHSC = (int)RHS->getZExtValue();
402 if ((RHSC & 3) == 0) { // The constant is implicitly multiplied by 4.
404 if ((RHSC >= 0 && RHSC < 256) ||
405 (RHSC < 0 && RHSC > -256)) { // note -256 itself isn't allowed.
406 Base = N.getOperand(0);
407 if (Base.getOpcode() == ISD::FrameIndex) {
408 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
409 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
412 ARM_AM::AddrOpc AddSub = ARM_AM::add;
414 AddSub = ARM_AM::sub;
417 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
// Fallback: whole expression as base, zero offset.
425 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
// SelectAddrMode6 - Match ARM addressing mode 6 (NEON vector load/store).
// Always succeeds with no writeback: Update is the zero register and the
// opcode constant encodes "no writeback"; ARMLoadStoreOpt adds writeback
// later.  (The line assigning Addr is omitted in this listing.)
430 bool ARMDAGToDAGISel::SelectAddrMode6(SDValue Op, SDValue N,
431 SDValue &Addr, SDValue &Update,
434 // The optional writeback is handled in ARMLoadStoreOpt.
435 Update = CurDAG->getRegister(0, MVT::i32);
436 Opc = CurDAG->getTargetConstant(ARM_AM::getAM6Opc(false), MVT::i32);
// SelectAddrModePC - Match a PC-relative address: a single-use
// ARMISD::PIC_ADD whose second operand is the constant pool label id,
// which is turned into a target constant.
440 bool ARMDAGToDAGISel::SelectAddrModePC(SDValue Op, SDValue N,
441 SDValue &Offset, SDValue &Label) {
442 if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
443 Offset = N.getOperand(0);
444 SDValue N1 = N.getOperand(1);
445 Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
// SelectThumbAddrModeRR - Match the Thumb-1 register+register address form.
// For a non-ADD node the base is N and a literal zero offset register is
// materialized with an explicit tMOVi8 (see comment below for why a plain
// constant node would not work).
452 bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue Op, SDValue N,
453 SDValue &Base, SDValue &Offset){
454 // FIXME dl should come from the parent load or store, not the address
455 DebugLoc dl = Op.getDebugLoc();
456 if (N.getOpcode() != ISD::ADD) {
458 // We must materialize a zero in a reg! Returning a constant here
459 // wouldn't work without additional code to position the node within
460 // ISel's topological ordering in a place where ISel will process it
461 // normally. Instead, just explicitly issue a tMOVri8 node!
462 Offset = SDValue(CurDAG->getTargetNode(ARM::tMOVi8, dl, MVT::i32,
463 CurDAG->getTargetConstant(0, MVT::i32)), 0);
// ADD: both operands are used directly as base and offset registers.
467 Base = N.getOperand(0);
468 Offset = N.getOperand(1);
// SelectThumbAddrModeRI5 - Shared matcher behind the Thumb-1 S1/S2/S4
// address forms: base register + (imm5 * Scale) or base + offset register.
// Deliberately fails when the SP-relative or constant-pool forms apply, so
// tLDRspi/tSTRspi and tLDRpci patterns win instead.  Thumb-1 has no
// [sp, reg] form, so an SP operand forces the register-offset path to bail
// to a base-with-zero-offset match.
473 ARMDAGToDAGISel::SelectThumbAddrModeRI5(SDValue Op, SDValue N,
474 unsigned Scale, SDValue &Base,
475 SDValue &OffImm, SDValue &Offset) {
477 SDValue TmpBase, TmpOffImm;
478 if (SelectThumbAddrModeSP(Op, N, TmpBase, TmpOffImm))
479 return false; // We want to select tLDRspi / tSTRspi instead.
480 if (N.getOpcode() == ARMISD::Wrapper &&
481 N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
482 return false; // We want to select tLDRpci instead.
485 if (N.getOpcode() != ISD::ADD) {
486 Base = (N.getOpcode() == ARMISD::Wrapper) ? N.getOperand(0) : N;
487 Offset = CurDAG->getRegister(0, MVT::i32);
488 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
492 // Thumb does not have [sp, r] address mode.
493 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
494 RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
495 if ((LHSR && LHSR->getReg() == ARM::SP) ||
496 (RHSR && RHSR->getReg() == ARM::SP)) {
498 Offset = CurDAG->getRegister(0, MVT::i32);
499 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
503 // If the RHS is + imm5 * scale, fold into addr mode.
504 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
505 int RHSC = (int)RHS->getZExtValue();
506 if ((RHSC & (Scale-1)) == 0) { // The constant is implicitly multiplied.
// NOTE(review): the line dividing RHSC by Scale (507) is omitted here.
508 if (RHSC >= 0 && RHSC < 32) {
509 Base = N.getOperand(0);
510 Offset = CurDAG->getRegister(0, MVT::i32);
511 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
// Fallback: base + offset register, zero immediate.
517 Base = N.getOperand(0);
518 Offset = N.getOperand(1);
519 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
// SelectThumbAddrModeS1 - Byte-sized (Scale = 1) Thumb-1 address form;
// thin wrapper over SelectThumbAddrModeRI5.
523 bool ARMDAGToDAGISel::SelectThumbAddrModeS1(SDValue Op, SDValue N,
524 SDValue &Base, SDValue &OffImm,
526 return SelectThumbAddrModeRI5(Op, N, 1, Base, OffImm, Offset);
// SelectThumbAddrModeS2 - Halfword-sized (Scale = 2) Thumb-1 address form;
// thin wrapper over SelectThumbAddrModeRI5.
529 bool ARMDAGToDAGISel::SelectThumbAddrModeS2(SDValue Op, SDValue N,
530 SDValue &Base, SDValue &OffImm,
532 return SelectThumbAddrModeRI5(Op, N, 2, Base, OffImm, Offset);
// SelectThumbAddrModeS4 - Word-sized (Scale = 4) Thumb-1 address form;
// thin wrapper over SelectThumbAddrModeRI5.
535 bool ARMDAGToDAGISel::SelectThumbAddrModeS4(SDValue Op, SDValue N,
536 SDValue &Base, SDValue &OffImm,
538 return SelectThumbAddrModeRI5(Op, N, 4, Base, OffImm, Offset);
// SelectThumbAddrModeSP - Match the Thumb-1 SP-relative form: a bare
// FrameIndex (offset 0), or SP/FrameIndex + imm8*4.  The immediate must be
// a multiple of 4; FrameIndex bases are rewritten to TargetFrameIndex.
// NOTE(review): the line dividing RHSC by 4 (560) is omitted here.
541 bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue Op, SDValue N,
542 SDValue &Base, SDValue &OffImm) {
543 if (N.getOpcode() == ISD::FrameIndex) {
544 int FI = cast<FrameIndexSDNode>(N)->getIndex();
545 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
546 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
550 if (N.getOpcode() != ISD::ADD)
553 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
554 if (N.getOperand(0).getOpcode() == ISD::FrameIndex ||
555 (LHSR && LHSR->getReg() == ARM::SP)) {
556 // If the RHS is + imm8 * scale, fold into addr mode.
557 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
558 int RHSC = (int)RHS->getZExtValue();
559 if ((RHSC & 3) == 0) { // The constant is implicitly multiplied.
561 if (RHSC >= 0 && RHSC < 256) {
562 Base = N.getOperand(0);
563 if (Base.getOpcode() == ISD::FrameIndex) {
564 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
565 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
567 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
// SelectT2ShifterOperandReg - Thumb-2 shifted-register operand ("t2_so_reg"):
// register shifted by a constant amount only (unlike the ARM-mode matcher,
// a register shift amount is not folded — see the immediate-only branch).
577 bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDValue Op, SDValue N,
580 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);
582 // Don't match base register only case. That is matched to a separate
583 // lower complexity pattern with explicit register operand.
584 if (ShOpcVal == ARM_AM::no_shift) return false;
586 BaseReg = N.getOperand(0);
587 unsigned ShImmVal = 0;
588 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
589 ShImmVal = RHS->getZExtValue() & 31;
590 Opc = getI32Imm(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal));
// SelectT2AddrModeImm12 - Thumb-2 [reg + uimm12] form: an ADD whose RHS is
// a non-negative constant below 4096.
597 bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue Op, SDValue N,
598 SDValue &Base, SDValue &OffImm) {
599 // Match simple R + imm12 operands.
600 if (N.getOpcode() != ISD::ADD)
603 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
604 int RHSC = (int)RHS->getZExtValue();
605 if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits.
606 Base = N.getOperand(0);
607 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
// SelectT2AddrModeImm8 - Thumb-2 [reg - imm8] form.  Matches ADD with a
// strictly negative 8-bit RHS (the positive case belongs to imm12), or SUB
// with a non-negative 8-bit RHS whose sign is flipped into the offset.
615 bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue Op, SDValue N,
616 SDValue &Base, SDValue &OffImm) {
617 if (N.getOpcode() == ISD::ADD) {
618 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
619 int RHSC = (int)RHS->getZExtValue();
620 if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
621 Base = N.getOperand(0);
622 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
626 } else if (N.getOpcode() == ISD::SUB) {
627 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
628 int RHSC = (int)RHS->getZExtValue();
629 if (RHSC >= 0 && RHSC < 0x100) { // 8 bits.
630 Base = N.getOperand(0);
// SUB base, C  ==>  offset is -C.
631 OffImm = CurDAG->getTargetConstant(-RHSC, MVT::i32);
// SelectT2AddrModeImm8Offset - Offset operand for Thumb-2 pre/post-indexed
// loads/stores: an 8-bit constant, negated unless the parent node is
// pre-increment.
640 bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDValue Op, SDValue N,
642 unsigned Opcode = Op.getOpcode();
643 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
644 ? cast<LoadSDNode>(Op)->getAddressingMode()
645 : cast<StoreSDNode>(Op)->getAddressingMode();
646 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N)) {
647 int RHSC = (int)RHS->getZExtValue();
648 if (RHSC >= 0 && RHSC < 0x100) { // 8 bits.
649 OffImm = (AM == ISD::PRE_INC)
650 ? CurDAG->getTargetConstant(RHSC, MVT::i32)
651 : CurDAG->getTargetConstant(-RHSC, MVT::i32);
// SelectT2AddrModeImm8s4 - Thumb-2 [reg +/- imm8*4] form (e.g. for 64-bit
// loads/stores): the constant must be 4-byte aligned and within +/-1024;
// SUB flips the sign into the offset immediate.
659 bool ARMDAGToDAGISel::SelectT2AddrModeImm8s4(SDValue Op, SDValue N,
660 SDValue &Base, SDValue &OffImm) {
661 if (N.getOpcode() == ISD::ADD) {
662 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
663 int RHSC = (int)RHS->getZExtValue();
664 if (((RHSC & 0x3) == 0) &&
665 ((RHSC >= 0 && RHSC < 0x400) || (RHSC < 0 && RHSC > -0x400))) { // 8 bits.
666 Base = N.getOperand(0);
667 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
671 } else if (N.getOpcode() == ISD::SUB) {
672 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
673 int RHSC = (int)RHS->getZExtValue();
674 if (((RHSC & 0x3) == 0) && (RHSC >= 0 && RHSC < 0x400)) { // 8 bits.
675 Base = N.getOperand(0);
676 OffImm = CurDAG->getTargetConstant(-RHSC, MVT::i32);
// SelectT2AddrModeSoReg - Thumb-2 register-plus-shifted-register form:
// [base + (offreg << imm)].  Bails to the generic base-with-zero-offset
// match for non-ADD/SUB nodes (refusing constant-pool wrappers so t2LDRpci
// matches), swaps operands to put a left-shift on the offset side, folds a
// constant shift amount, and finally defers to the imm12/imm8 matchers
// when one of those simpler forms would do.
685 bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue Op, SDValue N,
687 SDValue &OffReg, SDValue &ShImm) {
689 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB) {
691 if (N.getOpcode() == ISD::FrameIndex) {
692 int FI = cast<FrameIndexSDNode>(N)->getIndex();
693 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
694 } else if (N.getOpcode() == ARMISD::Wrapper) {
695 Base = N.getOperand(0);
696 if (Base.getOpcode() == ISD::TargetConstantPool)
697 return false; // We want to select t2LDRpci instead.
699 OffReg = CurDAG->getRegister(0, MVT::i32);
700 ShImm = CurDAG->getTargetConstant(0, MVT::i32);
704 // Look for (R + R) or (R + (R << [1,2,3])).
706 Base = N.getOperand(0);
707 OffReg = N.getOperand(1);
709 // Swap if it is ((R << c) + R).
710 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg);
711 if (ShOpcVal != ARM_AM::lsl) {
712 ShOpcVal = ARM_AM::getShiftOpcForNode(Base);
713 if (ShOpcVal == ARM_AM::lsl)
714 std::swap(Base, OffReg);
717 if (ShOpcVal == ARM_AM::lsl) {
718 // Check to see if the RHS of the shift is a constant, if not, we can't fold
720 if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) {
721 ShAmt = Sh->getZExtValue();
// NOTE(review): the bounds check on ShAmt (lines 722-723) is omitted in
// this listing; only the failure path is visible.
724 ShOpcVal = ARM_AM::no_shift;
726 OffReg = OffReg.getOperand(0);
728 ShOpcVal = ARM_AM::no_shift;
730 } else if (SelectT2AddrModeImm12(Op, N, Base, ShImm) ||
731 SelectT2AddrModeImm8 (Op, N, Base, ShImm))
732 // Don't match if it's possible to match to one of the r +/- imm cases.
735 ShImm = CurDAG->getTargetConstant(ShAmt, MVT::i32);
740 //===--------------------------------------------------------------------===//
742 /// getAL - Returns a ARMCC::AL immediate node.
// AL ("always") is the default predicate attached to unconditional
// instructions; every getTargetNode call below passes this plus a zero
// predicate register.
743 static inline SDValue getAL(SelectionDAG *CurDAG) {
744 return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32);
// SelectARMIndexedLoad - Select a pre/post-indexed ARM-mode load.  Picks the
// opcode from the loaded type and extension kind: i32 -> LDR (addrmode 2);
// i16 -> LDRH/LDRSH (addrmode 3); i8/i1 -> LDRSB via addrmode 3 when
// sign-extending, else LDRB via addrmode 2.  Emits a two-result node
// (value, updated base) plus chain.  Returns NULL for unindexed loads.
747 SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDValue Op) {
748 LoadSDNode *LD = cast<LoadSDNode>(Op);
749 ISD::MemIndexedMode AM = LD->getAddressingMode();
750 if (AM == ISD::UNINDEXED)
753 MVT LoadedVT = LD->getMemoryVT();
754 SDValue Offset, AMOpc;
755 bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
758 if (LoadedVT == MVT::i32 &&
759 SelectAddrMode2Offset(Op, LD->getOffset(), Offset, AMOpc)) {
760 Opcode = isPre ? ARM::LDR_PRE : ARM::LDR_POST;
762 } else if (LoadedVT == MVT::i16 &&
763 SelectAddrMode3Offset(Op, LD->getOffset(), Offset, AMOpc)) {
765 Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
766 ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
767 : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
768 } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
769 if (LD->getExtensionType() == ISD::SEXTLOAD) {
770 if (SelectAddrMode3Offset(Op, LD->getOffset(), Offset, AMOpc)) {
772 Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST;
775 if (SelectAddrMode2Offset(Op, LD->getOffset(), Offset, AMOpc)) {
777 Opcode = isPre ? ARM::LDRB_PRE : ARM::LDRB_POST;
// Operand order: base, offset, addrmode constant, predicate (AL + zero
// predicate register), chain.
783 SDValue Chain = LD->getChain();
784 SDValue Base = LD->getBasePtr();
785 SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG),
786 CurDAG->getRegister(0, MVT::i32), Chain };
787 return CurDAG->getTargetNode(Opcode, Op.getDebugLoc(), MVT::i32, MVT::i32,
// SelectT2IndexedLoad - Thumb-2 counterpart of SelectARMIndexedLoad: select
// a pre/post-indexed t2LDR{,H,SH,B,SB} from the loaded type and extension
// kind, all through the single imm8 offset form.  Returns NULL for
// unindexed loads.
// NOTE(review): the switch's case labels and default are omitted from this
// listing; only the opcode assignments are visible.
794 SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDValue Op) {
795 LoadSDNode *LD = cast<LoadSDNode>(Op);
796 ISD::MemIndexedMode AM = LD->getAddressingMode();
797 if (AM == ISD::UNINDEXED)
800 MVT LoadedVT = LD->getMemoryVT();
801 bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
803 bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
806 if (SelectT2AddrModeImm8Offset(Op, LD->getOffset(), Offset)) {
807 switch (LoadedVT.getSimpleVT()) {
809 Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST;
813 Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST;
815 Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST;
820 Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST;
822 Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST;
// Thumb-2 indexed forms carry no separate addrmode constant — just base,
// offset, predicate (AL + zero register), chain.
831 SDValue Chain = LD->getChain();
832 SDValue Base = LD->getBasePtr();
833 SDValue Ops[]= { Base, Offset, getAL(CurDAG),
834 CurDAG->getRegister(0, MVT::i32), Chain };
835 return CurDAG->getTargetNode(Opcode, Op.getDebugLoc(), MVT::i32, MVT::i32,
843 SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
844 SDNode *N = Op.getNode();
845 DebugLoc dl = N->getDebugLoc();
847 if (N->isMachineOpcode())
848 return NULL; // Already selected.
850 switch (N->getOpcode()) {
852 case ISD::Constant: {
853 unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
855 if (Subtarget->isThumb()) {
856 if (Subtarget->hasThumb2())
857 // Thumb2 has the MOVT instruction, so all immediates can
858 // be done with MOV + MOVT, at worst.
861 UseCP = (Val > 255 && // MOV
862 ~Val > 255 && // MOV + MVN
863 !ARM_AM::isThumbImmShiftedVal(Val)); // MOV + LSL
865 UseCP = (ARM_AM::getSOImmVal(Val) == -1 && // MOV
866 ARM_AM::getSOImmVal(~Val) == -1 && // MVN
867 !ARM_AM::isSOImmTwoPartVal(Val)); // two instrs.
870 CurDAG->getTargetConstantPool(ConstantInt::get(Type::Int32Ty, Val),
874 if (Subtarget->isThumb1Only())
875 ResNode = CurDAG->getTargetNode(ARM::tLDRcp, dl, MVT::i32, MVT::Other,
876 CPIdx, CurDAG->getEntryNode());
880 CurDAG->getRegister(0, MVT::i32),
881 CurDAG->getTargetConstant(0, MVT::i32),
883 CurDAG->getRegister(0, MVT::i32),
884 CurDAG->getEntryNode()
886 ResNode=CurDAG->getTargetNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
889 ReplaceUses(Op, SDValue(ResNode, 0));
893 // Other cases are autogenerated.
896 case ISD::FrameIndex: {
897 // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
898 int FI = cast<FrameIndexSDNode>(N)->getIndex();
899 SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
900 if (Subtarget->isThumb1Only()) {
901 return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, TFI,
902 CurDAG->getTargetConstant(0, MVT::i32));
904 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
905 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
906 CurDAG->getRegister(0, MVT::i32) };
907 return CurDAG->SelectNodeTo(N, (Subtarget->hasThumb2()) ? ARM::t2ADDri : ARM::ADDri,
912 if (!Subtarget->isThumb1Only())
914 // Select add sp, c to tADDhirr.
915 SDValue N0 = Op.getOperand(0);
916 SDValue N1 = Op.getOperand(1);
917 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(Op.getOperand(0));
918 RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(Op.getOperand(1));
919 if (LHSR && LHSR->getReg() == ARM::SP) {
921 std::swap(LHSR, RHSR);
923 if (RHSR && RHSR->getReg() == ARM::SP) {
924 SDValue Val = SDValue(CurDAG->getTargetNode(ARM::tMOVlor2hir, dl,
925 Op.getValueType(), N0, N0), 0);
926 return CurDAG->SelectNodeTo(N, ARM::tADDhirr, Op.getValueType(), Val, N1);
931 if (Subtarget->isThumb1Only())
933 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
934 unsigned RHSV = C->getZExtValue();
936 if (isPowerOf2_32(RHSV-1)) { // 2^n+1?
937 SDValue V = Op.getOperand(0);
938 unsigned ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, Log2_32(RHSV-1));
939 SDValue Ops[] = { V, V, CurDAG->getRegister(0, MVT::i32),
940 CurDAG->getTargetConstant(ShImm, MVT::i32),
941 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
942 CurDAG->getRegister(0, MVT::i32) };
943 return CurDAG->SelectNodeTo(N, (Subtarget->hasThumb2()) ?
944 ARM::t2ADDrs : ARM::ADDrs, MVT::i32, Ops, 7);
946 if (isPowerOf2_32(RHSV+1)) { // 2^n-1?
947 SDValue V = Op.getOperand(0);
948 unsigned ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, Log2_32(RHSV+1));
949 SDValue Ops[] = { V, V, CurDAG->getRegister(0, MVT::i32),
950 CurDAG->getTargetConstant(ShImm, MVT::i32),
951 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
952 CurDAG->getRegister(0, MVT::i32) };
953 return CurDAG->SelectNodeTo(N, (Subtarget->hasThumb2()) ?
954 ARM::t2RSBrs : ARM::RSBrs, MVT::i32, Ops, 7);
959 return CurDAG->getTargetNode(ARM::FMRRD, dl, MVT::i32, MVT::i32,
960 Op.getOperand(0), getAL(CurDAG),
961 CurDAG->getRegister(0, MVT::i32));
962 case ISD::UMUL_LOHI: {
963 if (Subtarget->isThumb1Only())
965 if (Subtarget->isThumb()) {
966 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
967 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
968 CurDAG->getRegister(0, MVT::i32) };
969 return CurDAG->getTargetNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32, Ops,4);
971 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
972 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
973 CurDAG->getRegister(0, MVT::i32) };
974 return CurDAG->getTargetNode(ARM::UMULL, dl, MVT::i32, MVT::i32, Ops, 5);
977 case ISD::SMUL_LOHI: {
978 if (Subtarget->isThumb1Only())
980 if (Subtarget->isThumb()) {
981 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
982 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
983 return CurDAG->getTargetNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32, Ops,4);
985 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
986 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
987 CurDAG->getRegister(0, MVT::i32) };
988 return CurDAG->getTargetNode(ARM::SMULL, dl, MVT::i32, MVT::i32, Ops, 5);
993 if (Subtarget->isThumb() && Subtarget->hasThumb2())
994 ResNode = SelectT2IndexedLoad(Op);
996 ResNode = SelectARMIndexedLoad(Op);
999 // Other cases are autogenerated.
1002 case ARMISD::BRCOND: {
1003 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
1004 // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc)
1005 // Pattern complexity = 6 cost = 1 size = 0
1007 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
1008 // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc)
1009 // Pattern complexity = 6 cost = 1 size = 0
1011 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
1012 // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc)
1013 // Pattern complexity = 6 cost = 1 size = 0
1015 unsigned Opc = Subtarget->isThumb() ?
1016 ((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
1017 SDValue Chain = Op.getOperand(0);
1018 SDValue N1 = Op.getOperand(1);
1019 SDValue N2 = Op.getOperand(2);
1020 SDValue N3 = Op.getOperand(3);
1021 SDValue InFlag = Op.getOperand(4);
1022 assert(N1.getOpcode() == ISD::BasicBlock);
1023 assert(N2.getOpcode() == ISD::Constant);
1024 assert(N3.getOpcode() == ISD::Register);
1026 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
1027 cast<ConstantSDNode>(N2)->getZExtValue()),
1029 SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
1030 SDNode *ResNode = CurDAG->getTargetNode(Opc, dl, MVT::Other,
1032 Chain = SDValue(ResNode, 0);
1033 if (Op.getNode()->getNumValues() == 2) {
1034 InFlag = SDValue(ResNode, 1);
1035 ReplaceUses(SDValue(Op.getNode(), 1), InFlag);
1037 ReplaceUses(SDValue(Op.getNode(), 0), SDValue(Chain.getNode(), Chain.getResNo()));
// Select ARMISD::CMOV (conditional move) into a MOVCC-family instruction.
// Operands of the CMOV node:
//   N0 = value if condition is false
//   N1 = value if condition is true
//   N2 = ARM condition code (constant)
//   N3 = CPSR register reference
//   InFlag = incoming flag from the preceding compare.
// NOTE: several original source lines are dropped in this extraction
// (closing braces, MVT arguments of getTargetConstant calls, the final
// opcode switch cases) — verify against the complete file.
1040 case ARMISD::CMOV: {
1041 MVT VT = Op.getValueType();
1042 SDValue N0 = Op.getOperand(0);
1043 SDValue N1 = Op.getOperand(1);
1044 SDValue N2 = Op.getOperand(2);
1045 SDValue N3 = Op.getOperand(3);
1046 SDValue InFlag = Op.getOperand(4);
1047 assert(N2.getOpcode() == ISD::Constant);
1048 assert(N3.getOpcode() == ISD::Register);
// Thumb1 has no predicated MOV with shifter/immediate operands, so the
// pattern-based selections below apply only to ARM and Thumb2.
1050 if (!Subtarget->isThumb1Only() && VT == MVT::i32) {
1051 // Pattern: (ARMcmov:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
1052 // Emits: (MOVCCs:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
1053 // Pattern complexity = 18 cost = 1 size = 0
// Try to fold a shifter operand into the conditional move: t2MOVCCs for
// Thumb2, MOVCCs for ARM.
1057 if (Subtarget->isThumb()) {
1058 if (SelectT2ShifterOperandReg(Op, N1, CPTmp0, CPTmp1)) {
1059 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
1060 cast<ConstantSDNode>(N2)->getZExtValue()),
1062 SDValue Ops[] = { N0, CPTmp0, CPTmp1, Tmp2, N3, InFlag };
1063 return CurDAG->SelectNodeTo(Op.getNode(),
1064 ARM::t2MOVCCs, MVT::i32,Ops, 6);
1067 if (SelectShifterOperandReg(Op, N1, CPTmp0, CPTmp1, CPTmp2)) {
1068 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
1069 cast<ConstantSDNode>(N2)->getZExtValue()),
1071 SDValue Ops[] = { N0, CPTmp0, CPTmp1, CPTmp2, Tmp2, N3, InFlag };
1072 return CurDAG->SelectNodeTo(Op.getNode(),
1073 ARM::MOVCCs, MVT::i32, Ops, 7);
1077 // Pattern: (ARMcmov:i32 GPR:i32:$false,
1078 // (imm:i32)<<P:Predicate_so_imm>>:$true,
1080 // Emits: (MOVCCi:i32 GPR:i32:$false,
1081 // (so_imm:i32 (imm:i32):$true), (imm:i32):$cc)
1082 // Pattern complexity = 10 cost = 1 size = 0
// NOTE(review): the three N3-based checks below look like an N1/N3 mix-up.
// N3 is asserted above to be a Register (the CPSR reference), yet it is
// tested for ISD::Constant and passed to Predicate_{t2_}so_imm, while the
// immediate actually encoded (Tmp1) comes from N1 — the true-value operand.
// These checks should almost certainly operate on N1; confirm against the
// upstream fix before relying on this path.
1083 if (N3.getOpcode() == ISD::Constant) {
1084 if (Subtarget->isThumb()) {
1085 if (Predicate_t2_so_imm(N3.getNode())) {
1086 SDValue Tmp1 = CurDAG->getTargetConstant(((unsigned)
1087 cast<ConstantSDNode>(N1)->getZExtValue()),
1089 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
1090 cast<ConstantSDNode>(N2)->getZExtValue()),
1092 SDValue Ops[] = { N0, Tmp1, Tmp2, N3, InFlag };
1093 return CurDAG->SelectNodeTo(Op.getNode(),
1094 ARM::t2MOVCCi, MVT::i32, Ops, 5);
1097 if (Predicate_so_imm(N3.getNode())) {
1098 SDValue Tmp1 = CurDAG->getTargetConstant(((unsigned)
1099 cast<ConstantSDNode>(N1)->getZExtValue()),
1101 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
1102 cast<ConstantSDNode>(N2)->getZExtValue()),
1104 SDValue Ops[] = { N0, Tmp1, Tmp2, N3, InFlag };
1105 return CurDAG->SelectNodeTo(Op.getNode(),
1106 ARM::MOVCCi, MVT::i32, Ops, 5);
// Fallback: plain register-register conditional move (also handles the
// FP cases FCPYScc / FCPYDcc per the comment below).
1112 // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
1113 // Emits: (MOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
1114 // Pattern complexity = 6 cost = 1 size = 0
1116 // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
1117 // Emits: (tMOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
1118 // Pattern complexity = 6 cost = 11 size = 0
1120 // Also FCPYScc and FCPYDcc.
1121 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
1122 cast<ConstantSDNode>(N2)->getZExtValue()),
1124 SDValue Ops[] = { N0, N1, Tmp2, N3, InFlag };
// Pick the opcode by value type; the FP cases (f32/f64) are among the
// dropped lines of this extraction.
1126 switch (VT.getSimpleVT()) {
1127 default: assert(false && "Illegal conditional move type!");
1130 Opc = Subtarget->isThumb()
1131 ? (Subtarget->hasThumb2() ? ARM::t2MOVCCr : ARM::tMOVCCr)
1141 return CurDAG->SelectNodeTo(Op.getNode(), Opc, VT, Ops, 5);
// Select ARMISD::CNEG (conditional negate). Same operand layout as CMOV:
// false value, true value, condition-code constant, CPSR register, in-flag.
// The opcode switch cases (presumably FNEGScc/FNEGDcc) are among the lines
// dropped from this extraction.
1143 case ARMISD::CNEG: {
1144 MVT VT = Op.getValueType();
1145 SDValue N0 = Op.getOperand(0);
1146 SDValue N1 = Op.getOperand(1);
1147 SDValue N2 = Op.getOperand(2);
1148 SDValue N3 = Op.getOperand(3);
1149 SDValue InFlag = Op.getOperand(4);
1150 assert(N2.getOpcode() == ISD::Constant);
1151 assert(N3.getOpcode() == ISD::Register);
// Materialize the ARM condition code as a target constant operand.
1153 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
1154 cast<ConstantSDNode>(N2)->getZExtValue()),
1156 SDValue Ops[] = { N0, N1, Tmp2, N3, InFlag };
1158 switch (VT.getSimpleVT()) {
1159 default: assert(false && "Illegal conditional move type!");
1168 return CurDAG->SelectNodeTo(Op.getNode(), Opc, VT, Ops, 5);
// Select ISD::DECLARE (llvm.dbg.declare) into a target DECLARE node carrying
// a frame index and the global describing the variable. On failure paths the
// node is simply replaced with its input chain, dropping the debug info.
// NOTE: several guard lines (e.g. "if (!FINode) ...", "if (!Ld) ...") are
// dropped from this extraction; the ReplaceUses calls below are their bodies.
1171 case ISD::DECLARE: {
1172 SDValue Chain = Op.getOperand(0);
1173 SDValue N1 = Op.getOperand(1);
1174 SDValue N2 = Op.getOperand(2);
1175 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(N1);
1176 // FIXME: handle VLAs.
// Bail out (replace with chain) when the address is not a frame index.
1178 ReplaceUses(Op.getValue(0), Chain);
// Look through a PIC_ADD wrapping the constant-pool load of the variable
// descriptor, if present.
1181 if (N2.getOpcode() == ARMISD::PIC_ADD && isa<LoadSDNode>(N2.getOperand(0)))
1182 N2 = N2.getOperand(0);
1183 LoadSDNode *Ld = dyn_cast<LoadSDNode>(N2);
1185 ReplaceUses(Op.getValue(0), Chain);
// The descriptor must come from a constant pool wrapped by ARMISD::Wrapper.
1188 SDValue BasePtr = Ld->getBasePtr();
1189 assert(BasePtr.getOpcode() == ARMISD::Wrapper &&
1190 isa<ConstantPoolSDNode>(BasePtr.getOperand(0)) &&
1191 "llvm.dbg.variable should be a constantpool node");
1192 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(BasePtr.getOperand(0));
1193 GlobalValue *GV = 0;
// Extract the GlobalValue either from an ARM machine constant-pool entry or
// from a plain constant-pool constant.
1194 if (CP->isMachineConstantPoolEntry()) {
1195 ARMConstantPoolValue *ACPV = (ARMConstantPoolValue*)CP->getMachineCPVal();
1198 GV = dyn_cast<GlobalValue>(CP->getConstVal());
1200 ReplaceUses(Op.getValue(0), Chain);
// Emit DECLARE(frame-index, global, chain).
1204 SDValue Tmp1 = CurDAG->getTargetFrameIndex(FINode->getIndex(),
1205 TLI.getPointerTy());
1206 SDValue Tmp2 = CurDAG->getTargetGlobalAddress(GV, TLI.getPointerTy());
1207 SDValue Ops[] = { Tmp1, Tmp2, Chain };
1208 return CurDAG->getTargetNode(TargetInstrInfo::DECLARE, dl,
1209 MVT::Other, Ops, 3);
// Select a 2-operand CONCAT_VECTORS producing a 128-bit (Q) vector: start
// from an IMPLICIT_DEF of the Q register and insert each 64-bit (D) half via
// INSERT_SUBREG, skipping UNDEF halves. arm_dsubreg_0/1 are the D-subregister
// indices of a Q register (see the constants at the top of the file).
1212 case ISD::CONCAT_VECTORS: {
1213 MVT VT = Op.getValueType();
1214 assert(VT.is128BitVector() && Op.getNumOperands() == 2 &&
1215 "unexpected CONCAT_VECTORS");
1216 SDValue N0 = Op.getOperand(0);
1217 SDValue N1 = Op.getOperand(1);
// (The "SDNode *Result =" declaration line is dropped in this extraction.)
1219 CurDAG->getTargetNode(TargetInstrInfo::IMPLICIT_DEF, dl, VT);
// Insert the low half unless it is undef.
1220 if (N0.getOpcode() != ISD::UNDEF)
1221 Result = CurDAG->getTargetNode(TargetInstrInfo::INSERT_SUBREG, dl, VT,
1222 SDValue(Result, 0), N0,
1223 CurDAG->getTargetConstant(arm_dsubreg_0,
// Insert the high half unless it is undef.
1225 if (N1.getOpcode() != ISD::UNDEF)
1226 Result = CurDAG->getTargetNode(TargetInstrInfo::INSERT_SUBREG, dl, VT,
1227 SDValue(Result, 0), N1,
1228 CurDAG->getTargetConstant(arm_dsubreg_1,
// Select a 128-bit splat shuffle into VDUPLANEQ: extract the D-register half
// containing the splatted lane, then duplicate that lane across the Q result.
1233 case ISD::VECTOR_SHUFFLE: {
1234 MVT VT = Op.getValueType();
1236 // Match 128-bit splat to VDUPLANEQ. (This could be done with a Pat in
1237 // ARMInstrNEON.td but it is awkward because the shuffle mask needs to be
1238 // transformed first into a lane number and then to both a subregister
1239 // index and an adjusted lane number.) If the source operand is a
1240 // SCALAR_TO_VECTOR, leave it so it will be matched later as a VDUP.
1241 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1242 if (VT.is128BitVector() && SVOp->isSplat() &&
1243 Op.getOperand(0).getOpcode() != ISD::SCALAR_TO_VECTOR &&
1244 Op.getOperand(1).getOpcode() == ISD::UNDEF) {
1245 unsigned LaneVal = SVOp->getSplatIndex();
// Choose the VDUPLN opcode and the 64-bit half type by element type.
1249 switch (VT.getVectorElementType().getSimpleVT()) {
1250 default: assert(false && "unhandled VDUP splat type");
1251 case MVT::i8: Opc = ARM::VDUPLN8q; HalfVT = MVT::v8i8; break;
1252 case MVT::i16: Opc = ARM::VDUPLN16q; HalfVT = MVT::v4i16; break;
1253 case MVT::i32: Opc = ARM::VDUPLN32q; HalfVT = MVT::v2i32; break;
1254 case MVT::f32: Opc = ARM::VDUPLNfq; HalfVT = MVT::v2f32; break;
1257 // The source operand needs to be changed to a subreg of the original
1258 // 128-bit operand, and the lane number needs to be adjusted accordingly.
1259 unsigned NumElts = VT.getVectorNumElements() / 2;
// Lanes in the upper half live in dsubreg_1; reduce the lane index modulo
// the number of elements per D register.
1260 unsigned SRVal = (LaneVal < NumElts ? arm_dsubreg_0 : arm_dsubreg_1);
1261 SDValue SR = CurDAG->getTargetConstant(SRVal, MVT::i32);
1262 SDValue NewLane = CurDAG->getTargetConstant(LaneVal % NumElts, MVT::i32);
1263 SDNode *SubReg = CurDAG->getTargetNode(TargetInstrInfo::EXTRACT_SUBREG,
1264 dl, HalfVT, N->getOperand(0), SR);
1265 return CurDAG->SelectNodeTo(N, Opc, VT, SDValue(SubReg, 0), NewLane);
1272 return SelectCode(Op);
// SelectInlineAsmMemoryOperand - Resolve an inline-asm 'm' (memory)
// constraint by selecting the operand with addressing mode 2 and pushing the
// resulting base, offset, and opcode operands onto OutOps.
// NOTE: the early-return on selection failure and the final "return false;"
// appear to be among the lines dropped from this extraction.
1275 bool ARMDAGToDAGISel::
1276 SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
1277 std::vector<SDValue> &OutOps) {
1278 assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
1280 SDValue Base, Offset, Opc;
// Only addressing-mode-2 operands are supported here.
1281 if (!SelectAddrMode2(Op, Op, Base, Offset, Opc))
1284 OutOps.push_back(Base);
1285 OutOps.push_back(Offset);
1286 OutOps.push_back(Opc);
1290 /// createARMISelDag - This pass converts a legalized DAG into a
1291 /// ARM-specific DAG, ready for instruction scheduling.
// Factory entry point used by the ARM target to register this ISel pass.
1293 FunctionPass *llvm::createARMISelDag(ARMBaseTargetMachine &TM) {
1294 return new ARMDAGToDAGISel(TM);