//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Evan Cheng and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "ARMAddressingModes.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMRegisterInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Instruction.h"
#include "llvm/Intrinsics.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;
ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
    : TargetLowering(TM), ARMPCLabelIndex(0) {
  Subtarget = &TM.getSubtarget<ARMSubtarget>();

  if (Subtarget->isTargetDarwin()) {
    setLibcallName(RTLIB::UINTTOFP_I64_F32, NULL);
    setLibcallName(RTLIB::UINTTOFP_I64_F64, NULL);
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
      // Single-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");

      // Double-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");

      // Single-precision comparisons.
      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
      setLibcallName(RTLIB::UO_F32,  "__unordsf2vfp");
      setLibcallName(RTLIB::O_F32,   "__unordsf2vfp");
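      // These libcall routines return an i32 that is nonzero when the
      // comparison holds, so the SETNE codes below compare the libcall
      // result against zero (e.g. OEQ becomes "__eqsf2vfp(a, b) != 0");
      // O_F32/O_F64 reuse the "unordered" routine and invert it with SETEQ.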
      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F32,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F32,   ISD::SETEQ);

      // Double-precision comparisons.
      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
      setLibcallName(RTLIB::UO_F64,  "__unorddf2vfp");
      setLibcallName(RTLIB::O_F64,   "__unorddf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F64,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F64,   ISD::SETEQ);
      // Floating-point to integer conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");

      // Conversions between floating types.
      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
      setLibcallName(RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp");

      // Integer to floating-point conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
      // e.g., __floatunsidf vs. __floatunssidfvfp.
      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
    }
  }
  addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb()) {
    addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
    addRegisterClass(MVT::f64, ARM::DPRRegisterClass);
  }
  computeRegisterProperties();
  // ARM does not have f32 extending load.
  setLoadXAction(ISD::EXTLOAD, MVT::f32, Expand);

  // ARM supports all 4 flavors of integer indexed load / store.
  for (unsigned im = (unsigned)ISD::PRE_INC;
       im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
    setIndexedLoadAction(im,  MVT::i1,  Legal);
    setIndexedLoadAction(im,  MVT::i8,  Legal);
    setIndexedLoadAction(im,  MVT::i16, Legal);
    setIndexedLoadAction(im,  MVT::i32, Legal);
    setIndexedStoreAction(im, MVT::i1,  Legal);
    setIndexedStoreAction(im, MVT::i8,  Legal);
    setIndexedStoreAction(im, MVT::i16, Legal);
    setIndexedStoreAction(im, MVT::i32, Legal);
  }
  // i64 operation support.
  if (Subtarget->isThumb()) {
    setOperationAction(ISD::MUL,       MVT::i64, Expand);
    setOperationAction(ISD::MULHU,     MVT::i32, Expand);
    setOperationAction(ISD::MULHS,     MVT::i32, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  } else {
    setOperationAction(ISD::MUL,   MVT::i64, Expand);
    setOperationAction(ISD::MULHU, MVT::i32, Expand);
    if (!Subtarget->hasV6Ops())
      setOperationAction(ISD::MULHS, MVT::i32, Expand);
  }
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL,  MVT::i32, Expand);
  setOperationAction(ISD::CTTZ,  MVT::i32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  // These are expanded into libcalls.
  setOperationAction(ISD::SDIV,    MVT::i32, Expand);
  setOperationAction(ISD::UDIV,    MVT::i32, Expand);
  setOperationAction(ISD::SREM,    MVT::i32, Expand);
  setOperationAction(ISD::UREM,    MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
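  // Note: the ARM variants targeted here have no hardware integer divide,
  // so SDIV / UDIV / SREM / UREM become calls to runtime routines (e.g.
  // __divsi3 / __udivsi3 / __modsi3 / __umodsi3 in libgcc).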
  // Support label based line numbers.
  setOperationAction(ISD::LOCATION,  MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);

  setOperationAction(ISD::RET,           MVT::Other, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32,   Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32,   Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);

  // Expand mem operations generically.
  setOperationAction(ISD::MEMSET,  MVT::Other, Expand);
  setOperationAction(ISD::MEMCPY,  MVT::Other, Custom);
  setOperationAction(ISD::MEMMOVE, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VASTART,      MVT::Other, Expand);
  setOperationAction(ISD::VAARG,        MVT::Other, Expand);
  setOperationAction(ISD::VACOPY,       MVT::Other, Expand);
  setOperationAction(ISD::VAEND,        MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE,    MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb())
    // Turn f64->i64 into FMRRD iff target supports vfp2.
    setOperationAction(ISD::BIT_CONVERT, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::SETCC,     MVT::i32, Expand);
  setOperationAction(ISD::SETCC,     MVT::f32, Expand);
  setOperationAction(ISD::SETCC,     MVT::f64, Expand);
  setOperationAction(ISD::SELECT,    MVT::i32, Expand);
  setOperationAction(ISD::SELECT,    MVT::f32, Expand);
  setOperationAction(ISD::SELECT,    MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC,  MVT::i32,   Custom);
  setOperationAction(ISD::BR_CC,  MVT::f32,   Custom);
  setOperationAction(ISD::BR_CC,  MVT::f64,   Custom);
  setOperationAction(ISD::BR_JT,  MVT::Other, Custom);

  setOperationAction(ISD::VASTART,      MVT::Other, Custom);
  setOperationAction(ISD::VACOPY,       MVT::Other, Expand);
  setOperationAction(ISD::VAEND,        MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE,    MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  // FP Constants can't be immediates.
  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);

  // We don't support sin/cos/fmod/copysign/pow.
  setOperationAction(ISD::FSIN,      MVT::f64, Expand);
  setOperationAction(ISD::FSIN,      MVT::f32, Expand);
  setOperationAction(ISD::FCOS,      MVT::f32, Expand);
  setOperationAction(ISD::FCOS,      MVT::f64, Expand);
  setOperationAction(ISD::FREM,      MVT::f64, Expand);
  setOperationAction(ISD::FREM,      MVT::f32, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  setOperationAction(ISD::FPOW,      MVT::f64, Expand);
  setOperationAction(ISD::FPOW,      MVT::f32, Expand);

  // int <-> fp are custom expanded into bit_convert + ARMISD ops.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::FMRRD - No need to call setTargetDAGCombine

  setStackPointerRegisterToSaveRestore(ARM::SP);
  setSchedulingPreference(SchedulingForRegPressure);
  setIfCvtBlockSizeLimit(Subtarget->isThumb() ? 0 : 10);
  setIfCvtDupBlockSizeLimit(Subtarget->isThumb() ? 0 : 2);
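  // Note: the if-converter limits are zero in Thumb mode because (pre-Thumb2)
  // Thumb instructions cannot be predicated, so there is nothing to convert
  // conditional branches into.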
  maxStoresPerMemcpy = 1;   // temporary - rewrite interface to use type
}
const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case ARMISD::Wrapper:       return "ARMISD::Wrapper";
  case ARMISD::WrapperJT:     return "ARMISD::WrapperJT";
  case ARMISD::CALL:          return "ARMISD::CALL";
  case ARMISD::CALL_PRED:     return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK:   return "ARMISD::CALL_NOLINK";
  case ARMISD::tCALL:         return "ARMISD::tCALL";
  case ARMISD::BRCOND:        return "ARMISD::BRCOND";
  case ARMISD::BR_JT:         return "ARMISD::BR_JT";
  case ARMISD::RET_FLAG:      return "ARMISD::RET_FLAG";
  case ARMISD::PIC_ADD:       return "ARMISD::PIC_ADD";
  case ARMISD::CMP:           return "ARMISD::CMP";
  case ARMISD::CMPNZ:         return "ARMISD::CMPNZ";
  case ARMISD::CMPFP:         return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0:       return "ARMISD::CMPFPw0";
  case ARMISD::FMSTAT:        return "ARMISD::FMSTAT";
  case ARMISD::CMOV:          return "ARMISD::CMOV";
  case ARMISD::CNEG:          return "ARMISD::CNEG";

  case ARMISD::FTOSI:         return "ARMISD::FTOSI";
  case ARMISD::FTOUI:         return "ARMISD::FTOUI";
  case ARMISD::SITOF:         return "ARMISD::SITOF";
  case ARMISD::UITOF:         return "ARMISD::UITOF";

  case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
  case ARMISD::RRX:           return "ARMISD::RRX";

  case ARMISD::FMRRD:         return "ARMISD::FMRRD";
  case ARMISD::FMDRR:         return "ARMISD::FMDRR";

  case ARMISD::THREAD_POINTER: return "ARMISD::THREAD_POINTER";
  }
}
//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//
/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC.
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: assert(0 && "Unknown condition code!");
  case ISD::SETNE:  return ARMCC::NE;
  case ISD::SETEQ:  return ARMCC::EQ;
  case ISD::SETGT:  return ARMCC::GT;
  case ISD::SETGE:  return ARMCC::GE;
  case ISD::SETLT:  return ARMCC::LT;
  case ISD::SETLE:  return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}
/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC. It
/// returns true if the operands should be inverted to form the proper
/// comparison.
static bool FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  bool Invert = false;
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: assert(0 && "Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::GT; Invert = true; break;
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
  return Invert;
}
static void
HowToPassArgument(MVT::ValueType ObjectVT, unsigned NumGPRs,
                  unsigned StackOffset, unsigned &NeededGPRs,
                  unsigned &NeededStackSize, unsigned &GPRPad,
                  unsigned &StackPad, unsigned Flags) {
  NeededStackSize = 0;
  NeededGPRs = 0;
  StackPad = 0;
  GPRPad = 0;
  unsigned align = (Flags >> ISD::ParamFlags::OrigAlignmentOffs);
  GPRPad = NumGPRs % ((align + 3)/4);
  StackPad = StackOffset % align;
  unsigned firstGPR = NumGPRs + GPRPad;
  switch (ObjectVT) {
  default: assert(0 && "Unhandled argument type!");
  case MVT::i32:
  case MVT::f32:
    if (firstGPR < 4)
      NeededGPRs = 1;
    else
      NeededStackSize = 4;
    break;
  case MVT::i64:
  case MVT::f64:
    if (firstGPR < 3)
      NeededGPRs = 2;
    else if (firstGPR == 3) {
      NeededGPRs = 1;
      NeededStackSize = 4;
    } else
      NeededStackSize = 8;
  }
}
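// For example (given the logic above): a 64-bit argument whose alignment
// flags say 4 bytes and whose first free register is R3 gets split, with
// NeededGPRs = 1 (one word in R3) and NeededStackSize = 4 (the other word
// on the stack).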
/// LowerCALL - Lowering an ISD::CALL node into a callseq_start <-
/// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType RetVT = Op.Val->getValueType(0);
  SDOperand Chain = Op.getOperand(0);
  unsigned CallConv = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
  assert((CallConv == CallingConv::C ||
          CallConv == CallingConv::Fast) && "unknown calling convention");
  SDOperand Callee = Op.getOperand(4);
  unsigned NumOps = (Op.getNumOperands() - 5) / 2;
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot
  unsigned NumGPRs = 0;     // GPRs used for parameter passing.

  // Count how many bytes are to be pushed on the stack.
  unsigned NumBytes = 0;

  // Add up all the space actually used.
  for (unsigned i = 0; i < NumOps; ++i) {
    unsigned ObjSize;
    unsigned ObjGPRs;
    unsigned GPRPad;
    unsigned StackPad;
    MVT::ValueType ObjectVT = Op.getOperand(5+2*i).getValueType();
    unsigned Flags = Op.getConstantOperandVal(5+2*i+1);
    HowToPassArgument(ObjectVT, NumGPRs, NumBytes, ObjGPRs, ObjSize,
                      GPRPad, StackPad, Flags);
    NumBytes += ObjSize + StackPad;
    NumGPRs += ObjGPRs + GPRPad;
  }
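  // (The loop above is a sizing pass only: it mirrors the register/stack
  // assignment done for real further below, so that NumBytes is known
  // before CALLSEQ_START is emitted.)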
  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain,
                               DAG.getConstant(NumBytes, MVT::i32));

  SDOperand StackPtr = DAG.getRegister(ARM::SP, MVT::i32);

  static const unsigned GPRArgRegs[] = {
    ARM::R0, ARM::R1, ARM::R2, ARM::R3
  };
447 std::vector<SDOperand> MemOpChains;
448 for (unsigned i = 0; i != NumOps; ++i) {
449 SDOperand Arg = Op.getOperand(5+2*i);
450 unsigned Flags = Op.getConstantOperandVal(5+2*i+1);
451 MVT::ValueType ArgVT = Arg.getValueType();
457 HowToPassArgument(ArgVT, NumGPRs, ArgOffset, ObjGPRs,
458 ObjSize, GPRPad, StackPad, Flags);
460 ArgOffset += StackPad;
463 default: assert(0 && "Unexpected ValueType for argument!");
465 RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs], Arg));
468 RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs],
469 DAG.getNode(ISD::BIT_CONVERT, MVT::i32, Arg)));
472 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Arg,
473 DAG.getConstant(0, getPointerTy()));
474 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Arg,
475 DAG.getConstant(1, getPointerTy()));
476 RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs], Lo));
478 RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs+1], Hi));
480 SDOperand PtrOff= DAG.getConstant(ArgOffset, StackPtr.getValueType());
481 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
482 MemOpChains.push_back(DAG.getStore(Chain, Hi, PtrOff, NULL, 0));
487 SDOperand Cvt = DAG.getNode(ARMISD::FMRRD,
488 DAG.getVTList(MVT::i32, MVT::i32),
490 RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs], Cvt));
492 RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs+1],
495 SDOperand PtrOff= DAG.getConstant(ArgOffset, StackPtr.getValueType());
496 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
497 MemOpChains.push_back(DAG.getStore(Chain, Cvt.getValue(1), PtrOff,
504 assert(ObjSize != 0);
505 SDOperand PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
506 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
507 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
511 ArgOffset += ObjSize;
514 if (!MemOpChains.empty())
515 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
516 &MemOpChains[0], MemOpChains.size());
  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }
  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  bool isDirect = false;
  bool isARMFunc = false;
  bool isLocalARMFunc = false;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    GlobalValue *GV = G->getGlobal();
    isDirect = true;
    bool isExt = (GV->isDeclaration() || GV->hasWeakLinkage() ||
                  GV->hasLinkOnceLinkage());
    bool isStub = (isExt && Subtarget->isTargetDarwin()) &&
                  getTargetMachine().getRelocationModel() != Reloc::Static;
    isARMFunc = !Subtarget->isThumb() || isStub;
    // ARM call to a local ARM function is predicable.
    isLocalARMFunc = !Subtarget->isThumb() && !isExt;
    // tBX takes a register source operand.
    if (isARMFunc && Subtarget->isThumb() && !Subtarget->hasV5TOps()) {
      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, ARMPCLabelIndex,
                                                           ARMCP::CPStub, 4);
      SDOperand CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 2);
      CPAddr = DAG.getNode(ARMISD::Wrapper, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), CPAddr, NULL, 0);
      SDOperand PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
      Callee = DAG.getNode(ARMISD::PIC_ADD, getPointerTy(), Callee, PICLabel);
    } else
      Callee = DAG.getTargetGlobalAddress(GV, getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    isDirect = true;
    bool isStub = Subtarget->isTargetDarwin() &&
                  getTargetMachine().getRelocationModel() != Reloc::Static;
    isARMFunc = !Subtarget->isThumb() || isStub;
    // tBX takes a register source operand.
    const char *Sym = S->getSymbol();
    if (isARMFunc && Subtarget->isThumb() && !Subtarget->hasV5TOps()) {
      ARMConstantPoolValue *CPV = new ARMConstantPoolValue(Sym, ARMPCLabelIndex,
                                                           ARMCP::CPStub, 4);
      SDOperand CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 2);
      CPAddr = DAG.getNode(ARMISD::Wrapper, MVT::i32, CPAddr);
      Callee = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), CPAddr, NULL, 0);
      SDOperand PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
      Callee = DAG.getNode(ARMISD::PIC_ADD, getPointerTy(), Callee, PICLabel);
    } else
      Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy());
  }
  // FIXME: handle tail calls differently.
  unsigned CallOpc;
  if (Subtarget->isThumb()) {
    if (!Subtarget->hasV5TOps() && (!isDirect || isARMFunc))
      CallOpc = ARMISD::CALL_NOLINK;
    else
      CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL;
  } else {
    CallOpc = (isDirect || Subtarget->hasV5TOps())
      ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL)
      : ARMISD::CALL_NOLINK;
  }
  if (CallOpc == ARMISD::CALL_NOLINK && !Subtarget->isThumb()) {
    // Implicit def LR - LR mustn't be allocated as GPR:$dst of CALL_NOLINK.
    Chain = DAG.getCopyToReg(Chain, ARM::LR,
                             DAG.getNode(ISD::UNDEF, MVT::i32), InFlag);
    InFlag = Chain.getValue(1);
  }
  std::vector<MVT::ValueType> NodeTys;
  NodeTys.push_back(MVT::Other);   // Returns a chain.
  NodeTys.push_back(MVT::Flag);    // Returns a flag for retval copy to use.

  std::vector<SDOperand> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.Val)
    Ops.push_back(InFlag);
  Chain = DAG.getNode(CallOpc, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, MVT::i32),
                             DAG.getConstant(0, MVT::i32),
                             InFlag);
  if (RetVT != MVT::Other)
    InFlag = Chain.getValue(1);
  std::vector<SDOperand> ResultVals;
  NodeTys.clear();

  // If the call has results, copy the values out of the ret val registers.
  switch (RetVT) {
  default: assert(0 && "Unexpected ret value!");
  case MVT::Other:
    break;
  case MVT::i32:
    Chain = DAG.getCopyFromReg(Chain, ARM::R0, MVT::i32, InFlag).getValue(1);
    ResultVals.push_back(Chain.getValue(0));
    if (Op.Val->getValueType(1) == MVT::i32) {
      // Returns an i64 value.
      Chain = DAG.getCopyFromReg(Chain, ARM::R1, MVT::i32,
                                 Chain.getValue(2)).getValue(1);
      ResultVals.push_back(Chain.getValue(0));
      NodeTys.push_back(MVT::i32);
    }
    NodeTys.push_back(MVT::i32);
    break;
  case MVT::f32:
    Chain = DAG.getCopyFromReg(Chain, ARM::R0, MVT::i32, InFlag).getValue(1);
    ResultVals.push_back(DAG.getNode(ISD::BIT_CONVERT, MVT::f32,
                                     Chain.getValue(0)));
    NodeTys.push_back(MVT::f32);
    break;
  case MVT::f64: {
    SDOperand Lo = DAG.getCopyFromReg(Chain, ARM::R0, MVT::i32, InFlag);
    SDOperand Hi = DAG.getCopyFromReg(Lo, ARM::R1, MVT::i32, Lo.getValue(2));
    ResultVals.push_back(DAG.getNode(ARMISD::FMDRR, MVT::f64, Lo, Hi));
    NodeTys.push_back(MVT::f64);
    break;
  }
  }
  NodeTys.push_back(MVT::Other);

  if (ResultVals.empty())
    return Chain;

  ResultVals.push_back(Chain);
  SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys, &ResultVals[0],
                              ResultVals.size());
  return Res.getValue(Op.ResNo);
}
static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Copy;
  SDOperand Chain = Op.getOperand(0);
  switch (Op.getNumOperands()) {
  default:
    assert(0 && "Do not know how to return this many arguments!");
    abort();
  case 1: {
    SDOperand LR = DAG.getRegister(ARM::LR, MVT::i32);
    return DAG.getNode(ARMISD::RET_FLAG, MVT::Other, Chain);
  }
  case 3:
    Op = Op.getOperand(1);
    if (Op.getValueType() == MVT::f32) {
      Op = DAG.getNode(ISD::BIT_CONVERT, MVT::i32, Op);
    } else if (Op.getValueType() == MVT::f64) {
      // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is
      // legal.
      Op = DAG.getNode(ARMISD::FMRRD, DAG.getVTList(MVT::i32, MVT::i32),
                       &Op, 1);
      SDOperand Sign = DAG.getConstant(0, MVT::i32);
      return DAG.getNode(ISD::RET, MVT::Other, Chain, Op, Sign,
                         Op.getValue(1), Sign);
    }
    Copy = DAG.getCopyToReg(Chain, ARM::R0, Op, SDOperand());
    if (DAG.getMachineFunction().liveout_empty())
      DAG.getMachineFunction().addLiveOut(ARM::R0);
    break;
  case 5:
    Copy = DAG.getCopyToReg(Chain, ARM::R1, Op.getOperand(3), SDOperand());
    Copy = DAG.getCopyToReg(Copy, ARM::R0, Op.getOperand(1), Copy.getValue(1));
    // If we haven't noted the R0+R1 are live out, do so now.
    if (DAG.getMachineFunction().liveout_empty()) {
      DAG.getMachineFunction().addLiveOut(ARM::R0);
      DAG.getMachineFunction().addLiveOut(ARM::R1);
    }
    break;
  }

  // We must use RET_FLAG instead of BRIND because BRIND doesn't have a flag.
  return DAG.getNode(ARMISD::RET_FLAG, MVT::Other, Copy, Copy.getValue(1));
}
// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form addressing mode. These wrapped nodes will be selected
// into MOVi.
static SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  SDOperand Res;
  if (CP->isMachineConstantPoolEntry())
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlignment());
  else
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                    CP->getAlignment());
  return DAG.getNode(ARMISD::Wrapper, MVT::i32, Res);
}
// Lower ISD::GlobalTLSAddress using the "general dynamic" model.
SDOperand
ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                                 SelectionDAG &DAG) {
  MVT::ValueType PtrVT = getPointerTy();
  unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
  ARMConstantPoolValue *CPV =
    new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, ARMCP::CPValue,
                             PCAdj, "tlsgd", true);
  SDOperand Argument = DAG.getTargetConstantPool(CPV, PtrVT, 2);
  Argument = DAG.getNode(ARMISD::Wrapper, MVT::i32, Argument);
  Argument = DAG.getLoad(PtrVT, DAG.getEntryNode(), Argument, NULL, 0);
  SDOperand Chain = Argument.getValue(1);

  SDOperand PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
  Argument = DAG.getNode(ARMISD::PIC_ADD, PtrVT, Argument, PICLabel);

  // call __tls_get_addr.
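  // (General-dynamic TLS: the "tlsgd" constant-pool entry resolves to the
  // GOT slot describing this variable's module and offset; __tls_get_addr
  // turns that descriptor into the variable's address at run time.)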
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Node = Argument;
  Entry.Ty = (const Type *) Type::Int32Ty;
  Args.push_back(Entry);
  std::pair<SDOperand, SDOperand> CallResult =
    LowerCallTo(Chain, (const Type *) Type::Int32Ty, false, false,
                CallingConv::C, false,
                DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG);
  return CallResult.first;
}
// Lower ISD::GlobalTLSAddress using the "initial exec" or
// "local exec" model.
SDOperand
ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG) {
  GlobalValue *GV = GA->getGlobal();
  SDOperand Offset;
  SDOperand Chain = DAG.getEntryNode();
  MVT::ValueType PtrVT = getPointerTy();
  // Get the Thread Pointer.
  SDOperand ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, PtrVT);
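  // ("Initial exec" is used when the variable may be defined in another
  // module: its offset from the thread pointer is loaded from a GOT entry.
  // "Local exec" is for locally-defined variables, whose offset is a
  // link-time constant.)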
  if (GV->isDeclaration()) {
    // initial exec model
    unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
    ARMConstantPoolValue *CPV =
      new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, ARMCP::CPValue,
                               PCAdj, "gottpoff", true);
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 2);
    Offset = DAG.getNode(ARMISD::Wrapper, MVT::i32, Offset);
    Offset = DAG.getLoad(PtrVT, Chain, Offset, NULL, 0);
    Chain = Offset.getValue(1);

    SDOperand PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
    Offset = DAG.getNode(ARMISD::PIC_ADD, PtrVT, Offset, PICLabel);

    Offset = DAG.getLoad(PtrVT, Chain, Offset, NULL, 0);
  } else {
    // local exec model
    ARMConstantPoolValue *CPV =
      new ARMConstantPoolValue(GV, ARMCP::CPValue, "tpoff");
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 2);
    Offset = DAG.getNode(ARMISD::Wrapper, MVT::i32, Offset);
    Offset = DAG.getLoad(PtrVT, Chain, Offset, NULL, 0);
  }

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, PtrVT, ThreadPointer, Offset);
}
SDOperand
ARMTargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) {
  // TODO: implement the "local dynamic" model
  assert(Subtarget->isTargetELF() &&
         "TLS not implemented for non-ELF targets");
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  // If the relocation model is PIC, use the "General Dynamic" TLS Model,
  // otherwise use the "Local Exec" TLS Model.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
    return LowerToTLSGeneralDynamicModel(GA, DAG);
  else
    return LowerToTLSExecModels(GA, DAG);
}
SDOperand ARMTargetLowering::LowerGlobalAddressELF(SDOperand Op,
                                                   SelectionDAG &DAG) {
  MVT::ValueType PtrVT = getPointerTy();
  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
  if (RelocM == Reloc::PIC_) {
    bool UseGOTOFF = GV->hasInternalLinkage() || GV->hasHiddenVisibility();
    ARMConstantPoolValue *CPV =
      new ARMConstantPoolValue(GV, ARMCP::CPValue, UseGOTOFF ? "GOTOFF" : "GOT");
    SDOperand CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 2);
    CPAddr = DAG.getNode(ARMISD::Wrapper, MVT::i32, CPAddr);
    SDOperand Result = DAG.getLoad(PtrVT, DAG.getEntryNode(), CPAddr, NULL, 0);
    SDOperand Chain = Result.getValue(1);
    SDOperand GOT = DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, PtrVT);
    Result = DAG.getNode(ISD::ADD, PtrVT, Result, GOT);
    if (!UseGOTOFF)
      Result = DAG.getLoad(PtrVT, Chain, Result, NULL, 0);
    return Result;
  } else {
    SDOperand CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 2);
    CPAddr = DAG.getNode(ARMISD::Wrapper, MVT::i32, CPAddr);
    return DAG.getLoad(PtrVT, DAG.getEntryNode(), CPAddr, NULL, 0);
  }
}
/// GVIsIndirectSymbol - true if the GV will be accessed via an indirect symbol
/// even in non-static mode.
static bool GVIsIndirectSymbol(GlobalValue *GV, Reloc::Model RelocM) {
  return RelocM != Reloc::Static &&
    (GV->hasWeakLinkage() || GV->hasLinkOnceLinkage() ||
     (GV->isDeclaration() && !GV->hasNotBeenReadFromBitcode()));
}
SDOperand ARMTargetLowering::LowerGlobalAddressDarwin(SDOperand Op,
                                                      SelectionDAG &DAG) {
  MVT::ValueType PtrVT = getPointerTy();
  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
  bool IsIndirect = GVIsIndirectSymbol(GV, RelocM);
  SDOperand CPAddr;
  if (RelocM == Reloc::Static)
    CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 2);
  else {
    unsigned PCAdj = (RelocM != Reloc::PIC_)
      ? 0 : (Subtarget->isThumb() ? 4 : 8);
    ARMCP::ARMCPKind Kind = IsIndirect ? ARMCP::CPNonLazyPtr
      : ARMCP::CPValue;
    ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, ARMPCLabelIndex,
                                                         Kind, PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 2);
  }
  CPAddr = DAG.getNode(ARMISD::Wrapper, MVT::i32, CPAddr);

  SDOperand Result = DAG.getLoad(PtrVT, DAG.getEntryNode(), CPAddr, NULL, 0);
  SDOperand Chain = Result.getValue(1);

  if (RelocM == Reloc::PIC_) {
    SDOperand PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
    Result = DAG.getNode(ARMISD::PIC_ADD, PtrVT, Result, PICLabel);
  }
  if (IsIndirect)
    Result = DAG.getLoad(PtrVT, Chain, Result, NULL, 0);

  return Result;
}
SDOperand ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDOperand Op,
                                                      SelectionDAG &DAG) {
  assert(Subtarget->isTargetELF() &&
         "GLOBAL OFFSET TABLE not implemented for non-ELF targets");
  MVT::ValueType PtrVT = getPointerTy();
  unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
  ARMConstantPoolValue *CPV = new ARMConstantPoolValue("_GLOBAL_OFFSET_TABLE_",
                                                       ARMPCLabelIndex,
                                                       ARMCP::CPValue, PCAdj);
  SDOperand CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 2);
  CPAddr = DAG.getNode(ARMISD::Wrapper, MVT::i32, CPAddr);
  SDOperand Result = DAG.getLoad(PtrVT, DAG.getEntryNode(), CPAddr, NULL, 0);
  SDOperand PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
  return DAG.getNode(ARMISD::PIC_ADD, PtrVT, Result, PICLabel);
}
static SDOperand LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue();
  switch (IntNo) {
  default: return SDOperand();   // Don't custom lower most intrinsics.
  case Intrinsic::arm_thread_pointer:
    return DAG.getNode(ARMISD::THREAD_POINTER, PtrVT);
  }
}
static SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG,
                              unsigned VarArgsFrameIndex) {
  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
  SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2));
  return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV->getValue(),
                      SV->getOffset());
}
static SDOperand LowerFORMAL_ARGUMENT(SDOperand Op, SelectionDAG &DAG,
                                      unsigned *vRegs, unsigned ArgNo,
                                      unsigned &NumGPRs, unsigned &ArgOffset) {
  MachineFunction &MF = DAG.getMachineFunction();
  MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType();
  SDOperand Root = Op.getOperand(0);
  std::vector<SDOperand> ArgValues;
  SSARegMap *RegMap = MF.getSSARegMap();

  static const unsigned GPRArgRegs[] = {
    ARM::R0, ARM::R1, ARM::R2, ARM::R3
  };

  unsigned ObjSize;
  unsigned ObjGPRs;
  unsigned GPRPad;
  unsigned StackPad;
  unsigned Flags = Op.getConstantOperandVal(ArgNo + 3);
  HowToPassArgument(ObjectVT, NumGPRs, ArgOffset, ObjGPRs,
                    ObjSize, GPRPad, StackPad, Flags);
  NumGPRs += GPRPad;
  ArgOffset += StackPad;

  SDOperand ArgValue;
  if (ObjGPRs == 1) {
    unsigned VReg = RegMap->createVirtualRegister(&ARM::GPRRegClass);
    MF.addLiveIn(GPRArgRegs[NumGPRs], VReg);
    vRegs[NumGPRs] = VReg;
    ArgValue = DAG.getCopyFromReg(Root, VReg, MVT::i32);
    if (ObjectVT == MVT::f32)
      ArgValue = DAG.getNode(ISD::BIT_CONVERT, MVT::f32, ArgValue);
  } else if (ObjGPRs == 2) {
    unsigned VReg = RegMap->createVirtualRegister(&ARM::GPRRegClass);
    MF.addLiveIn(GPRArgRegs[NumGPRs], VReg);
    vRegs[NumGPRs] = VReg;
    ArgValue = DAG.getCopyFromReg(Root, VReg, MVT::i32);

    VReg = RegMap->createVirtualRegister(&ARM::GPRRegClass);
    MF.addLiveIn(GPRArgRegs[NumGPRs+1], VReg);
    vRegs[NumGPRs+1] = VReg;
    SDOperand ArgValue2 = DAG.getCopyFromReg(Root, VReg, MVT::i32);

    assert(ObjectVT != MVT::i64 && "i64 should already be lowered");
    ArgValue = DAG.getNode(ARMISD::FMDRR, MVT::f64, ArgValue, ArgValue2);
  }
  NumGPRs += ObjGPRs;

  if (ObjSize) {
    // If the argument is actually used, emit a load from the right stack
    // slot.
    if (!Op.Val->hasNUsesOfValue(0, ArgNo)) {
      MachineFrameInfo *MFI = MF.getFrameInfo();
      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
      SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
      if (ObjGPRs == 0)
        ArgValue = DAG.getLoad(ObjectVT, Root, FIN, NULL, 0);
      else {
        SDOperand ArgValue2 = DAG.getLoad(MVT::i32, Root, FIN, NULL, 0);
        assert(ObjectVT != MVT::i64 && "i64 should already be lowered");
        ArgValue = DAG.getNode(ARMISD::FMDRR, MVT::f64, ArgValue, ArgValue2);
      }
    } else {
      // Don't emit a dead load.
      ArgValue = DAG.getNode(ISD::UNDEF, ObjectVT);
    }

    ArgOffset += ObjSize;   // Move on to the next argument.
  }

  return ArgValue;
}
SDOperand
ARMTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
  std::vector<SDOperand> ArgValues;
  SDOperand Root = Op.getOperand(0);
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot
  unsigned NumGPRs = 0;     // GPRs used for parameter passing.
  unsigned VRegs[4];

  unsigned NumArgs = Op.Val->getNumValues()-1;
  for (unsigned ArgNo = 0; ArgNo < NumArgs; ++ArgNo)
    ArgValues.push_back(LowerFORMAL_ARGUMENT(Op, DAG, VRegs, ArgNo,
                                             NumGPRs, ArgOffset));

  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  if (isVarArg) {
    static const unsigned GPRArgRegs[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3
    };

    MachineFunction &MF = DAG.getMachineFunction();
    SSARegMap *RegMap = MF.getSSARegMap();
    MachineFrameInfo *MFI = MF.getFrameInfo();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
    unsigned VARegSize = (4 - NumGPRs) * 4;
    unsigned VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1);
    if (VARegSaveSize) {
      // If this function is vararg, store any remaining integer argument regs
      // to their spots on the stack so that they may be loaded by dereferencing
      // the result of va_next.
      AFI->setVarArgsRegSaveSize(VARegSaveSize);
      VarArgsFrameIndex = MFI->CreateFixedObject(VARegSaveSize, ArgOffset +
                                                 VARegSaveSize - VARegSize);
      SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());

      SmallVector<SDOperand, 4> MemOps;
      for (; NumGPRs < 4; ++NumGPRs) {
        unsigned VReg = RegMap->createVirtualRegister(&ARM::GPRRegClass);
        MF.addLiveIn(GPRArgRegs[NumGPRs], VReg);
        SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i32);
        SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                          DAG.getConstant(4, getPointerTy()));
      }
      if (!MemOps.empty())
        Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                           &MemOps[0], MemOps.size());
    } else
      // This will point to the next argument passed via stack.
      VarArgsFrameIndex = MFI->CreateFixedObject(4, ArgOffset);
  }

  ArgValues.push_back(Root);

  // Return the new list of results.
  std::vector<MVT::ValueType> RetVT(Op.Val->value_begin(),
                                    Op.Val->value_end());
  return DAG.getNode(ISD::MERGE_VALUES, RetVT, &ArgValues[0], ArgValues.size());
}
/// isFloatingPointZero - Return true if this is +0.0.
static bool isFloatingPointZero(SDOperand Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isPosZero();
  else if (ISD::isEXTLoad(Op.Val) || ISD::isNON_EXTLoad(Op.Val)) {
    // Maybe this has already been legalized into the constant pool?
    if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
      SDOperand WrapperOp = Op.getOperand(1).getOperand(0);
      if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
        if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
          return CFP->getValueAPF().isPosZero();
    }
  }
  return false;
}
static bool isLegalCmpImmediate(unsigned C, bool isThumb) {
  return (isThumb && (C & ~255U) == 0) ||
         (!isThumb && ARM_AM::getSOImmVal(C) != -1);
}
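// (Thumb CMP only takes an unsigned 8-bit immediate, 0-255; ARM-mode CMP
// accepts any "so_imm" operand, i.e. an 8-bit value rotated right by an
// even amount, which is what getSOImmVal tests for.)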
/// Returns appropriate ARM CMP (cmp) and corresponding condition code for
/// the given operands.
static SDOperand getARMCmp(SDOperand LHS, SDOperand RHS, ISD::CondCode CC,
                           SDOperand &ARMCC, SelectionDAG &DAG, bool isThumb) {
  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.Val)) {
    unsigned C = RHSC->getValue();
    if (!isLegalCmpImmediate(C, isThumb)) {
      // Constant does not fit, try adjusting it by one?
      switch (CC) {
      default: break;
      case ISD::SETLT:
      case ISD::SETGE:
        if (isLegalCmpImmediate(C-1, isThumb)) {
          CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
          RHS = DAG.getConstant(C-1, MVT::i32);
        }
        break;
      case ISD::SETULT:
      case ISD::SETUGE:
        if (C > 0 && isLegalCmpImmediate(C-1, isThumb)) {
          CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
          RHS = DAG.getConstant(C-1, MVT::i32);
        }
        break;
      case ISD::SETLE:
      case ISD::SETGT:
        if (isLegalCmpImmediate(C+1, isThumb)) {
          CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
          RHS = DAG.getConstant(C+1, MVT::i32);
        }
        break;
      case ISD::SETULE:
      case ISD::SETUGT:
        if (C < 0xffffffff && isLegalCmpImmediate(C+1, isThumb)) {
          CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
          RHS = DAG.getConstant(C+1, MVT::i32);
        }
        break;
      }
    }
  }

  ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
  ARMISD::NodeType CompareType;
  switch (CondCode) {
  default:
    CompareType = ARMISD::CMP;
    break;
  case ARMCC::EQ:
  case ARMCC::NE:
  case ARMCC::MI:
  case ARMCC::PL:
    // Uses only N and Z Flags
    CompareType = ARMISD::CMPNZ;
    break;
  }
  ARMCC = DAG.getConstant(CondCode, MVT::i32);
  return DAG.getNode(CompareType, MVT::Flag, LHS, RHS);
}
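// For example, in Thumb mode "x < 256" (SETLT with the unencodable
// immediate 256) is rewritten by getARMCmp above as "x <= 255" (SETLE),
// since 255 does fit in the 8-bit immediate field.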
/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
static SDOperand getVFPCmp(SDOperand LHS, SDOperand RHS, SelectionDAG &DAG) {
  SDOperand Cmp;
  if (!isFloatingPointZero(RHS))
    Cmp = DAG.getNode(ARMISD::CMPFP, MVT::Flag, LHS, RHS);
  else
    Cmp = DAG.getNode(ARMISD::CMPFPw0, MVT::Flag, LHS);
  return DAG.getNode(ARMISD::FMSTAT, MVT::Flag, Cmp);
}
static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG,
                                const ARMSubtarget *ST) {
  MVT::ValueType VT = Op.getValueType();
  SDOperand LHS = Op.getOperand(0);
  SDOperand RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDOperand TrueVal = Op.getOperand(2);
  SDOperand FalseVal = Op.getOperand(3);

  if (LHS.getValueType() == MVT::i32) {
    SDOperand ARMCC;
    SDOperand CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    SDOperand Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, ST->isThumb());
    return DAG.getNode(ARMISD::CMOV, VT, FalseVal, TrueVal, ARMCC, CCR, Cmp);
  }

  ARMCC::CondCodes CondCode, CondCode2;
  if (FPCCToARMCC(CC, CondCode, CondCode2))
    std::swap(TrueVal, FalseVal);

  SDOperand ARMCC = DAG.getConstant(CondCode, MVT::i32);
  SDOperand CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDOperand Cmp = getVFPCmp(LHS, RHS, DAG);
  SDOperand Result = DAG.getNode(ARMISD::CMOV, VT, FalseVal, TrueVal,
                                 ARMCC, CCR, Cmp);
  if (CondCode2 != ARMCC::AL) {
    SDOperand ARMCC2 = DAG.getConstant(CondCode2, MVT::i32);
    // FIXME: Needs another CMP because flag can have but one use.
    SDOperand Cmp2 = getVFPCmp(LHS, RHS, DAG);
    Result = DAG.getNode(ARMISD::CMOV, VT, Result, TrueVal, ARMCC2, CCR, Cmp2);
  }
  return Result;
}
static SDOperand LowerBR_CC(SDOperand Op, SelectionDAG &DAG,
                            const ARMSubtarget *ST) {
  SDOperand Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDOperand LHS = Op.getOperand(2);
  SDOperand RHS = Op.getOperand(3);
  SDOperand Dest = Op.getOperand(4);

  if (LHS.getValueType() == MVT::i32) {
    SDOperand ARMCC;
    SDOperand CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    SDOperand Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, ST->isThumb());
    return DAG.getNode(ARMISD::BRCOND, MVT::Other, Chain, Dest, ARMCC, CCR, Cmp);
  }

  assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
  ARMCC::CondCodes CondCode, CondCode2;
  if (FPCCToARMCC(CC, CondCode, CondCode2))
    // Swap the LHS/RHS of the comparison if needed.
    std::swap(LHS, RHS);

  SDOperand Cmp = getVFPCmp(LHS, RHS, DAG);
  SDOperand ARMCC = DAG.getConstant(CondCode, MVT::i32);
  SDOperand CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Flag);
  SDOperand Ops[] = { Chain, Dest, ARMCC, CCR, Cmp };
  SDOperand Res = DAG.getNode(ARMISD::BRCOND, VTList, Ops, 5);
  if (CondCode2 != ARMCC::AL) {
    ARMCC = DAG.getConstant(CondCode2, MVT::i32);
    SDOperand Ops[] = { Res, Dest, ARMCC, CCR, Res.getValue(1) };
    Res = DAG.getNode(ARMISD::BRCOND, VTList, Ops, 5);
  }
  return Res;
}
SDOperand ARMTargetLowering::LowerBR_JT(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Chain = Op.getOperand(0);
  SDOperand Table = Op.getOperand(1);
  SDOperand Index = Op.getOperand(2);

  MVT::ValueType PTy = getPointerTy();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
  SDOperand UId = DAG.getConstant(AFI->createJumpTableUId(), PTy);
  SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
  Table = DAG.getNode(ARMISD::WrapperJT, MVT::i32, JTI, UId);
  Index = DAG.getNode(ISD::MUL, PTy, Index, DAG.getConstant(4, PTy));
  SDOperand Addr = DAG.getNode(ISD::ADD, PTy, Index, Table);
  bool isPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_;
  Addr = DAG.getLoad(isPIC ? (MVT::ValueType)MVT::i32 : PTy,
                     Chain, Addr, NULL, 0);
  Chain = Addr.getValue(1);
  if (isPIC)
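    // (In PIC mode the jump table holds table-relative offsets rather than
    // absolute addresses, so the table base is added back in here.)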
    Addr = DAG.getNode(ISD::ADD, PTy, Addr, Table);
  return DAG.getNode(ARMISD::BR_JT, MVT::Other, Chain, Addr, JTI, UId);
}
static SDOperand LowerFP_TO_INT(SDOperand Op, SelectionDAG &DAG) {
  unsigned Opc =
    Op.getOpcode() == ISD::FP_TO_SINT ? ARMISD::FTOSI : ARMISD::FTOUI;
  Op = DAG.getNode(Opc, MVT::f32, Op.getOperand(0));
  return DAG.getNode(ISD::BIT_CONVERT, MVT::i32, Op);
}

static SDOperand LowerINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  unsigned Opc =
    Op.getOpcode() == ISD::SINT_TO_FP ? ARMISD::SITOF : ARMISD::UITOF;

  Op = DAG.getNode(ISD::BIT_CONVERT, MVT::f32, Op.getOperand(0));
  return DAG.getNode(Opc, VT, Op);
}
static SDOperand LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) {
  // Implement fcopysign with a fabs and a conditional fneg.
  SDOperand Tmp0 = Op.getOperand(0);
  SDOperand Tmp1 = Op.getOperand(1);
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType SrcVT = Tmp1.getValueType();
  SDOperand AbsVal = DAG.getNode(ISD::FABS, VT, Tmp0);
  SDOperand Cmp = getVFPCmp(Tmp1, DAG.getConstantFP(0.0, SrcVT), DAG);
  SDOperand ARMCC = DAG.getConstant(ARMCC::LT, MVT::i32);
  SDOperand CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  return DAG.getNode(ARMISD::CNEG, VT, AbsVal, AbsVal, ARMCC, CCR, Cmp);
}
SDOperand ARMTargetLowering::LowerMEMCPYInline(SDOperand Chain,
                                               SDOperand Dest,
                                               SDOperand Source,
                                               unsigned Size,
                                               unsigned Align,
                                               SelectionDAG &DAG) {
  // Do repeated 4-byte loads and stores. To be improved.
  assert((Align & 3) == 0 && "Expected 4-byte aligned addresses!");
  unsigned BytesLeft = Size & 3;
  unsigned NumMemOps = Size >> 2;
  unsigned EmittedNumMemOps = 0;
  unsigned SrcOff = 0, DstOff = 0;
  MVT::ValueType VT = MVT::i32;
  unsigned VTSize = 4;
  unsigned i = 0;
  const unsigned MAX_LOADS_IN_LDM = 6;
  SDOperand TFOps[MAX_LOADS_IN_LDM];
  SDOperand Loads[MAX_LOADS_IN_LDM];

  // Emit up to MAX_LOADS_IN_LDM loads, then a TokenFactor barrier, then the
  // same number of stores.  The loads and stores will get combined into
  // ldm/stm later on.
  while (EmittedNumMemOps < NumMemOps) {
    for (i = 0;
         i < MAX_LOADS_IN_LDM && EmittedNumMemOps + i < NumMemOps; ++i) {
      Loads[i] = DAG.getLoad(VT, Chain,
                             DAG.getNode(ISD::ADD, MVT::i32, Source,
                                         DAG.getConstant(SrcOff, MVT::i32)),
                             NULL, 0);
      TFOps[i] = Loads[i].getValue(1);
      SrcOff += VTSize;
    }
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, &TFOps[0], i);

    for (i = 0;
         i < MAX_LOADS_IN_LDM && EmittedNumMemOps + i < NumMemOps; ++i) {
      TFOps[i] = DAG.getStore(Chain, Loads[i],
                              DAG.getNode(ISD::ADD, MVT::i32, Dest,
                                          DAG.getConstant(DstOff, MVT::i32)),
                              NULL, 0);
      DstOff += VTSize;
    }
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, &TFOps[0], i);

    EmittedNumMemOps += i;
  }

  if (BytesLeft == 0)
    return Chain;

  // Issue loads / stores for the trailing (1 - 3) bytes.
  unsigned BytesLeftSave = BytesLeft;
  i = 0;
  while (BytesLeft) {
    if (BytesLeft >= 2) {
      VT = MVT::i16;
      VTSize = 2;
    } else {
      VT = MVT::i8;
      VTSize = 1;
    }

    Loads[i] = DAG.getLoad(VT, Chain,
                           DAG.getNode(ISD::ADD, MVT::i32, Source,
                                       DAG.getConstant(SrcOff, MVT::i32)),
                           NULL, 0);
    TFOps[i] = Loads[i].getValue(1);
    ++i;
    SrcOff += VTSize;
    BytesLeft -= VTSize;
  }
  Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, &TFOps[0], i);

  i = 0;
  BytesLeft = BytesLeftSave;
  while (BytesLeft) {
    if (BytesLeft >= 2) {
      VT = MVT::i16;
      VTSize = 2;
    } else {
      VT = MVT::i8;
      VTSize = 1;
    }

    TFOps[i] = DAG.getStore(Chain, Loads[i],
                            DAG.getNode(ISD::ADD, MVT::i32, Dest,
                                        DAG.getConstant(DstOff, MVT::i32)),
                            NULL, 0);
    ++i;
    DstOff += VTSize;
    BytesLeft -= VTSize;
  }
  return DAG.getNode(ISD::TokenFactor, MVT::Other, &TFOps[0], i);
}
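// (The word-sized loads above are deliberately emitted in groups separated
// by TokenFactor barriers: keeping up to six loads adjacent lets the
// load/store optimizer later fold each group into a single LDM/STM
// multiple-register instruction, as the comment in the loop notes.)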
static SDNode *ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) {
  // Turn f64->i64 into FMRRD.
  assert(N->getValueType(0) == MVT::i64 &&
         N->getOperand(0).getValueType() == MVT::f64);

  SDOperand Op = N->getOperand(0);
  SDOperand Cvt = DAG.getNode(ARMISD::FMRRD, DAG.getVTList(MVT::i32, MVT::i32),
                              &Op, 1);

  // Merge the pieces into a single i64 value.
  return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Cvt, Cvt.getValue(1)).Val;
}
static SDNode *ExpandSRx(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST) {
  assert(N->getValueType(0) == MVT::i64 &&
         (N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
         "Unknown shift to lower!");

  // We only lower SRA, SRL of 1 here, all others use generic lowering.
  if (!isa<ConstantSDNode>(N->getOperand(1)) ||
      cast<ConstantSDNode>(N->getOperand(1))->getValue() != 1)
    return 0;

  // If we are in thumb mode, we don't have RRX.
  if (ST->isThumb()) return 0;

  // Okay, we have a 64-bit SRA or SRL of 1.  Lower this to an RRX expr.
  SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, N->getOperand(0),
                             DAG.getConstant(0, MVT::i32));
  SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, N->getOperand(0),
                             DAG.getConstant(1, MVT::i32));

  // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
  // captures the result into a carry flag.
  unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG : ARMISD::SRA_FLAG;
  Hi = DAG.getNode(Opc, DAG.getVTList(MVT::i32, MVT::Flag), &Hi, 1);

  // The low part is an ARMISD::RRX operand, which shifts the carry in.
  Lo = DAG.getNode(ARMISD::RRX, MVT::i32, Lo, Hi.getValue(1));

  // Merge the pieces into a single i64 value.
  return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Lo, Hi).Val;
}
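// For example, an "srl i64 %x, 1" lowered here ends up as something like:
//   movs r1, r1, lsr #1    @ SRL_FLAG: high word >>= 1, old bit 0 -> carry
//   mov  r0, r0, rrx       @ RRX: low = (carry << 31) | (low >> 1)
// (register assignments illustrative only).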
SDOperand ARMTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
  switch (Op.getOpcode()) {
  default: assert(0 && "Don't know how to custom lower this!"); abort();
  case ISD::ConstantPool:  return LowerConstantPool(Op, DAG);
  case ISD::GlobalAddress:
    return Subtarget->isTargetDarwin() ? LowerGlobalAddressDarwin(Op, DAG) :
      LowerGlobalAddressELF(Op, DAG);
  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
  case ISD::CALL:          return LowerCALL(Op, DAG);
  case ISD::RET:           return LowerRET(Op, DAG);
  case ISD::SELECT_CC:     return LowerSELECT_CC(Op, DAG, Subtarget);
  case ISD::BR_CC:         return LowerBR_CC(Op, DAG, Subtarget);
  case ISD::BR_JT:         return LowerBR_JT(Op, DAG);
  case ISD::VASTART:       return LowerVASTART(Op, DAG, VarArgsFrameIndex);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:    return LowerINT_TO_FP(Op, DAG);
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:    return LowerFP_TO_INT(Op, DAG);
  case ISD::FCOPYSIGN:     return LowerFCOPYSIGN(Op, DAG);
  case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG);
  case ISD::RETURNADDR:    break;
  case ISD::FRAMEADDR:     break;
  case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG);
  case ISD::MEMCPY:        return LowerMEMCPY(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);

  // FIXME: Remove these when LegalizeDAGTypes lands.
  case ISD::BIT_CONVERT:   return SDOperand(ExpandBIT_CONVERT(Op.Val, DAG), 0);
  case ISD::SRL:
  case ISD::SRA:           return SDOperand(ExpandSRx(Op.Val, DAG, Subtarget), 0);
  }
  return SDOperand();
}
/// ExpandOperationResult - Provide custom lowering hooks for expanding
/// operations.
SDNode *ARMTargetLowering::ExpandOperationResult(SDNode *N, SelectionDAG &DAG) {
  switch (N->getOpcode()) {
  default: assert(0 && "Don't know how to custom expand this!"); abort();
  case ISD::BIT_CONVERT: return ExpandBIT_CONVERT(N, DAG);
  case ISD::SRL:
  case ISD::SRA:         return ExpandSRx(N, DAG, Subtarget);
  }
}
//===----------------------------------------------------------------------===//
//                           ARM Scheduler Hooks
//===----------------------------------------------------------------------===//

MachineBasicBlock *
ARMTargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
                                           MachineBasicBlock *BB) {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  switch (MI->getOpcode()) {
  default: assert(false && "Unexpected instr type to insert");
  case ARM::tMOVCCr: {
    // To "insert" a SELECT_CC instruction, we actually have to insert the
    // diamond control-flow pattern.  The incoming instruction knows the
    // destination vreg to set, the condition code register to branch on, the
    // true/false values to select between, and a branch opcode to use.
    const BasicBlock *LLVM_BB = BB->getBasicBlock();
    ilist<MachineBasicBlock>::iterator It = BB;
    ++It;

    //  thisMBB:
    //  ...
    //   TrueVal = ...
    //   cmpTY ccX, r1, r2
    //   bCC copy1MBB
    //   fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB = BB;
    MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
    BuildMI(BB, TII->get(ARM::tBcc)).addMBB(sinkMBB)
      .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg());
    MachineFunction *F = BB->getParent();
    F->getBasicBlockList().insert(It, copy0MBB);
    F->getBasicBlockList().insert(It, sinkMBB);
    // Update machine-CFG edges by first adding all successors of the current
    // block to the new block which will contain the Phi node for the select.
    for (MachineBasicBlock::succ_iterator i = BB->succ_begin(),
         e = BB->succ_end(); i != e; ++i)
      sinkMBB->addSuccessor(*i);
    // Next, remove all successors of the current block, and add the true
    // and fallthrough blocks as its successors.
    while (!BB->succ_empty())
      BB->removeSuccessor(BB->succ_begin());
    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    //  copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    //  sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //  ...
    BB = sinkMBB;
    BuildMI(BB, TII->get(ARM::PHI), MI->getOperand(0).getReg())
      .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
      .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

    delete MI;   // The pseudo instruction is gone now.
    return BB;
  }
  }
}
//===----------------------------------------------------------------------===//
//                           ARM Optimization Hooks
//===----------------------------------------------------------------------===//

/// PerformFMRRDCombine - Target-specific dag combine xforms for ARMISD::FMRRD.
static SDOperand PerformFMRRDCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI) {
  // fmrrd(fmdrr x, y) -> x,y
  SDOperand InDouble = N->getOperand(0);
  if (InDouble.getOpcode() == ARMISD::FMDRR)
    return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));
  return SDOperand();
}

SDOperand ARMTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default: break;
  case ARMISD::FMRRD: return PerformFMRRDCombine(N, DCI);
  }
  return SDOperand();
}
/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for load / store of the
/// given type.
static bool isLegalAddressImmediate(int64_t V, MVT::ValueType VT,
                                    const ARMSubtarget *Subtarget) {
  if (V == 0)
    return true;

  if (Subtarget->isThumb()) {
    if (V < 0)
      return false;

    unsigned Scale = 1;
    switch (VT) {
    default: return false;
    case MVT::i1:
    case MVT::i8:
      // Scale == 1.
      break;
    case MVT::i16:
      Scale = 2;
      break;
    case MVT::i32:
      Scale = 4;
      break;
    }

    if ((V & (Scale - 1)) != 0)
      return false;
    V /= Scale;
    return V == (V & ((1LL << 5) - 1));
  }

  if (V < 0)
    V = - V;
  switch (VT) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
  case MVT::i32:
    // +- imm12
    return V == (V & ((1LL << 12) - 1));
  case MVT::i16:
    // +- imm8
    return V == (V & ((1LL << 8) - 1));
  case MVT::f32:
  case MVT::f64:
    if (!Subtarget->hasVFP2())
      return false;
    if ((V & 3) != 0)
      return false;
    V >>= 2;
    return V == (V & ((1LL << 8) - 1));
  }
}
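// (Summary of the ranges accepted above: ARM addressing mode 2 (i8/i32
// loads/stores) takes a +/- 12-bit byte offset, addressing mode 3 (i16,
// and sign-extending narrow loads) +/- 8 bits, and VFP loads/stores an
// 8-bit offset in 4-byte words; Thumb allows only an unsigned 5-bit
// offset scaled by the access size.)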
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              const Type *Ty) const {
  if (!isLegalAddressImmediate(AM.BaseOffs, getValueType(Ty), Subtarget))
    return false;

  // Can never fold addr of global into load/store.
  if (AM.BaseGV)
    return false;

  switch (AM.Scale) {
  case 0:   // no scale reg, must be "r+i" or "r", or "i".
    break;
  case 1:
    if (Subtarget->isThumb())
      return false;
    // FALL THROUGH.
  default: {
    // ARM doesn't support any R+R*scale+imm addr modes.
    if (AM.BaseOffs)
      return false;

    int Scale = AM.Scale;
    switch (getValueType(Ty)) {
    default: return false;
    case MVT::i1:
    case MVT::i8:
    case MVT::i32:
    case MVT::i64:
      // This assumes i64 is legalized to a pair of i32. If not (i.e. ldrd /
      // strd are used), then its address mode is the same as i16.
      // r + r
      if (Scale < 0) Scale = -Scale;
      if (Scale == 1)
        return true;
      // r + r << imm
      return isPowerOf2_32(Scale & ~1);
    case MVT::i16:
      // r + r
      if (((unsigned)AM.HasBaseReg + Scale) <= 2)
        return true;
      return false;
    case MVT::isVoid:
      // Note, we allow "void" uses (basically, uses that aren't loads or
      // stores), because arm allows folding a scale into many arithmetic
      // operations. This should be made more precise and revisited later.

      // Allow r << imm, but the imm has to be a multiple of two.
      if (AM.Scale & 1) return false;
      return isPowerOf2_32(AM.Scale);
    }
    break;
  }
  }
  return true;
}
static bool getIndexedAddressParts(SDNode *Ptr, MVT::ValueType VT,
                                   bool isSEXTLoad, SDOperand &Base,
                                   SDOperand &Offset, bool &isInc,
                                   SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;

  if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
    // AddressingMode 3
    Base = Ptr->getOperand(0);
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getValue();
      if (RHSC < 0 && RHSC > -256) {
        isInc = false;
        Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
        return true;
      }
    }
    isInc = (Ptr->getOpcode() == ISD::ADD);
    Offset = Ptr->getOperand(1);
    return true;
  } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
    // AddressingMode 2
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getValue();
      if (RHSC < 0 && RHSC > -0x1000) {
        isInc = false;
        Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
        Base = Ptr->getOperand(0);
        return true;
      }
    }

    if (Ptr->getOpcode() == ISD::ADD) {
      isInc = true;
      ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(Ptr->getOperand(0));
      if (ShOpcVal != ARM_AM::no_shift) {
        Base = Ptr->getOperand(1);
        Offset = Ptr->getOperand(0);
      } else {
        Base = Ptr->getOperand(0);
        Offset = Ptr->getOperand(1);
      }
      return true;
    }

    isInc = (Ptr->getOpcode() == ISD::ADD);
    Base = Ptr->getOperand(0);
    Offset = Ptr->getOperand(1);
    return true;
  }

  // FIXME: Use FLDM / FSTM to emulate indexed FP load / store.
  return false;
}
/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
bool
ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDOperand &Base,
                                             SDOperand &Offset,
                                             ISD::MemIndexedMode &AM,
                                             SelectionDAG &DAG) {
  if (Subtarget->isThumb())
    return false;

  MVT::ValueType VT;
  SDOperand Ptr;
  bool isSEXTLoad = false;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getLoadedVT();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getStoredVT();
  } else
    return false;

  bool isInc;
  bool isLegal = getIndexedAddressParts(Ptr.Val, VT, isSEXTLoad, Base, Offset,
                                        isInc, DAG);
  if (isLegal) {
    AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
    return true;
  }
  return false;
}
/// getPostIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if this node can be
/// combined with a load / store to form a post-indexed load / store.
bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                   SDOperand &Base,
                                                   SDOperand &Offset,
                                                   ISD::MemIndexedMode &AM,
                                                   SelectionDAG &DAG) {
  if (Subtarget->isThumb())
    return false;

  MVT::ValueType VT;
  bool isSEXTLoad = false;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getLoadedVT();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getStoredVT();
  } else
    return false;

  bool isInc;
  bool isLegal = getIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
                                        isInc, DAG);
  if (isLegal) {
    AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
    return true;
  }
  return false;
}
void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                       uint64_t Mask,
                                                       uint64_t &KnownZero,
                                                       uint64_t &KnownOne,
                                                       const SelectionDAG &DAG,
                                                       unsigned Depth) const {
  KnownZero = 0;
  KnownOne = 0;
  switch (Op.getOpcode()) {
  default: break;
  case ARMISD::CMOV: {
    // Bits are known zero/one if known on the LHS and RHS.
    DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
    if (KnownZero == 0 && KnownOne == 0) return;

    uint64_t KnownZeroRHS, KnownOneRHS;
    DAG.ComputeMaskedBits(Op.getOperand(1), Mask,
                          KnownZeroRHS, KnownOneRHS, Depth+1);
    KnownZero &= KnownZeroRHS;
    KnownOne  &= KnownOneRHS;
    return;
  }
  }
}
//===----------------------------------------------------------------------===//
//                           ARM Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
ARMTargetLowering::ConstraintType
ARMTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'l': return C_RegisterClass;
    case 'w': return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}
std::pair<unsigned, const TargetRegisterClass*>
ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    // GCC ARM Constraint Letters
    switch (Constraint[0]) {
    case 'l':
    // FIXME: in thumb mode, 'l' is only low-regs.
    // FALL THROUGH.
    case 'r':
      return std::make_pair(0U, ARM::GPRRegisterClass);
    case 'w':
      if (VT == MVT::f32)
        return std::make_pair(0U, ARM::SPRRegisterClass);
      if (VT == MVT::f64)
        return std::make_pair(0U, ARM::DPRRegisterClass);
      break;
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
std::vector<unsigned> ARMTargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  if (Constraint.size() != 1)
    return std::vector<unsigned>();

  switch (Constraint[0]) {      // GCC ARM Constraint Letters
  default: break;
  case 'l':
  case 'r':
    return make_vector<unsigned>(ARM::R0,  ARM::R1,  ARM::R2,  ARM::R3,
                                 ARM::R4,  ARM::R5,  ARM::R6,  ARM::R7,
                                 ARM::R8,  ARM::R9,  ARM::R10, ARM::R11,
                                 ARM::R12, ARM::LR, 0);
  case 'w':
    if (VT == MVT::f32)
      return make_vector<unsigned>(ARM::S0, ARM::S1, ARM::S2, ARM::S3,
                                   ARM::S4, ARM::S5, ARM::S6, ARM::S7,
                                   ARM::S8, ARM::S9, ARM::S10, ARM::S11,
                                   ARM::S12, ARM::S13, ARM::S14, ARM::S15,
                                   ARM::S16, ARM::S17, ARM::S18, ARM::S19,
                                   ARM::S20, ARM::S21, ARM::S22, ARM::S23,
                                   ARM::S24, ARM::S25, ARM::S26, ARM::S27,
                                   ARM::S28, ARM::S29, ARM::S30, ARM::S31, 0);
    if (VT == MVT::f64)
      return make_vector<unsigned>(ARM::D0,  ARM::D1,  ARM::D2,  ARM::D3,
                                   ARM::D4,  ARM::D5,  ARM::D6,  ARM::D7,
                                   ARM::D8,  ARM::D9,  ARM::D10, ARM::D11,
                                   ARM::D12, ARM::D13, ARM::D14, ARM::D15, 0);
    break;
  }

  return std::vector<unsigned>();
}