//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//
#include "PPCISelLowering.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPredicates.h"
#include "PPCTargetMachine.h"
#include "PPCPerfectShuffle.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/DerivedTypes.h"
using namespace llvm;
static cl::opt<bool> EnablePPCPreinc("enable-ppc-preinc",
  cl::desc("enable preincrement load/store generation on PPC (experimental)"),
  cl::Hidden);
PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
  : TargetLowering(TM), PPCSubTarget(*TM.getSubtargetImpl()) {

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // Set up the register classes.
  addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
  addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
  addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
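  // A pre-inc form updates its base register as a side effect: e.g. the
  // PowerPC "lwzu r5, 4(r3)" loads from r3+4 and then writes r3+4 back into
  // r3, so the pointer bump comes for free with the memory access.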
  // This is used in the ppcf128->int sequence.  Note it has different semantics
  // from FP_ROUND: that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // PowerPC has no SREM/UREM instructions.
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FPOW , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  setOperationAction(ISD::FPOW , MVT::f32, Expand);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  // If we're enabling GP optimizations, use hardware square root.
  if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) {
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);
  }
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  // PowerPC does not have BSWAP, CTPOP or CTTZ.
  setOperationAction(ISD::BSWAP, MVT::i32 , Expand);
  setOperationAction(ISD::CTPOP, MVT::i32 , Expand);
  setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
  setOperationAction(ISD::BSWAP, MVT::i64 , Expand);
  setOperationAction(ISD::CTPOP, MVT::i64 , Expand);
  setOperationAction(ISD::CTTZ , MVT::i64 , Expand);

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32 , Expand);
  setOperationAction(ISD::ROTR, MVT::i64 , Expand);

  // PowerPC does not have Select.
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::i64, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires SetCC.
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::i64, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::f64, Expand);

  // We cannot sextinreg(i1).  Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
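  // e.g. for an i32 value x whose low bit holds the i1, the expansion is
  //   (sra (shl x, 31), 31)
  // which copies bit 0 into every bit of the result.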
  // Support label based line numbers.
  setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // RET must be custom lowered, to meet ABI requirements.
  setOperationAction(ISD::RET , MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART , MVT::Other, Custom);
  // VAARG is custom lowered with the ELF 32 ABI.
  if (TM.getSubtarget<PPCSubtarget>().isELF32_ABI())
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
  else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
  // Use the default implementation.
  setOperationAction(ISD::VACOPY , MVT::Other, Expand);
  setOperationAction(ISD::VAEND , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE , MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
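  // Each of the predicates above needs two CR bits from an fcmpu: "ult",
  // for instance, is really "unordered OR less-than".  Expanding them lets
  // the legalizer emit a pair of single-condition comparisons instead.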
  if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    // FIXME: disable this lowered code.  This generates 64-bit register values,
    // and we don't model the fact that the top part is clobbered by calls.  We
    // need to flag these together so that the value isn't live across a call.
    //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }
  if (TM.getSubtarget<PPCSubtarget>().use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, PPC::G8RCRegisterClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }
  if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) {
    // First set operation action for all vector types to expand.  Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD , VT, Legal);
      setOperationAction(ISD::SUB , VT, Legal);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND , VT, Promote);
      AddPromotedToType (ISD::AND , VT, MVT::v4i32);
      setOperationAction(ISD::OR , VT, Promote);
      AddPromotedToType (ISD::OR , VT, MVT::v4i32);
      setOperationAction(ISD::XOR , VT, Promote);
      AddPromotedToType (ISD::XOR , VT, MVT::v4i32);
      setOperationAction(ISD::LOAD , VT, Promote);
      AddPromotedToType (ISD::LOAD , VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType (ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL , VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::CTPOP, VT, Expand);
      setOperationAction(ISD::CTLZ, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
    }
    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND , MVT::v4i32, Legal);
    setOperationAction(ISD::OR , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);

    addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
  }
  setShiftAmountType(MVT::i32);
  setBooleanContents(ZeroOrOneBooleanContent);

  if (TM.getSubtarget<PPCSubtarget>().isPPC64()) {
    setStackPointerRegisterToSaveRestore(PPC::X1);
    setExceptionPointerRegister(PPC::X3);
    setExceptionSelectorRegister(PPC::X4);
  } else {
    setStackPointerRegisterToSaveRestore(PPC::R1);
    setExceptionPointerRegister(PPC::R3);
    setExceptionSelectorRegister(PPC::R4);
  }
  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  setTargetDAGCombine(ISD::BSWAP);

  // Darwin long double math library functions have $LDBL128 appended.
  if (TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  computeRegisterProperties();
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(const Type *Ty) const {
  TargetMachine &TM = getTargetMachine();
  // Darwin passes everything on 4 byte boundary.
  if (TM.getSubtarget<PPCSubtarget>().isDarwin())
    return 4;
  // Other targets currently use the same 4 byte boundary.
  return 4;
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case PPCISD::FSEL: return "PPCISD::FSEL";
  case PPCISD::FCFID: return "PPCISD::FCFID";
  case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
  case PPCISD::STFIWX: return "PPCISD::STFIWX";
  case PPCISD::VMADDFP: return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM: return "PPCISD::VPERM";
  case PPCISD::Hi: return "PPCISD::Hi";
  case PPCISD::Lo: return "PPCISD::Lo";
  case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL: return "PPCISD::SRL";
  case PPCISD::SRA: return "PPCISD::SRA";
  case PPCISD::SHL: return "PPCISD::SHL";
  case PPCISD::EXTSW_32: return "PPCISD::EXTSW_32";
  case PPCISD::STD_32: return "PPCISD::STD_32";
  case PPCISD::CALL_ELF: return "PPCISD::CALL_ELF";
  case PPCISD::CALL_Macho: return "PPCISD::CALL_Macho";
  case PPCISD::MTCTR: return "PPCISD::MTCTR";
  case PPCISD::BCTRL_Macho: return "PPCISD::BCTRL_Macho";
  case PPCISD::BCTRL_ELF: return "PPCISD::BCTRL_ELF";
  case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
  case PPCISD::MFCR: return "PPCISD::MFCR";
  case PPCISD::VCMP: return "PPCISD::VCMP";
  case PPCISD::VCMPo: return "PPCISD::VCMPo";
  case PPCISD::LBRX: return "PPCISD::LBRX";
  case PPCISD::STBRX: return "PPCISD::STBRX";
  case PPCISD::LARX: return "PPCISD::LARX";
  case PPCISD::STCX: return "PPCISD::STCX";
  case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
  case PPCISD::MFFS: return "PPCISD::MFFS";
  case PPCISD::MTFSB0: return "PPCISD::MTFSB0";
  case PPCISD::MTFSB1: return "PPCISD::MTFSB1";
  case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
  case PPCISD::MTFSF: return "PPCISD::MTFSF";
  case PPCISD::TAILCALL: return "PPCISD::TAILCALL";
  case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
  }
}

MVT PPCTargetLowering::getSetCCResultType(MVT VT) const {
  return MVT::i32;
}

/// getFunctionAlignment - Return the Log2 alignment of this function.
unsigned PPCTargetLowering::getFunctionAlignment(const Function *F) const {
  if (getTargetMachine().getSubtarget<PPCSubtarget>().isDarwin())
    return F->hasFnAttr(Attribute::OptimizeForSize) ? 2 : 4;
  else
    return 2;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else {
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+1))
        return false;
  }
  return true;
}

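// For reference, the binary vpkuhum mask checked above keeps the odd bytes of
// the concatenated inputs, i.e. <1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31>.
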
/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i ), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
        return false;
  } else {
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i ), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         "PPC only supports shuffles by bytes!");
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

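// e.g. a binary vmrglb (UnitSize=1, LHSStart=8, RHSStart=24) interleaves the
// low halves of the two inputs, giving the byte mask
// <8,24,9,25,10,26,11,27,12,28,13,29,14,30,15,31>.
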
/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 8, 24);
  return isVMerge(N, UnitSize, 8, 8);
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 0, 16);
  return isVMerge(N, UnitSize, 0, 0);
}

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         "PPC only supports shuffles by bytes!");

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  if (!isUnary) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  }
  return ShiftAmt;
}

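// e.g. a vsldoi shift by 3 corresponds to the mask <3,4,5,...,18>: result
// byte i is byte ShiftAmt+i of the 32-byte concatenated input (taken modulo
// 16 in the unary case).
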
/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte element
  // splatted with a v16i8 mask.
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}

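// e.g. a vspltw splat of word element 2 shows up here (EltSize == 4) as the
// byte mask <8,9,10,11, 8,9,10,11, 8,9,10,11, 8,9,10,11>.
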
/// isAllNegativeZeroVector - Returns true if all elements of build_vector
/// are -0.0.
bool PPC::isAllNegativeZeroVector(SDNode *N) {
  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);

  APInt APVal, APUndef;
  unsigned BitSize;
  bool HasAnyUndefs;

  if (BV->isConstantSplat(APVal, APUndef, BitSize, HasAnyUndefs, 32))
    if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
      return CFP->getValueAPF().isNegZero();

  return false;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  return SVOp->getMaskElt(0) / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(0, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (UniquedVals[i&(Multiple-1)].getNode() == 0)
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (UniquedVals[i].getNode() == 0) continue;  // Must have been undefs.

      LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
      LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (UniquedVals[Multiple-1].getNode() == 0)
        return DAG.getTargetConstant(0, MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16)
        return DAG.getTargetConstant(Val, MVT::i32); // 0,0,0,4 -> vspltisw(4)
    }
    if (LeadingOnes) {
      if (UniquedVals[Multiple-1].getNode() == 0)
        return DAG.getTargetConstant(~0U, MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16)                                // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, MVT::i32);
    }

    return SDValue();
  }
  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.getNode() == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (OpVal.getNode() == 0) return SDValue();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, cut it in half and
  // check to see if the two halves are equal.  Continue doing this until we
  // get to ByteSize.  This allows us to handle 0x01010101 as 0x01.
  while (ValSizeInBytes > ByteSize) {
    ValSizeInBytes >>= 1;

    // If the top half equals the bottom half, we're still ok.
    if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
        (Value & ((1 << (8*ValSizeInBytes))-1)))
      return SDValue();
  }

  // Properly sign extend the value.
  int ShAmt = (4-ByteSize)*8;
  int MaskVal = ((int)Value << ShAmt) >> ShAmt;

  // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5 bit sext field, return it.
  if (((MaskVal << (32-5)) >> (32-5)) == MaskVal)
    return DAG.getTargetConstant(MaskVal, MVT::i32);
  return SDValue();
}

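// e.g. a v8i16 build_vector splatting 0xFFFE sign-extends to MaskVal == -2,
// which fits the 5-bit field [-16,15], so it can be generated with a single
// "vspltish -2".
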
//===----------------------------------------------------------------------===//
// Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value.  If so, this returns true and the
/// immediate.
static bool isIntS16Immediate(SDNode *N, short &Imm) {
  if (N->getOpcode() != ISD::Constant)
    return false;

  Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
static bool isIntS16Immediate(SDValue Op, short &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}

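// e.g. for an i32 constant, 0xFFFF8000 (-32768) survives the round trip
// through the short and is accepted, while 0x00008000 (32768) truncates to
// -32768 and is rejected.
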
/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.  Returns false if it
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
                                            SDValue &Index,
                                            SelectionDAG &DAG) const {
  short imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false;    // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i can fold it if we can.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are provably
    // disjoint.
    APInt LHSKnownZero, LHSKnownOne;
    APInt RHSKnownZero, RHSKnownOne;
    DAG.ComputeMaskedBits(N.getOperand(0),
                          APInt::getAllOnesValue(N.getOperand(0)
                                                 .getValueSizeInBits()),
                          LHSKnownZero, LHSKnownOne);

    if (LHSKnownZero.getBoolValue()) {
      DAG.ComputeMaskedBits(N.getOperand(1),
                            APInt::getAllOnesValue(N.getOperand(1)
                                                   .getValueSizeInBits()),
                            RHSKnownZero, RHSKnownOne);
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // change the result.
      if (~(LHSKnownZero | RHSKnownZero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

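// The disjoint-OR case matters because front ends commonly form addresses of
// known-aligned objects as "base | offset"; e.g. with a 16-byte aligned base
// and offset 12, the OR is exactly an ADD and [r+r] addressing absorbs it.
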
/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg.
bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
                                            SDValue &Base,
                                            SelectionDAG &DAG) const {
  // FIXME dl should come from parent load or store, not from address
  DebugLoc dl = N.getDebugLoc();
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm)) {
      Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true; // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true; // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm)) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      APInt LHSKnownZero, LHSKnownOne;
      DAG.ComputeMaskedBits(N.getOperand(0),
                            APInt::getAllOnesValue(N.getOperand(0)
                                                   .getValueSizeInBits()),
                            LHSKnownZero, LHSKnownOne);

      if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // change the result.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0".
    short Imm;
    if (isIntS16Immediate(CN, Imm)) {
      Disp = DAG.getTargetConstant(Imm, CN->getValueType(0));
      Base = DAG.getRegister(PPC::R0, CN->getValueType(0));
      return true;
    }

    // Handle 32-bit sext immediates with LIS + addr mode.
    if (CN->getValueType(0) == MVT::i32 ||
        (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) {
      int Addr = (int)CN->getZExtValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getTargetNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
  else
    Base = N;
  return true;      // [r+0]
}

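// Worked example of the LIS+disp split above: for Addr = 0x0FFF8004,
// Disp = (short)Addr = -32764 and Base = (Addr - (-32764)) >> 16 = 0x1000;
// "lis" then materializes 0x10000000, and 0x10000000 + (-32764) recovers
// 0x0FFF8004.
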
/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
                                                SDValue &Index,
                                                SelectionDAG &DAG) const {
  // Check to see if we can easily represent this as an [r+r] address.  This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the operand is an addition, always emit this as [r+r], since this is
  // better (for code size, and execution, as the memop does the add for free)
  // than emitting an explicit add.
  if (N.getOpcode() == ISD::ADD) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(PPC::R0, N.getValueType());
  Index = N;
  return true;
}

/// SelectAddressRegImmShift - Returns true if the address N can be
/// represented by a base register plus a signed 14-bit displacement
/// [r+imm*4].  Suitable for use by STD and friends.
bool PPCTargetLowering::SelectAddressRegImmShift(SDValue N, SDValue &Disp,
                                                 SDValue &Base,
                                                 SelectionDAG &DAG) const {
  // FIXME dl should come from the parent load or store, not the address
  DebugLoc dl = N.getDebugLoc();
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
      Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true; // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true; // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      APInt LHSKnownZero, LHSKnownOne;
      DAG.ComputeMaskedBits(N.getOperand(0),
                            APInt::getAllOnesValue(N.getOperand(0)
                                                   .getValueSizeInBits()),
                            LHSKnownZero, LHSKnownOne);
      if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // change the result.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.  Verify low two bits are clear.
    if ((CN->getZExtValue() & 3) == 0) {
      // If this address fits entirely in a 14-bit sext immediate field, codegen
      // this as "d, 0".
      short Imm;
      if (isIntS16Immediate(CN, Imm)) {
        Disp = DAG.getTargetConstant((unsigned short)Imm >> 2, getPointerTy());
        Base = DAG.getRegister(PPC::R0, CN->getValueType(0));
        return true;
      }

      // Fold the low-part of 32-bit absolute addresses into addr mode.
      if (CN->getValueType(0) == MVT::i32 ||
          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) {
        int Addr = (int)CN->getZExtValue();

        // Otherwise, break this down into an LIS + disp.
        Disp = DAG.getTargetConstant((short)Addr >> 2, MVT::i32);
        Base = DAG.getTargetConstant((Addr-(signed short)Addr) >> 16, MVT::i32);
        unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
        Base = SDValue(DAG.getTargetNode(Opc, dl, CN->getValueType(0), Base), 0);
        return true;
      }
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
  else
    Base = N;
  return true;      // [r+0]
}

/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  // Disabled by default for now.
  if (!EnablePPCPreinc) return false;

  SDValue Ptr;
  MVT VT;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT  = ST->getMemoryVT();
  } else
    return false;

  // PowerPC doesn't have preinc load/store instructions for vectors.
  if (VT.isVector())
    return false;

  // TODO: Check reg+reg first.

  // LDU/STU use reg+imm*4, others use reg+imm.
  if (VT != MVT::i64) {
    // reg + imm
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG))
      return false;
  } else {
    // reg + imm * 4.
    if (!SelectAddressRegImmShift(Ptr, Offset, Base, DAG))
      return false;
  }

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
    // sext i32 to i64 when addr mode is r+i.
    if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
        LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}

//===----------------------------------------------------------------------===//
// LowerOperation implementation
//===----------------------------------------------------------------------===//

SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
                                             SelectionDAG &DAG) {
  MVT PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  Constant *C = CP->getConstVal();
  SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
  SDValue Zero = DAG.getConstant(0, PtrVT);
  // FIXME there isn't really any debug info here
  DebugLoc dl = Op.getDebugLoc();

  const TargetMachine &TM = DAG.getTarget();

  SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, CPI, Zero);
  SDValue Lo = DAG.getNode(PPCISD::Lo, dl, PtrVT, CPI, Zero);

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to the constant pool.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, dl, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg,
                                 DebugLoc::getUnknownLoc(), PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
  return Lo;
}

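// In the static case the Hi/Lo pair becomes the usual two-instruction
// sequence, roughly (Darwin asm shown for illustration only):
//   lis r2, ha16(LCPI)        ; PPCISD::Hi
//   la  r3, lo16(LCPI)(r2)    ; PPCISD::Lo
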
SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) {
  MVT PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
  SDValue Zero = DAG.getConstant(0, PtrVT);
  // FIXME there isn't really any debug loc here
  DebugLoc dl = Op.getDebugLoc();

  const TargetMachine &TM = DAG.getTarget();

  SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, JTI, Zero);
  SDValue Lo = DAG.getNode(PPCISD::Lo, dl, PtrVT, JTI, Zero);

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to the constant pool.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, dl, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg,
                                 DebugLoc::getUnknownLoc(), PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
  return Lo;
}

SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                 SelectionDAG &DAG) {
  assert(0 && "TLS not implemented for PPC.");
  return SDValue(); // Not reached
}

SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) {
  MVT PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  GlobalValue *GV = GSDN->getGlobal();
  SDValue GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset());
  SDValue Zero = DAG.getConstant(0, PtrVT);
  // FIXME there isn't really any debug info here
  DebugLoc dl = GSDN->getDebugLoc();

  const TargetMachine &TM = DAG.getTarget();

  SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, GA, Zero);
  SDValue Lo = DAG.getNode(PPCISD::Lo, dl, PtrVT, GA, Zero);

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to globals.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, dl, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg,
                                 DebugLoc::getUnknownLoc(), PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);

  if (!TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV))
    return Lo;

  // If the global is weak or external, we have to go through the lazy
  // resolution stub.
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Lo, NULL, 0);
}

SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  DebugLoc dl = Op.getDebugLoc();

  // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
  // fold the new nodes.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    if (C->isNullValue() && CC == ISD::SETEQ) {
      MVT VT = Op.getOperand(0).getValueType();
      SDValue Zext = Op.getOperand(0);
      if (VT.bitsLT(MVT::i32)) {
        VT = MVT::i32;
        Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0));
      }
      unsigned Log2b = Log2_32(VT.getSizeInBits());
      SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext);
      SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz,
                                DAG.getConstant(Log2b, MVT::i32));
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc);
    }
    // Leave comparisons against 0 and -1 alone for now, since they're usually
    // optimized.  FIXME: revisit this when we can custom lower all setcc
    // optimizations.
    if (C->isAllOnesValue() || C->isNullValue())
      return SDValue();
  }

  // If we have an integer seteq/setne, turn it into a compare against zero
  // by xor'ing the rhs with the lhs, which is faster than setting a
  // condition register, reading it back out, and masking the correct bit.  The
  // normal approach here uses sub to do this instead of xor.  Using xor exposes
  // the result to other bit-twiddling opportunities.
  MVT LHSVT = Op.getOperand(0).getValueType();
  if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    MVT VT = Op.getValueType();
    SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
                              Op.getOperand(1));
    return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, LHSVT), CC);
  }
  return SDValue();
}

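// e.g. for i32, "seteq x, 0" becomes (srl (ctlz x), 5): ctlz returns 32 only
// when x == 0, and 32 >> 5 == 1 while every smaller leading-zero count
// shifts down to 0.
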
SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
                                      int VarArgsFrameIndex,
                                      int VarArgsStackOffset,
                                      unsigned VarArgsNumGPR,
                                      unsigned VarArgsNumFPR,
                                      const PPCSubtarget &Subtarget) {

  assert(0 && "VAARG in ELF32 ABI not implemented yet!");
  return SDValue(); // Not reached
}

SDValue PPCTargetLowering::LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
  DebugLoc dl = Op.getDebugLoc();

  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = (PtrVT == MVT::i64);
  const Type *IntPtrTy =
    DAG.getTargetLoweringInfo().getTargetData()->getIntPtrType();

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = Trmp; Args.push_back(Entry);

  // TrampSize == (isPPC64 ? 48 : 40);
  Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40,
                               isPPC64 ? MVT::i64 : MVT::i32);
  Args.push_back(Entry);

  Entry.Node = FPtr; Args.push_back(Entry);
  Entry.Node = Nest; Args.push_back(Entry);

  // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
  std::pair<SDValue, SDValue> CallResult =
    LowerCallTo(Chain, Op.getValueType().getTypeForMVT(), false, false,
                false, false, CallingConv::C, false,
                DAG.getExternalSymbol("__trampoline_setup", PtrVT),
                Args, DAG, dl);

  SDValue Ops[] =
    { CallResult.first, CallResult.second };

  return DAG.getMergeValues(Ops, 2, dl);
}

SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
                                        int VarArgsFrameIndex,
                                        int VarArgsStackOffset,
                                        unsigned VarArgsNumGPR,
                                        unsigned VarArgsNumFPR,
                                        const PPCSubtarget &Subtarget) {
  DebugLoc dl = Op.getDebugLoc();

  if (Subtarget.isMachoABI()) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
    SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
    const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
    return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), SV, 0);
  }

  // For the ELF 32 ABI we follow the layout of the va_list struct.
  // We suppose the given va_list is already allocated.
  //
  // typedef struct {
  //  char gpr;     /* index into the array of 8 GPRs
  //                 * stored in the register save area
  //                 * gpr=0 corresponds to r3,
  //                 * gpr=1 to r4, etc.
  //                 */
  //  char fpr;     /* index into the array of 8 FPRs
  //                 * stored in the register save area
  //                 * fpr=0 corresponds to f1,
  //                 * fpr=1 to f2, etc.
  //                 */
  //  char *overflow_arg_area;
  //                /* location on stack that holds
  //                 * the next overflow argument
  //                 */
  //  char *reg_save_area;
  //                /* where r3:r10 and f1:f8 (if saved)
  //                 * are stored
  //                 */
  // } va_list[1];

  SDValue ArgGPR = DAG.getConstant(VarArgsNumGPR, MVT::i8);
  SDValue ArgFPR = DAG.getConstant(VarArgsNumFPR, MVT::i8);

  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  SDValue StackOffsetFI = DAG.getFrameIndex(VarArgsStackOffset, PtrVT);
  SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);

  uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
  SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT);

  uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
  SDValue ConstStackOffset = DAG.getConstant(StackOffset, PtrVT);

  uint64_t FPROffset = 1;
  SDValue ConstFPROffset = DAG.getConstant(FPROffset, PtrVT);

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  // Store first byte : number of int regs
  SDValue firstStore = DAG.getStore(Op.getOperand(0), dl, ArgGPR,
                                    Op.getOperand(1), SV, 0);
  uint64_t nextOffset = FPROffset;
  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
                                ConstFPROffset);

  // Store second byte : number of float regs
  SDValue secondStore =
    DAG.getStore(firstStore, dl, ArgFPR, nextPtr, SV, nextOffset);
  nextOffset += StackOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);

  // Store second word : arguments given on stack
  SDValue thirdStore =
    DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, SV, nextOffset);
  nextOffset += FrameOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);

  // Store third word : arguments given in registers
  return DAG.getStore(thirdStore, dl, FR, nextPtr, SV, nextOffset);
}

#include "PPCGenCallingConv.inc"

/// GetFPR - Get the set of FP registers that should be allocated for arguments,
/// depending on which subtarget is selected.
static const unsigned *GetFPR(const PPCSubtarget &Subtarget) {
  if (Subtarget.isMachoABI()) {
    static const unsigned FPR[] = {
      PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
      PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
    };
    return FPR;
  }

  static const unsigned FPR[] = {
    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
    PPC::F8
  };
  return FPR;
}

/// CalculateStackSlotSize - Calculates the size reserved for this argument on
/// the stack.
static unsigned CalculateStackSlotSize(SDValue Arg, ISD::ArgFlagsTy Flags,
                                       bool isVarArg, unsigned PtrByteSize) {
  MVT ArgVT = Arg.getValueType();
  unsigned ArgSize = ArgVT.getSizeInBits()/8;
  if (Flags.isByVal())
    ArgSize = Flags.getByValSize();
  ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
  return ArgSize;
}

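// e.g. a 13-byte byval argument with PtrByteSize == 4 reserves
// ((13 + 3) / 4) * 4 == 16 bytes of stack.
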
SDValue
PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op,
                                         SelectionDAG &DAG,
                                         int &VarArgsFrameIndex,
                                         int &VarArgsStackOffset,
                                         unsigned &VarArgsNumGPR,
                                         unsigned &VarArgsNumFPR,
                                         const PPCSubtarget &Subtarget) {
  // TODO: add description of PPC stack frame format, or at least some docs.
  //
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  SmallVector<SDValue, 8> ArgValues;
  SDValue Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
  DebugLoc dl = Op.getDebugLoc();

  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;
  bool isMachoABI = Subtarget.isMachoABI();
  bool isELF32_ABI = Subtarget.isELF32_ABI();
  // Potential tail calls could cause overwriting of argument stack slots.
  unsigned CC = MF.getFunction()->getCallingConv();
  bool isImmutable = !(PerformTailCallOpt && (CC==CallingConv::Fast));
  unsigned PtrByteSize = isPPC64 ? 8 : 4;

  unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI);
  // Area that is at least reserved in caller of this function.
  unsigned MinReservedArea = ArgOffset;

  static const unsigned GPR_32[] = {           // 32-bit registers.
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const unsigned GPR_64[] = {           // 64-bit registers.
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };

  static const unsigned *FPR = GetFPR(Subtarget);

  static const unsigned VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
  const unsigned Num_FPR_Regs = isMachoABI ? 13 : 8;
  const unsigned Num_VR_Regs = array_lengthof(VR);

  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;

  // In 32-bit non-varargs functions, the stack space for vectors is after the
  // stack space for non-vectors.  We do not use this space unless we have
  // too many vectors to fit in registers, something that only occurs in
  // constructed examples:), but we have to walk the arglist to figure
  // that out...for the pathological case, compute VecArgOffset as the
  // start of the vector parameter area.  Computing VecArgOffset is the
  // entire point of the following loop.
  // Altivec is not mentioned in the ppc32 Elf Supplement, so I'm not trying
  // to handle Elf here.
  unsigned VecArgOffset = ArgOffset;
  if (!isVarArg && !isPPC64) {
    for (unsigned ArgNo = 0, e = Op.getNode()->getNumValues()-1; ArgNo != e;
         ++ArgNo) {
      MVT ObjectVT = Op.getValue(ArgNo).getValueType();
      unsigned ObjSize = ObjectVT.getSizeInBits()/8;
      ISD::ArgFlagsTy Flags =
        cast<ARG_FLAGSSDNode>(Op.getOperand(ArgNo+3))->getArgFlags();

      if (Flags.isByVal()) {
        // ObjSize is the true size, ArgSize rounded up to multiple of regs.
        ObjSize = Flags.getByValSize();
        unsigned ArgSize =
          ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
        VecArgOffset += ArgSize;
        continue;
      }

      switch(ObjectVT.getSimpleVT()) {
      default: assert(0 && "Unhandled argument type!");
      case MVT::i32:
      case MVT::f32:
        VecArgOffset += isPPC64 ? 8 : 4;
        break;
      case MVT::i64:  // PPC64
      case MVT::f64:
        VecArgOffset += 8;
        break;
      case MVT::v4f32:
      case MVT::v4i32:
      case MVT::v8i16:
      case MVT::v16i8:
        // Nothing to do, we're only looking at Nonvector args here.
        break;
      }
    }
  }
  // We've found where the vector parameter area in memory is.  Skip the
  // first 12 parameters; these don't use that memory.
  VecArgOffset = ((VecArgOffset+15)/16)*16;
  VecArgOffset += 12*16;
  // Add DAG nodes to load the arguments or copy them out of registers.  On
  // entry to a function on PPC, the arguments start after the linkage area,
  // although the first ones are often in registers.
  //
  // In the ELF 32 ABI, GPRs and stack are double word aligned: an argument
  // represented with two words (long long or double) must be copied to an
  // even GPR_idx value or to an even ArgOffset value.

  SmallVector<SDValue, 8> MemOps;
  unsigned nAltivecParamsAtEnd = 0;
  for (unsigned ArgNo = 0, e = Op.getNode()->getNumValues() - 1;
       ArgNo != e; ++ArgNo) {
    SDValue ArgVal;
    bool needsLoad = false;
1520 MVT ObjectVT = Op.getValue(ArgNo).getValueType();
1521 unsigned ObjSize = ObjectVT.getSizeInBits()/8;
1522 unsigned ArgSize = ObjSize;
1523 ISD::ArgFlagsTy Flags =
1524 cast<ARG_FLAGSSDNode>(Op.getOperand(ArgNo+3))->getArgFlags();
1525 // See if next argument requires stack alignment in ELF
1526 bool Align = Flags.isSplit();
1528 unsigned CurArgOffset = ArgOffset;
1530 // Varargs or 64-bit Altivec parameters are padded to a 16-byte boundary.
1531 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
1532 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
1533 if (isVarArg || isPPC64) {
1534 MinReservedArea = ((MinReservedArea+15)/16)*16;
1535 MinReservedArea += CalculateStackSlotSize(Op.getValue(ArgNo),
1539 } else nAltivecParamsAtEnd++;
1541 // Calculate min reserved area.
1542 MinReservedArea += CalculateStackSlotSize(Op.getValue(ArgNo),
1547 // FIXME alignment for ELF may not be right
1548 // FIXME the codegen can be much improved in some cases.
1549 // We do not have to keep everything in memory.
1550 if (Flags.isByVal()) {
1551 // ObjSize is the true size; ArgSize is ObjSize rounded up to a multiple of registers.
1552 ObjSize = Flags.getByValSize();
1553 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
1554 // Double word align in ELF
1555 if (Align && isELF32_ABI) GPR_idx += (GPR_idx % 2);
1556 // Objects of size 1 and 2 are right justified, everything else is
1557 // left justified. This means the memory address is adjusted forwards.
1558 if (ObjSize==1 || ObjSize==2) {
1559 CurArgOffset = CurArgOffset + (4 - ObjSize);
1561 // The value of the object is its address.
1562 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset);
1563 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
1564 ArgValues.push_back(FIN);
1565 if (ObjSize==1 || ObjSize==2) {
1566 if (GPR_idx != Num_GPR_Regs) {
1567 unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
1568 RegInfo.addLiveIn(GPR[GPR_idx], VReg);
1569 SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, PtrVT);
1570 SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
1571 NULL, 0, ObjSize==1 ? MVT::i8 : MVT::i16 );
1572 MemOps.push_back(Store);
1574 if (isMachoABI) ArgOffset += PtrByteSize;
1576 ArgOffset += PtrByteSize;
1580 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
1581 // Store whatever pieces of the object are in registers
1582 // to memory. ArgVal will be the address of the beginning of the object.
1584 if (GPR_idx != Num_GPR_Regs) {
1585 unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
1586 RegInfo.addLiveIn(GPR[GPR_idx], VReg);
1587 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset);
1588 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
1589 SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, PtrVT);
1590 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0);
1591 MemOps.push_back(Store);
1593 if (isMachoABI) ArgOffset += PtrByteSize;
1595 ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
1602 switch (ObjectVT.getSimpleVT()) {
1603 default: assert(0 && "Unhandled argument type!");
1606 // Double word align in ELF
1607 if (Align && isELF32_ABI) GPR_idx += (GPR_idx % 2);
1609 if (GPR_idx != Num_GPR_Regs) {
1610 unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
1611 RegInfo.addLiveIn(GPR[GPR_idx], VReg);
1612 ArgVal = DAG.getCopyFromReg(Root, dl, VReg, MVT::i32);
1616 ArgSize = PtrByteSize;
1618 // Stack align in ELF
1619 if (needsLoad && Align && isELF32_ABI)
1620 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize;
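// ((ArgOffset/4) % 2) is 1 exactly when ArgOffset is an odd multiple of 4,
// so this bumps e.g. offset 28 up to 32, giving the 8-byte alignment the
// ELF 32 ABI requires for double-word arguments.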
1621 // All int arguments reserve stack space in the Macho ABI.
1622 if (isMachoABI || needsLoad) ArgOffset += PtrByteSize;
1626 case MVT::i64: // PPC64
1627 if (GPR_idx != Num_GPR_Regs) {
1628 unsigned VReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
1629 RegInfo.addLiveIn(GPR[GPR_idx], VReg);
1630 ArgVal = DAG.getCopyFromReg(Root, dl, VReg, MVT::i64);
1632 if (ObjectVT == MVT::i32) {
1633 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
1634 // value to MVT::i64 and then truncate to the correct register size.
1636 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
1637 DAG.getValueType(ObjectVT));
1638 else if (Flags.isZExt())
1639 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
1640 DAG.getValueType(ObjectVT));
1642 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
1648 ArgSize = PtrByteSize;
1650 // All int arguments reserve stack space in the Macho ABI.
1651 if (isMachoABI || needsLoad) ArgOffset += 8;
1656 // Every 4 bytes of argument space consumes one of the GPRs available for
1657 // argument passing.
1658 if (GPR_idx != Num_GPR_Regs && isMachoABI) {
1660 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
1663 if (FPR_idx != Num_FPR_Regs) {
1665 if (ObjectVT == MVT::f32)
1666 VReg = RegInfo.createVirtualRegister(&PPC::F4RCRegClass);
1668 VReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
1669 RegInfo.addLiveIn(FPR[FPR_idx], VReg);
1670 ArgVal = DAG.getCopyFromReg(Root, dl, VReg, ObjectVT);
1676 // Stack align in ELF
1677 if (needsLoad && Align && isELF32_ABI)
1678 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize;
1679 // All FP arguments reserve stack space in the Macho ABI.
1680 if (isMachoABI || needsLoad) ArgOffset += isPPC64 ? 8 : ObjSize;
1686 // Note that vector arguments in registers don't reserve stack space,
1687 // except in varargs functions.
1688 if (VR_idx != Num_VR_Regs) {
1689 unsigned VReg = RegInfo.createVirtualRegister(&PPC::VRRCRegClass);
1690 RegInfo.addLiveIn(VR[VR_idx], VReg);
1691 ArgVal = DAG.getCopyFromReg(Root, dl, VReg, ObjectVT);
1693 while ((ArgOffset % 16) != 0) {
1694 ArgOffset += PtrByteSize;
1695 if (GPR_idx != Num_GPR_Regs)
1699 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs);
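// A 16-byte vector shadows four 4-byte GPR argument slots, so advance
// GPR_idx by 4 (clamped to Num_GPR_Regs) to keep the register and stack
// views of the argument area in step.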
1703 if (!isVarArg && !isPPC64) {
1704 // Vectors go after all the nonvectors.
1705 CurArgOffset = VecArgOffset;
1708 // Vectors are aligned.
1709 ArgOffset = ((ArgOffset+15)/16)*16;
1710 CurArgOffset = ArgOffset;
1718 // We need to load the argument to a virtual register if we determined above
1719 // that we ran out of physical registers of the appropriate type.
1721 int FI = MFI->CreateFixedObject(ObjSize,
1722 CurArgOffset + (ArgSize - ObjSize),
1724 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
1725 ArgVal = DAG.getLoad(ObjectVT, dl, Root, FIN, NULL, 0);
1728 ArgValues.push_back(ArgVal);
1731 // Set the size that is at least reserved in caller of this function. Tail
1732 // call optimized function's reserved stack space needs to be aligned so that
1733 // taking the difference between two stack areas will result in an aligned value.
1735 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
1736 // Add the Altivec parameters at the end, if needed.
1737 if (nAltivecParamsAtEnd) {
1738 MinReservedArea = ((MinReservedArea+15)/16)*16;
1739 MinReservedArea += 16*nAltivecParamsAtEnd;
1742 std::max(MinReservedArea,
1743 PPCFrameInfo::getMinCallFrameSize(isPPC64, isMachoABI));
1744 unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameInfo()->
1745 getStackAlignment();
1746 unsigned AlignMask = TargetAlign-1;
1747 MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask;
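// (x + AlignMask) & ~AlignMask rounds x up to the next multiple of
// TargetAlign (a power of two); with TargetAlign 16, 52 becomes 64.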
1748 FI->setMinReservedArea(MinReservedArea);
1750 // If the function takes a variable number of arguments, make a frame index for
1751 // the start of the first vararg value... for expansion of llvm.va_start.
1756 VarArgsNumGPR = GPR_idx;
1757 VarArgsNumFPR = FPR_idx;
1759 // Make room for Num_GPR_Regs, Num_FPR_Regs, and for a possible frame pointer.
1761 depth = -(Num_GPR_Regs * PtrVT.getSizeInBits()/8 +
1762 Num_FPR_Regs * MVT(MVT::f64).getSizeInBits()/8 +
1763 PtrVT.getSizeInBits()/8);
1765 VarArgsStackOffset = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
1772 VarArgsFrameIndex = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
1774 SDValue FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
1776 // In ELF 32 ABI, the fixed integer arguments of a variadic function are
1777 // stored to the VarArgsFrameIndex on the stack.
1779 for (GPR_idx = 0; GPR_idx != VarArgsNumGPR; ++GPR_idx) {
1780 SDValue Val = DAG.getRegister(GPR[GPR_idx], PtrVT);
1781 SDValue Store = DAG.getStore(Root, dl, Val, FIN, NULL, 0);
1782 MemOps.push_back(Store);
1783 // Increment the address by four for the next argument to store
1784 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
1785 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
1789 // If this function is vararg, store any remaining integer argument regs
1790 // to their spots on the stack so that they may be loaded by dereferencing the
1791 // result of va_next.
1792 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
1795 VReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
1797 VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
1799 RegInfo.addLiveIn(GPR[GPR_idx], VReg);
1800 SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, PtrVT);
1801 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0);
1802 MemOps.push_back(Store);
1803 // Increment the address by four for the next argument to store
1804 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
1805 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
1808 // In the ELF 32 ABI, the double arguments are stored to the VarArgsFrameIndex on the stack.
1811 for (FPR_idx = 0; FPR_idx != VarArgsNumFPR; ++FPR_idx) {
1812 SDValue Val = DAG.getRegister(FPR[FPR_idx], MVT::f64);
1813 SDValue Store = DAG.getStore(Root, dl, Val, FIN, NULL, 0);
1814 MemOps.push_back(Store);
1815 // Increment the address by eight for the next argument to store
1816 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8,
1818 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
1821 for (; FPR_idx != Num_FPR_Regs; ++FPR_idx) {
1823 VReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
1825 RegInfo.addLiveIn(FPR[FPR_idx], VReg);
1826 SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, MVT::f64);
1827 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0);
1828 MemOps.push_back(Store);
1829 // Increment the address by eight for the next argument to store
1830 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8,
1832 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
1837 if (!MemOps.empty())
1838 Root = DAG.getNode(ISD::TokenFactor, dl,
1839 MVT::Other, &MemOps[0], MemOps.size());
1841 ArgValues.push_back(Root);
1843 // Return the new list of results.
1844 return DAG.getNode(ISD::MERGE_VALUES, dl, Op.getNode()->getVTList(),
1845 &ArgValues[0], ArgValues.size());
1848 /// CalculateParameterAndLinkageAreaSize - Get the size of the parameter plus
/// linkage area.
1851 CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG,
1856 CallSDNode *TheCall,
1857 unsigned &nAltivecParamsAtEnd) {
1858 // Count how many bytes are to be pushed on the stack, including the linkage
1859 // area, and parameter passing area. We start with 24/48 bytes, which is
1860 // pre-reserved space for [SP][CR][LR][3 x unused].
1861 unsigned NumBytes = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI);
1862 unsigned NumOps = TheCall->getNumArgs();
1863 unsigned PtrByteSize = isPPC64 ? 8 : 4;
1865 // Add up all the space actually used.
1866 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
1867 // they all go in registers, but we must reserve stack space for them for
1868 // possible use by the caller. In varargs or 64-bit calls, parameters are
1869 // assigned stack space in order, with padding so Altivec parameters are
// 16-byte aligned.
1871 nAltivecParamsAtEnd = 0;
1872 for (unsigned i = 0; i != NumOps; ++i) {
1873 SDValue Arg = TheCall->getArg(i);
1874 ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
1875 MVT ArgVT = Arg.getValueType();
1876 // Varargs Altivec parameters are padded to a 16-byte boundary.
1877 if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 ||
1878 ArgVT==MVT::v8i16 || ArgVT==MVT::v16i8) {
1879 if (!isVarArg && !isPPC64) {
1880 // Non-varargs Altivec parameters go after all the non-Altivec
1881 // parameters; handle those later so we know how much padding we need.
1882 nAltivecParamsAtEnd++;
1885 // Varargs and 64-bit Altivec parameters are padded to a 16-byte boundary.
1886 NumBytes = ((NumBytes+15)/16)*16;
1888 NumBytes += CalculateStackSlotSize(Arg, Flags, isVarArg, PtrByteSize);
1891 // Allow for Altivec parameters at the end, if needed.
1892 if (nAltivecParamsAtEnd) {
1893 NumBytes = ((NumBytes+15)/16)*16;
1894 NumBytes += 16*nAltivecParamsAtEnd;
1897 // The prolog code of the callee may store up to 8 GPR argument registers to
1898 // the stack, allowing va_start to index over them in memory if it is varargs.
1899 // Because we cannot tell if this is needed on the caller side, we have to
1900 // conservatively assume that it is needed. As such, make sure we have at
1901 // least enough stack space for the caller to store the 8 GPRs.
1902 NumBytes = std::max(NumBytes,
1903 PPCFrameInfo::getMinCallFrameSize(isPPC64, isMachoABI));
1905 // Tail call needs the stack to be aligned.
1906 if (CC==CallingConv::Fast && PerformTailCallOpt) {
1907 unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameInfo()->
1908 getStackAlignment();
1909 unsigned AlignMask = TargetAlign-1;
1910 NumBytes = (NumBytes + AlignMask) & ~AlignMask;
1916 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
1917 /// adjusted to accommodate the arguments for the tail call.
1918 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool IsTailCall,
1919 unsigned ParamSize) {
1921 if (!IsTailCall) return 0;
1923 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
1924 unsigned CallerMinReservedArea = FI->getMinReservedArea();
1925 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
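// A negative SPDiff means the callee needs more argument area than the
// caller reserved; e.g. 64 bytes reserved vs. 96 needed gives SPDiff -32,
// so the stack must be grown by 32 bytes before jumping to the callee.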
1926 // Remember only if the new adjustment is bigger.
1927 if (SPDiff < FI->getTailCallSPDelta())
1928 FI->setTailCallSPDelta(SPDiff);
1933 /// IsEligibleForTailCallOptimization - Check to see whether the next instruction
1934 /// following the call is a return. A function is eligible if caller/callee
1935 /// calling conventions match, currently only fastcc supports tail calls, and
1936 /// the function CALL is immediately followed by a RET.
1938 PPCTargetLowering::IsEligibleForTailCallOptimization(CallSDNode *TheCall,
1940 SelectionDAG& DAG) const {
1941 // Variable argument functions are not supported.
1942 if (!PerformTailCallOpt || TheCall->isVarArg())
1945 if (CheckTailCallReturnConstraints(TheCall, Ret)) {
1946 MachineFunction &MF = DAG.getMachineFunction();
1947 unsigned CallerCC = MF.getFunction()->getCallingConv();
1948 unsigned CalleeCC = TheCall->getCallingConv();
1949 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
1950 // Functions containing byval parameters are not supported.
1951 for (unsigned i = 0; i != TheCall->getNumArgs(); i++) {
1952 ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
1953 if (Flags.isByVal()) return false;
1956 SDValue Callee = TheCall->getCallee();
1957 // Non-PIC/GOT tail calls are supported.
1958 if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
1961 // At the moment we can only do local tail calls (in same module, hidden
1962 // or protected) if we are generating PIC.
1963 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1964 return G->getGlobal()->hasHiddenVisibility()
1965 || G->getGlobal()->hasProtectedVisibility();
1972 /// isBLACompatibleAddress - Return the immediate to use if the specified
1973 /// 32-bit value is representable in the immediate field of a BxA instruction.
1974 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
1975 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
1978 int Addr = C->getZExtValue();
1979 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
1980 (Addr << 6 >> 6) != Addr)
1981 return 0; // Top 6 bits have to be sext of immediate.
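// The LI field of an absolute branch holds a 24-bit signed word offset,
// i.e. a 26-bit signed byte address whose low two bits are implied zero.
// "<< 6 >> 6" keeps Addr only if bits 31..26 are a sign extension of bit
// 25: 0x01FFFFFC survives the round trip, while 0x02000000 does not.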
1983 return DAG.getConstant((int)C->getZExtValue() >> 2,
1984 DAG.getTargetLoweringInfo().getPointerTy()).getNode();
1989 struct TailCallArgumentInfo {
  SDValue Arg;
  SDValue FrameIdxOp;
  int     FrameIdx;
1994 TailCallArgumentInfo() : FrameIdx(0) {}
1999 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
2001 StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG,
2003 const SmallVector<TailCallArgumentInfo, 8> &TailCallArgs,
2004 SmallVector<SDValue, 8> &MemOpChains,
2006 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
2007 SDValue Arg = TailCallArgs[i].Arg;
2008 SDValue FIN = TailCallArgs[i].FrameIdxOp;
2009 int FI = TailCallArgs[i].FrameIdx;
2010 // Store relative to the frame pointer.
2011 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, FIN,
2012 PseudoSourceValue::getFixedStack(FI),
2017 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
2018 /// the appropriate stack slot for the tail call optimized function call.
2019 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG,
2020 MachineFunction &MF,
2029 // Calculate the new stack slot for the return address.
2030 int SlotSize = isPPC64 ? 8 : 4;
2031 int NewRetAddrLoc = SPDiff + PPCFrameInfo::getReturnSaveOffset(isPPC64,
2033 int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize,
2035 int NewFPLoc = SPDiff + PPCFrameInfo::getFramePointerSaveOffset(isPPC64,
2037 int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc);
2039 MVT VT = isPPC64 ? MVT::i64 : MVT::i32;
2040 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
2041 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
2042 PseudoSourceValue::getFixedStack(NewRetAddr), 0);
2043 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
2044 Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
2045 PseudoSourceValue::getFixedStack(NewFPIdx), 0);
2050 /// CalculateTailCallArgDest - Remember the argument for later processing.
2051 /// Calculate the position of the argument.
2053 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
2054 SDValue Arg, int SPDiff, unsigned ArgOffset,
2055 SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) {
2056 int Offset = ArgOffset + SPDiff;
2057 uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8;
2058 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset);
2059 MVT VT = isPPC64 ? MVT::i64 : MVT::i32;
2060 SDValue FIN = DAG.getFrameIndex(FI, VT);
2061 TailCallArgumentInfo Info;
2063 Info.FrameIdxOp = FIN;
2065 TailCallArguments.push_back(Info);
2068 /// EmitTailCallLoadFPAndRetAddr - Emit loads from the frame pointer and return
2069 /// address stack slots. Returns the chain as result and the loaded values in
2070 /// LROpOut/FPOpOut. Used when tail calling.
2071 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
2078 // Load the LR and FP stack slot for later adjusting.
2079 MVT VT = PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32;
2080 LROpOut = getReturnAddrFrameIndex(DAG);
2081 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, NULL, 0);
2082 Chain = SDValue(LROpOut.getNode(), 1);
2083 FPOpOut = getFramePointerFrameIndex(DAG);
2084 FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, NULL, 0);
2085 Chain = SDValue(FPOpOut.getNode(), 1);
2090 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
2091 /// by "Src" to address "Dst" of size "Size". Alignment information is
2092 /// specified by the specific parameter attribute. The copy will be passed as
2093 /// a byval function parameter.
2094 /// Sometimes what we are copying is the end of a larger object, the part that
2095 /// does not fit in registers.
2097 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
2098 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
2099 unsigned Size, DebugLoc dl) {
2100 SDValue SizeNode = DAG.getConstant(Size, MVT::i32);
2101 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
2102 false, NULL, 0, NULL, 0);
2105 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
/// tail calls.
2108 LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain,
2109 SDValue Arg, SDValue PtrOff, int SPDiff,
2110 unsigned ArgOffset, bool isPPC64, bool isTailCall,
2111 bool isVector, SmallVector<SDValue, 8> &MemOpChains,
2112 SmallVector<TailCallArgumentInfo, 8>& TailCallArguments,
2114 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2119 StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
2121 StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
2122 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
2123 DAG.getConstant(ArgOffset, PtrVT));
2125 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0));
2126 // Calculate and remember argument location.
2127 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
                                    TailCallArguments);
2131 SDValue PPCTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG,
2132 const PPCSubtarget &Subtarget,
2133 TargetMachine &TM) {
2134 CallSDNode *TheCall = cast<CallSDNode>(Op.getNode());
2135 SDValue Chain = TheCall->getChain();
2136 bool isVarArg = TheCall->isVarArg();
2137 unsigned CC = TheCall->getCallingConv();
2138 bool isTailCall = TheCall->isTailCall()
2139 && CC == CallingConv::Fast && PerformTailCallOpt;
2140 SDValue Callee = TheCall->getCallee();
2141 unsigned NumOps = TheCall->getNumArgs();
2142 DebugLoc dl = TheCall->getDebugLoc();
2144 bool isMachoABI = Subtarget.isMachoABI();
2145 bool isELF32_ABI = Subtarget.isELF32_ABI();
2147 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2148 bool isPPC64 = PtrVT == MVT::i64;
2149 unsigned PtrByteSize = isPPC64 ? 8 : 4;
2151 MachineFunction &MF = DAG.getMachineFunction();
2153 // args_to_use will accumulate outgoing args for the PPCISD::CALL case in
2154 // SelectExpr to use to put the arguments in the appropriate registers.
2155 std::vector<SDValue> args_to_use;
2157 // Mark this function as potentially containing a function that contains a
2158 // tail call. As a consequence, the frame pointer will be used for dynamic
2159 // stack allocation and for restoring the caller's stack pointer in this
2160 // function's epilogue, because the tail-called function might overwrite
2161 // the value in this function's (MF) stack pointer stack slot 0(SP).
2162 if (PerformTailCallOpt && CC==CallingConv::Fast)
2163 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
2165 unsigned nAltivecParamsAtEnd = 0;
2167 // Count how many bytes are to be pushed on the stack, including the linkage
2168 // area, and parameter passing area. We start with 24/48 bytes, which is
2169 // pre-reserved space for [SP][CR][LR][3 x unused].
2171 CalculateParameterAndLinkageAreaSize(DAG, isPPC64, isMachoABI, isVarArg, CC,
2172 TheCall, nAltivecParamsAtEnd);
2174 // Calculate by how many bytes the stack has to be adjusted in case of tail
2175 // call optimization.
2176 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
2178 // Adjust the stack pointer for the new arguments...
2179 // These operations are automatically eliminated by the prolog/epilog pass
2180 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
2181 SDValue CallSeqStart = Chain;
2183 // Load the return address and frame pointer so they can be moved somewhere else later.
2186 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
2188 // Set up a copy of the stack pointer for use in loading and storing any
2189 // arguments that may not fit in the registers available for argument passing.
2193 StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
2195 StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
2197 // Figure out which arguments are going to go in registers, and which in
2198 // memory. Also, if this is a vararg function, floating-point arguments
2199 // must be stored to our stack, and loaded into integer regs as well, if
2200 // any integer regs are available for argument passing.
2201 unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI);
2202 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
2204 static const unsigned GPR_32[] = { // 32-bit registers.
2205 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
2206 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
2208 static const unsigned GPR_64[] = { // 64-bit registers.
2209 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
2210 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
2212 static const unsigned *FPR = GetFPR(Subtarget);
2214 static const unsigned VR[] = {
2215 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
2216 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
2218 const unsigned NumGPRs = array_lengthof(GPR_32);
2219 const unsigned NumFPRs = isMachoABI ? 13 : 8;
2220 const unsigned NumVRs = array_lengthof(VR);
2222 const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;
2224 std::vector<std::pair<unsigned, SDValue> > RegsToPass;
2225 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
2227 SmallVector<SDValue, 8> MemOpChains;
2228 for (unsigned i = 0; i != NumOps; ++i) {
2230 SDValue Arg = TheCall->getArg(i);
2231 ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
2232 // See if next argument requires stack alignment in ELF
2233 bool Align = Flags.isSplit();
2235 // PtrOff will be used to store the current argument to the stack if a
2236 // register cannot be found for it.
2239 // Stack align in ELF 32
2240 if (isELF32_ABI && Align)
2241 PtrOff = DAG.getConstant(ArgOffset + ((ArgOffset/4) % 2) * PtrByteSize,
2242 StackPtr.getValueType());
2244 PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
2246 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
2248 // On PPC64, promote integers to 64-bit values.
2249 if (isPPC64 && Arg.getValueType() == MVT::i32) {
2250 // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
2251 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
2252 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
2255 // FIXME: ELF untested; what are the alignment rules?
2256 // FIXME memcpy is used way more than necessary. Correctness first.
2257 if (Flags.isByVal()) {
2258 unsigned Size = Flags.getByValSize();
2259 if (isELF32_ABI && Align) GPR_idx += (GPR_idx % 2);
2260 if (Size==1 || Size==2) {
2261 // Very small objects are passed right-justified.
2262 // Everything else is passed left-justified.
2263 MVT VT = (Size==1) ? MVT::i8 : MVT::i16;
2264 if (GPR_idx != NumGPRs) {
2265 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
2267 MemOpChains.push_back(Load.getValue(1));
2268 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
2270 ArgOffset += PtrByteSize;
2272 SDValue Const = DAG.getConstant(4 - Size, PtrOff.getValueType());
2273 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
2274 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, AddPtr,
2275 CallSeqStart.getNode()->getOperand(0),
2276 Flags, DAG, Size, dl);
2277 // This must go outside the CALLSEQ_START..END.
2278 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
2279 CallSeqStart.getNode()->getOperand(1));
2280 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
2281 NewCallSeqStart.getNode());
2282 Chain = CallSeqStart = NewCallSeqStart;
2283 ArgOffset += PtrByteSize;
2287 // Copy entire object into memory. There are cases where gcc-generated
2288 // code assumes it is there, even if it could be put entirely into
2289 // registers. (This is not what the doc says.)
2290 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
2291 CallSeqStart.getNode()->getOperand(0),
2292 Flags, DAG, Size, dl);
2293 // This must go outside the CALLSEQ_START..END.
2294 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
2295 CallSeqStart.getNode()->getOperand(1));
2296 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), NewCallSeqStart.getNode());
2297 Chain = CallSeqStart = NewCallSeqStart;
2298 // And copy the pieces of it that fit into registers.
2299 for (unsigned j=0; j<Size; j+=PtrByteSize) {
2300 SDValue Const = DAG.getConstant(j, PtrOff.getValueType());
2301 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
2302 if (GPR_idx != NumGPRs) {
2303 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, NULL, 0);
2304 MemOpChains.push_back(Load.getValue(1));
2305 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
2307 ArgOffset += PtrByteSize;
2309 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
2316 switch (Arg.getValueType().getSimpleVT()) {
2317 default: assert(0 && "Unexpected ValueType for argument!");
2320 // Double word align in ELF
2321 if (isELF32_ABI && Align) GPR_idx += (GPR_idx % 2);
2322 if (GPR_idx != NumGPRs) {
2323 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
2325 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
2326 isPPC64, isTailCall, false, MemOpChains,
2327 TailCallArguments, dl);
2330 if (inMem || isMachoABI) {
2331 // Stack align in ELF
2332 if (isELF32_ABI && Align)
2333 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize;
2335 ArgOffset += PtrByteSize;
2340 if (FPR_idx != NumFPRs) {
2341 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
2344 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0);
2345 MemOpChains.push_back(Store);
2347 // Float varargs are always shadowed in available integer registers
2348 if (GPR_idx != NumGPRs) {
2349 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, NULL, 0);
2350 MemOpChains.push_back(Load.getValue(1));
2351 if (isMachoABI) RegsToPass.push_back(std::make_pair(GPR[GPR_idx++],
2354 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
2355 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType());
2356 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
2357 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, NULL, 0);
2358 MemOpChains.push_back(Load.getValue(1));
2359 if (isMachoABI) RegsToPass.push_back(std::make_pair(GPR[GPR_idx++],
2363 // If we have any FPRs remaining, we may also have GPRs remaining.
2364 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
// GPRs.
2367 if (GPR_idx != NumGPRs)
2369 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
2370 !isPPC64) // PPC64 has 64-bit GPR's obviously :)
2375 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
2376 isPPC64, isTailCall, false, MemOpChains,
2377 TailCallArguments, dl);
2380 if (inMem || isMachoABI) {
2381 // Stack align in ELF
2382 if (isELF32_ABI && Align)
2383 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize;
2387 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
2395 // These go aligned on the stack, or in the corresponding R registers
2396 // when within range. The Darwin PPC ABI doc claims they also go in
2397 // V registers; in fact gcc does this only for arguments that are
2398 // prototyped, not for those that match the '...'. We do it for all
2399 // arguments; it seems to work.
2400 while (ArgOffset % 16 != 0) {
2401 ArgOffset += PtrByteSize;
2402 if (GPR_idx != NumGPRs)
2405 // We could elide this store in the case where the object fits
2406 // entirely in R registers. Maybe later.
2407 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
2408 DAG.getConstant(ArgOffset, PtrVT));
2409 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0);
2410 MemOpChains.push_back(Store);
2411 if (VR_idx != NumVRs) {
2412 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, NULL, 0);
2413 MemOpChains.push_back(Load.getValue(1));
2414 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
2417 for (unsigned i=0; i<16; i+=PtrByteSize) {
2418 if (GPR_idx == NumGPRs)
2420 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
2421 DAG.getConstant(i, PtrVT));
2422 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, NULL, 0);
2423 MemOpChains.push_back(Load.getValue(1));
2424 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
2429 // Non-varargs Altivec params generally go in registers, but have
2430 // stack space allocated at the end.
2431 if (VR_idx != NumVRs) {
2432 // Doesn't have GPR space allocated.
2433 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
2434 } else if (nAltivecParamsAtEnd==0) {
2435 // We are emitting Altivec params in order.
2436 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
2437 isPPC64, isTailCall, true, MemOpChains,
2438 TailCallArguments, dl);
2444 // If all Altivec parameters fit in registers, as they usually do,
2445 // they get stack space following the non-Altivec parameters. We
2446 // don't track this here because nobody below needs it.
2447 // If there are more Altivec parameters than fit in registers, emit
// the stores here.
2449 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) {
2451 // Offset is aligned; skip 1st 12 params which go in V registers.
2452 ArgOffset = ((ArgOffset+15)/16)*16;
2454 for (unsigned i = 0; i != NumOps; ++i) {
2455 SDValue Arg = TheCall->getArg(i);
2456 MVT ArgType = Arg.getValueType();
2457 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
2458 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
2461 // We are emitting Altivec params in order.
2462 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
2463 isPPC64, isTailCall, true, MemOpChains,
2464 TailCallArguments, dl);
2471 if (!MemOpChains.empty())
2472 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2473 &MemOpChains[0], MemOpChains.size());
2475 // Build a sequence of copy-to-reg nodes chained together with token chain
2476 // and flag operands which copy the outgoing args into the appropriate regs.
2478 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
2479 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
2480 RegsToPass[i].second, InFlag);
2481 InFlag = Chain.getValue(1);
2484 // With the ELF 32 ABI, set CR6 to true if this is a vararg call.
2485 if (isVarArg && isELF32_ABI) {
2486 SDValue SetCR(DAG.getTargetNode(PPC::CRSET, dl, MVT::i32), 0);
2487 Chain = DAG.getCopyToReg(Chain, dl, PPC::CR1EQ, SetCR, InFlag);
2488 InFlag = Chain.getValue(1);
2491 // Emit a sequence of copyto/copyfrom virtual registers for arguments that
2492 // might overwrite each other in case of tail call optimization.
2494 SmallVector<SDValue, 8> MemOpChains2;
2495 // Do not flag preceding copytoreg stuff together with the following stuff.
2497 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
2499 if (!MemOpChains2.empty())
2500 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2501 &MemOpChains2[0], MemOpChains2.size());
2503 // Store the return address and frame pointer to the appropriate stack slots.
2504 Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff,
2505 isPPC64, isMachoABI, dl);
2508 // Emit callseq_end just before tailcall node.
2510 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
2511 DAG.getIntPtrConstant(0, true), InFlag);
2512 InFlag = Chain.getValue(1);
2515 std::vector<MVT> NodeTys;
2516 NodeTys.push_back(MVT::Other); // Returns a chain
2517 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
2519 SmallVector<SDValue, 8> Ops;
2520 unsigned CallOpc = isMachoABI? PPCISD::CALL_Macho : PPCISD::CALL_ELF;
2522 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
2523 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
2524 // node so that legalize doesn't hack it.
2525 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
2526 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), Callee.getValueType());
2527 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
2528 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType());
2529 else if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
2530 // If this is an absolute destination address, use the munged value.
2531 Callee = SDValue(Dest, 0);
2533 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair
2534 // to do the call; we can't use PPCISD::CALL.
2535 SDValue MTCTROps[] = {Chain, Callee, InFlag};
2536 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, MTCTROps,
2537 2 + (InFlag.getNode() != 0));
2538 InFlag = Chain.getValue(1);
2540 // Copy the callee address into R12/X12 on Darwin.
2542 unsigned Reg = Callee.getValueType() == MVT::i32 ? PPC::R12 : PPC::X12;
2543 Chain = DAG.getCopyToReg(Chain, dl, Reg, Callee, InFlag);
2544 InFlag = Chain.getValue(1);
2548 NodeTys.push_back(MVT::Other);
2549 NodeTys.push_back(MVT::Flag);
2550 Ops.push_back(Chain);
2551 CallOpc = isMachoABI ? PPCISD::BCTRL_Macho : PPCISD::BCTRL_ELF;
2553 // Add CTR register as callee so a bctr can be emitted later.
2555 Ops.push_back(DAG.getRegister(PPC::CTR, getPointerTy()));
2558 // If this is a direct call, pass the chain and the callee.
2559 if (Callee.getNode()) {
2560 Ops.push_back(Chain);
2561 Ops.push_back(Callee);
2563 // If this is a tail call, add the stack pointer delta.
2565 Ops.push_back(DAG.getConstant(SPDiff, MVT::i32));
2567 // Add argument registers to the end of the list so that they are known live
// into the call.
2569 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
2570 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
2571 RegsToPass[i].second.getValueType()));
2573 // When performing tail call optimization the callee pops its arguments off
2574 // the stack. Account for this here so these bytes can be pushed back on in
2575 // PPCRegisterInfo::eliminateCallFramePseudoInstr.
2576 int BytesCalleePops =
2577 (CC==CallingConv::Fast && PerformTailCallOpt) ? NumBytes : 0;
2579 if (InFlag.getNode())
2580 Ops.push_back(InFlag);
2584 assert(InFlag.getNode() &&
2585 "Flag must be set. Depend on flag being set in LowerRET");
2586 Chain = DAG.getNode(PPCISD::TAILCALL, dl,
2587 TheCall->getVTList(), &Ops[0], Ops.size());
2588 return SDValue(Chain.getNode(), Op.getResNo());
2591 Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
2592 InFlag = Chain.getValue(1);
2594 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
2595 DAG.getIntPtrConstant(BytesCalleePops, true),
2597 if (TheCall->getValueType(0) != MVT::Other)
2598 InFlag = Chain.getValue(1);
2600 SmallVector<SDValue, 16> ResultVals;
2601 SmallVector<CCValAssign, 16> RVLocs;
2602 unsigned CallerCC = DAG.getMachineFunction().getFunction()->getCallingConv();
2603 CCState CCInfo(CallerCC, isVarArg, TM, RVLocs);
2604 CCInfo.AnalyzeCallResult(TheCall, RetCC_PPC);
2606 // Copy all of the result registers out of their specified physreg.
2607 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
2608 CCValAssign &VA = RVLocs[i];
2609 MVT VT = VA.getValVT();
2610 assert(VA.isRegLoc() && "Can only return in registers!");
2611 Chain = DAG.getCopyFromReg(Chain, dl,
2612 VA.getLocReg(), VT, InFlag).getValue(1);
2613 ResultVals.push_back(Chain.getValue(0));
2614 InFlag = Chain.getValue(2);
2617 // If the function returns void, just return the chain.
2621 // Otherwise, merge everything together with a MERGE_VALUES node.
2622 ResultVals.push_back(Chain);
2623 SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl, TheCall->getVTList(),
2624 &ResultVals[0], ResultVals.size());
2625 return Res.getValue(Op.getResNo());
2628 SDValue PPCTargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG,
2629 TargetMachine &TM) {
2630 SmallVector<CCValAssign, 16> RVLocs;
2631 unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
2632 bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
2633 DebugLoc dl = Op.getDebugLoc();
2634 CCState CCInfo(CC, isVarArg, TM, RVLocs);
2635 CCInfo.AnalyzeReturn(Op.getNode(), RetCC_PPC);
2637 // If this is the first return lowered for this function, add the regs to the
2638 // liveout set for the function.
2639 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
2640 for (unsigned i = 0; i != RVLocs.size(); ++i)
2641 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
2644 SDValue Chain = Op.getOperand(0);
2646 Chain = GetPossiblePreceedingTailCall(Chain, PPCISD::TAILCALL);
2647 if (Chain.getOpcode() == PPCISD::TAILCALL) {
2648 SDValue TailCall = Chain;
2649 SDValue TargetAddress = TailCall.getOperand(1);
2650 SDValue StackAdjustment = TailCall.getOperand(2);
2652 assert(((TargetAddress.getOpcode() == ISD::Register &&
2653 cast<RegisterSDNode>(TargetAddress)->getReg() == PPC::CTR) ||
2654 TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
2655 TargetAddress.getOpcode() == ISD::TargetGlobalAddress ||
2656 isa<ConstantSDNode>(TargetAddress)) &&
2657 "Expecting an global address, external symbol, absolute value or register");
2659 assert(StackAdjustment.getOpcode() == ISD::Constant &&
2660 "Expecting a const value");
2662 SmallVector<SDValue,8> Operands;
2663 Operands.push_back(Chain.getOperand(0));
2664 Operands.push_back(TargetAddress);
2665 Operands.push_back(StackAdjustment);
2666 // Copy registers used by the call. Last operand is a flag, so it is not
// added.
2668 for (unsigned i=3; i < TailCall.getNumOperands()-1; i++) {
2669 Operands.push_back(Chain.getOperand(i));
2671 return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, &Operands[0],
2677 // Copy the result values into the output registers.
2678 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2679 CCValAssign &VA = RVLocs[i];
2680 assert(VA.isRegLoc() && "Can only return in registers!");
2681 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
2682 Op.getOperand(i*2+1), Flag);
2683 Flag = Chain.getValue(1);
2687 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
2689 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, Chain);
2692 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
2693 const PPCSubtarget &Subtarget) {
2694 // When we pop the dynamic allocation we need to restore the SP link.
2695 DebugLoc dl = Op.getDebugLoc();
2697 // Get the correct type for pointers.
2698 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2700 // Construct the stack pointer operand.
2701 bool IsPPC64 = Subtarget.isPPC64();
2702 unsigned SP = IsPPC64 ? PPC::X1 : PPC::R1;
2703 SDValue StackPtr = DAG.getRegister(SP, PtrVT);
2705 // Get the operands for the STACKRESTORE.
2706 SDValue Chain = Op.getOperand(0);
2707 SDValue SaveSP = Op.getOperand(1);
2709 // Load the old link SP.
2710 SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr, NULL, 0);
2712 // Restore the stack pointer.
2713 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
2715 // Store the old link SP.
2716 return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, NULL, 0);
2722 PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
2723 MachineFunction &MF = DAG.getMachineFunction();
2724 bool IsPPC64 = PPCSubTarget.isPPC64();
2725 bool isMachoABI = PPCSubTarget.isMachoABI();
2726 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2728 // Get the current return address save index; this is used when lowering
2729 // tail calls, which must load and later restore the return address.
2730 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
2731 int RASI = FI->getReturnAddrSaveIndex();
2733 // If the return address save index hasn't been defined yet,
2735 // find the fixed offset of the return address save area.
2736 int LROffset = PPCFrameInfo::getReturnSaveOffset(IsPPC64, isMachoABI);
2737 // Allocate the frame index for the return address save area.
2738 RASI = MF.getFrameInfo()->CreateFixedObject(IsPPC64? 8 : 4, LROffset);
2740 FI->setReturnAddrSaveIndex(RASI);
2742 return DAG.getFrameIndex(RASI, PtrVT);
2746 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
2747 MachineFunction &MF = DAG.getMachineFunction();
2748 bool IsPPC64 = PPCSubTarget.isPPC64();
2749 bool isMachoABI = PPCSubTarget.isMachoABI();
2750 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2752 // Get current frame pointer save index. The users of this index will be
2753 // primarily DYNALLOC instructions.
2754 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
2755 int FPSI = FI->getFramePointerSaveIndex();
2757 // If the frame pointer save index hasn't been defined yet.
2759 // Find the fixed offset of the frame pointer save area.
2760 int FPOffset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64, isMachoABI);
2762 // Allocate the frame index for the frame pointer save area.
2763 FPSI = MF.getFrameInfo()->CreateFixedObject(IsPPC64? 8 : 4, FPOffset);
2765 FI->setFramePointerSaveIndex(FPSI);
2767 return DAG.getFrameIndex(FPSI, PtrVT);
2770 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
2772 const PPCSubtarget &Subtarget) {
2774 SDValue Chain = Op.getOperand(0);
2775 SDValue Size = Op.getOperand(1);
2776 DebugLoc dl = Op.getDebugLoc();
2778 // Get the correct type for pointers.
2779 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2781 SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
2782 DAG.getConstant(0, PtrVT), Size);
2783 // Construct a node for the frame pointer save index.
2784 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
2785 // Build a DYNALLOC node.
2786 SDValue Ops[3] = { Chain, NegSize, FPSIdx };
2787 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
2788 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops, 3);
2791 /// LowerSELECT_CC - Lower floating-point select_cc's into the fsel instruction
/// when possible.
2793 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) {
2794 // Not FP? Not an fsel.
2795 if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
2796 !Op.getOperand(2).getValueType().isFloatingPoint())
2799 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2801 // Cannot handle SETEQ/SETNE.
2802 if (CC == ISD::SETEQ || CC == ISD::SETNE) return Op;
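// fsel computes FRT = (FRA >= 0.0) ? FRC : FRB, so each case below is
// rephrased as a comparison of some value against zero. SETEQ/SETNE would
// need more than one such test, hence the bail-out above.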
2804 MVT ResVT = Op.getValueType();
2805 MVT CmpVT = Op.getOperand(0).getValueType();
2806 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
2807 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
2808 DebugLoc dl = Op.getDebugLoc();
2810 // If the RHS of the comparison is a 0.0, we don't need to do the
2811 // subtraction at all.
2812 if (isFloatingPointZero(RHS))
2814 default: break; // SETUO etc aren't handled by fsel.
2817 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
2820 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
2821 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
2822 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
2825 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
2828 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
2829 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
2830 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
2831 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
2836 default: break; // SETUO etc aren't handled by fsel.
2839 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS);
2840 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
2841 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
2842 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
2845 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS);
2846 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
2847 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
2848 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
2851 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS);
2852 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
2853 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
2854 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
2857 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS);
2858 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
2859 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
2860 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
2865 // FIXME: Split this code up when LegalizeDAGTypes lands.
2866 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
2868 assert(Op.getOperand(0).getValueType().isFloatingPoint());
2869 SDValue Src = Op.getOperand(0);
2870 if (Src.getValueType() == MVT::f32)
2871 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
2874 switch (Op.getValueType().getSimpleVT()) {
2875 default: assert(0 && "Unhandled FP_TO_INT type in custom expander!");
2877 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIWZ :
2882 Tmp = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Src);
2886 // Convert the FP value to an int value through memory.
2887 SDValue FIPtr = DAG.CreateStackTemporary(MVT::f64);
2889 // Emit a store to the stack slot.
2890 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, NULL, 0);
2892 // Result is a load from the stack slot. If loading 4 bytes, make sure to
// load from offset 4, where the low-order word of the f64 result lives on
// big-endian PPC.
2894 if (Op.getValueType() == MVT::i32)
2895 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
2896 DAG.getConstant(4, FIPtr.getValueType()));
2897 return DAG.getLoad(Op.getValueType(), dl, Chain, FIPtr, NULL, 0);
2900 SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
2901 DebugLoc dl = Op.getDebugLoc();
2902 // Don't handle ppc_fp128 here; let it be lowered to a libcall.
2903 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
2906 if (Op.getOperand(0).getValueType() == MVT::i64) {
2907 SDValue Bits = DAG.getNode(ISD::BIT_CONVERT, dl,
2908 MVT::f64, Op.getOperand(0));
2909 SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Bits);
2910 if (Op.getValueType() == MVT::f32)
2911 FP = DAG.getNode(ISD::FP_ROUND, dl,
2912 MVT::f32, FP, DAG.getIntPtrConstant(0));
2916 assert(Op.getOperand(0).getValueType() == MVT::i32 &&
2917 "Unhandled SINT_TO_FP type in custom expander!");
2918 // Since we only generate this in 64-bit mode, we can take advantage of
2919 // 64-bit registers. In particular, sign extend the input value into the
2920 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
2921 // then lfd it and fcfid it.
2922 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
2923 int FrameIdx = FrameInfo->CreateStackObject(8, 8);
2924 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2925 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
2927 SDValue Ext64 = DAG.getNode(PPCISD::EXTSW_32, dl, MVT::i32,
2930 // STD the extended value into the stack slot.
2931 MachineMemOperand MO(PseudoSourceValue::getFixedStack(FrameIdx),
2932 MachineMemOperand::MOStore, 0, 8, 8);
2933 SDValue Store = DAG.getNode(PPCISD::STD_32, dl, MVT::Other,
2934 DAG.getEntryNode(), Ext64, FIdx,
2935 DAG.getMemOperand(MO));
2936 // Load the value as a double.
2937 SDValue Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx, NULL, 0);
2939 // FCFID it and return it.
2940 SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Ld);
2941 if (Op.getValueType() == MVT::f32)
2942 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, DAG.getIntPtrConstant(0));
2946 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) {
2947 DebugLoc dl = Op.getDebugLoc();
2949 The rounding mode is in bits 30:31 of FPSCR, and has the following
     settings:
       00 Round to nearest
       01 Round to zero
       10 Round to +inf
       11 Round to -inf

2956 FLT_ROUNDS, on the other hand, expects the following:
      -1 Undefined
       0 Round to 0
       1 Round to nearest
       2 Round to +inf
       3 Round to -inf

2963 To perform the conversion, we do:
2964 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
2967 MachineFunction &MF = DAG.getMachineFunction();
2968 MVT VT = Op.getValueType();
2969 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2970 std::vector<MVT> NodeTys;
2971 SDValue MFFSreg, InFlag;
2973 // Save FP Control Word to register
2974 NodeTys.push_back(MVT::f64); // return register
2975 NodeTys.push_back(MVT::Flag); // unused in this context
2976 SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0);
2978 // Save FP register to stack slot
2979 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
2980 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
2981 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain,
2982 StackSlot, NULL, 0);
2984 // Load FP Control Word from low 32 bits of stack slot.
2985 SDValue Four = DAG.getConstant(4, PtrVT);
2986 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
2987 SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, NULL, 0);
2989 // Transform as necessary
2991 DAG.getNode(ISD::AND, dl, MVT::i32,
2992 CWD, DAG.getConstant(3, MVT::i32));
2994 DAG.getNode(ISD::SRL, dl, MVT::i32,
2995 DAG.getNode(ISD::AND, dl, MVT::i32,
2996 DAG.getNode(ISD::XOR, dl, MVT::i32,
2997 CWD, DAG.getConstant(3, MVT::i32)),
2998 DAG.getConstant(3, MVT::i32)),
2999 DAG.getConstant(1, MVT::i32));
3002 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
3004 return DAG.getNode((VT.getSizeInBits() < 16 ?
3005 ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
3008 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) {
3009 MVT VT = Op.getValueType();
3010 unsigned BitWidth = VT.getSizeInBits();
3011 DebugLoc dl = Op.getDebugLoc();
3012 assert(Op.getNumOperands() == 3 &&
3013 VT == Op.getOperand(1).getValueType() &&
3016 // Expand into a bunch of logical ops. Note that these ops
3017 // depend on the PPC behavior for oversized shift amounts.
3018 SDValue Lo = Op.getOperand(0);
3019 SDValue Hi = Op.getOperand(1);
3020 SDValue Amt = Op.getOperand(2);
3021 MVT AmtVT = Amt.getValueType();
3023 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
3024 DAG.getConstant(BitWidth, AmtVT), Amt);
3025 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
3026 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
3027 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
3028 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
3029 DAG.getConstant(-BitWidth, AmtVT));
3030 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
3031 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
3032 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
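// PPC's slw/srw produce 0 for shift amounts 32..63, which is the "oversized
// shift" behavior noted above. E.g. with BitWidth=32 and Amt=40: Tmp2, Tmp3,
// and OutLo are all 0, and OutHi = Tmp6 = Lo << 8, exactly i64 << 40.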
3033 SDValue OutOps[] = { OutLo, OutHi };
3034 return DAG.getMergeValues(OutOps, 2, dl);
3037 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) {
3038 MVT VT = Op.getValueType();
3039 DebugLoc dl = Op.getDebugLoc();
3040 unsigned BitWidth = VT.getSizeInBits();
3041 assert(Op.getNumOperands() == 3 &&
3042 VT == Op.getOperand(1).getValueType() &&
3045 // Expand into a bunch of logical ops. Note that these ops
3046 // depend on the PPC behavior for oversized shift amounts.
3047 SDValue Lo = Op.getOperand(0);
3048 SDValue Hi = Op.getOperand(1);
3049 SDValue Amt = Op.getOperand(2);
3050 MVT AmtVT = Amt.getValueType();
3052 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
3053 DAG.getConstant(BitWidth, AmtVT), Amt);
3054 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
3055 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
3056 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
3057 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
3058 DAG.getConstant(-BitWidth, AmtVT));
3059 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
3060 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
3061 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
3062 SDValue OutOps[] = { OutLo, OutHi };
3063 return DAG.getMergeValues(OutOps, 2, dl);
3066 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) {
3067 DebugLoc dl = Op.getDebugLoc();
3068 MVT VT = Op.getValueType();
3069 unsigned BitWidth = VT.getSizeInBits();
3070 assert(Op.getNumOperands() == 3 &&
3071 VT == Op.getOperand(1).getValueType() &&
3074 // Expand into a bunch of logical ops, followed by a select_cc.
3075 SDValue Lo = Op.getOperand(0);
3076 SDValue Hi = Op.getOperand(1);
3077 SDValue Amt = Op.getOperand(2);
3078 MVT AmtVT = Amt.getValueType();
3080 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
3081 DAG.getConstant(BitWidth, AmtVT), Amt);
3082 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
3083 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
3084 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
3085 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
3086 DAG.getConstant(-BitWidth, AmtVT));
3087 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
3088 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
3089 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, AmtVT),
3090 Tmp4, Tmp6, ISD::SETLE);
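// When Amt <= BitWidth, Tmp5 <= 0 and the SETLE picks the funnel value
// Tmp4; when Amt > BitWidth, it picks Tmp6 = Hi >>s (Amt - BitWidth),
// which supplies the sign-fill that the logical-shift funnel cannot.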
3091 SDValue OutOps[] = { OutLo, OutHi };
3092 return DAG.getMergeValues(OutOps, 2, dl);
3095 //===----------------------------------------------------------------------===//
3096 // Vector related lowering.
3099 /// BuildSplatI - Build a canonical splati of Val with an element size of
3100 /// SplatSize. Cast the result to VT.
3101 static SDValue BuildSplatI(int Val, unsigned SplatSize, MVT VT,
3102 SelectionDAG &DAG, DebugLoc dl) {
3103 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");
3105 static const MVT VTys[] = { // canonical VT to use for each size.
3106 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
3109 MVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
3111 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize.
3115 MVT CanonicalVT = VTys[SplatSize-1];
3117 // Build a canonical splat for this value.
3118 SDValue Elt = DAG.getConstant(Val, MVT::i32);
3119 SmallVector<SDValue, 8> Ops;
3120 Ops.assign(CanonicalVT.getVectorNumElements(), Elt);
3121 SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT,
3122 &Ops[0], Ops.size());
3123 return DAG.getNode(ISD::BIT_CONVERT, dl, ReqVT, Res);
3126 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the
3127 /// specified intrinsic ID.
3128 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
3129 SelectionDAG &DAG, DebugLoc dl,
3130 MVT DestVT = MVT::Other) {
3131 if (DestVT == MVT::Other) DestVT = LHS.getValueType();
3132 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                     DAG.getConstant(IID, MVT::i32), LHS, RHS);
}
3136 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
3137 /// specified intrinsic ID.
3138 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
3139 SDValue Op2, SelectionDAG &DAG,
3140 DebugLoc dl, MVT DestVT = MVT::Other) {
3141 if (DestVT == MVT::Other) DestVT = Op0.getValueType();
3142 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                     DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2);
}
3147 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
3148 /// amount. The result has the specified value type.
3149 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt,
3150 MVT VT, SelectionDAG &DAG, DebugLoc dl) {
3151 // Force LHS/RHS to be the right type.
3152 LHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, LHS);
3153 RHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, RHS);
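  // vsldoi reads the 32-byte concatenation LHS:RHS and extracts 16 bytes
  // starting at byte Amt, so the v16i8 shuffle mask Amt, Amt+1, ..., Amt+15
  // describes it exactly and the selector can match it to one instruction.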
  int Ops[16];
  for (unsigned i = 0; i != 16; ++i)
    Ops[i] = i + Amt;
3158 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
  return DAG.getNode(ISD::BIT_CONVERT, dl, VT, T);
}
3162 // If this is a case we can't handle, return null and let the default
3163 // expansion code take care of it. If we CAN select this case, and if it
3164 // selects to a single instruction, return Op. Otherwise, if we can codegen
3165 // this case more efficiently than a constant pool load, lower it to the
3166 // sequence of ops that should be used.
3167 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
3168 DebugLoc dl = Op.getDebugLoc();
3169 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
3170 assert(BVN != 0 && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
3172 // Check if this is a splat of a constant value.
3173 APInt APSplatBits, APSplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (!BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
                            HasAnyUndefs) || SplatBitSize > 32)
    return SDValue();
3180 unsigned SplatBits = APSplatBits.getZExtValue();
3181 unsigned SplatUndef = APSplatUndef.getZExtValue();
3182 unsigned SplatSize = SplatBitSize / 8;
3184 // First, handle single instruction cases.
3187 if (SplatBits == 0) {
3188 // Canonicalize all zero vectors to be v4i32.
3189 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
3190 SDValue Z = DAG.getConstant(0, MVT::i32);
3191 Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z);
      Op = DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Z);
    }
    return Op;
  }
3197 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
  int32_t SextVal = (int32_t(SplatBits << (32-SplatBitSize)) >>
                     (32-SplatBitSize));
3200 if (SextVal >= -16 && SextVal <= 15)
3201 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl);
3204 // Two instruction sequences.
3206 // If this value is in the range [-32,30] and is even, use:
3207 // tmp = VSPLTI[bhw], result = add tmp, tmp
3208 if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) {
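    // e.g. a v4i32 splat of 20 does not fit the 5-bit immediate, but
    // vspltisw 10 followed by vadduwm v,v,v produces it in two instructions.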
3209 SDValue Res = BuildSplatI(SextVal >> 1, SplatSize, MVT::Other, DAG, dl);
3210 Res = DAG.getNode(ISD::ADD, dl, Res.getValueType(), Res, Res);
    return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
  }
  // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
  // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
  // for fneg/fabs.
3217 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
3218 // Make -1 and vspltisw -1:
3219 SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);
3221 // Make the VSLW intrinsic, computing 0x8000_0000.
    SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
                                   OnesV, DAG, dl);
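    // vslw looks only at the low 5 bits of each shift amount, so shifting the
    // all-ones vector left by -1 (== 31 mod 32) leaves just the sign bit,
    // 0x8000_0000, in each element.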
3225 // xor by OnesV to invert it.
3226 Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
    return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
  }
3230 // Check to see if this is a wide variety of vsplti*, binop self cases.
3231 static const signed char SplatCsts[] = {
3232 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
    -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
  };
3236 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
    // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
    // cases which are ambiguous (e.g. formation of 0x8000_0000).  'vsplti -1'
    // comes first in the table, so it wins when several immediates would work.
3239 int i = SplatCsts[idx];
    // Figure out what shift amount will be used by altivec if shifted by i
    // in this splat size.
    unsigned TypeShiftAmt = i & (SplatBitSize-1);
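    // In the "binop self" cases below the splatted constant doubles as the
    // per-element shift amount, because the vector shifts likewise use only
    // the low bits: e.g. vspltish 3 then vslh v,v,v splats 3 << 3 == 24.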
3245 // vsplti + shl self.
3246 if (SextVal == (i << (int)TypeShiftAmt)) {
3247 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
3248 static const unsigned IIDs[] = { // Intrinsic to use for each size.
3249 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
        Intrinsic::ppc_altivec_vslw
      };
3252 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
    }
3256 // vsplti + srl self.
3257 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
3258 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
3259 static const unsigned IIDs[] = { // Intrinsic to use for each size.
3260 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
        Intrinsic::ppc_altivec_vsrw
      };
3263 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
    }
3267 // vsplti + sra self.
    if (SextVal == (int)((int)i >> TypeShiftAmt)) {
3269 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
3270 static const unsigned IIDs[] = { // Intrinsic to use for each size.
3271 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
        Intrinsic::ppc_altivec_vsraw
      };
3274 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
    }
3278 // vsplti + rol self.
3279 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
3280 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
3281 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
3282 static const unsigned IIDs[] = { // Intrinsic to use for each size.
3283 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
        Intrinsic::ppc_altivec_vrlw
      };
3286 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
    }
3290 // t = vsplti c, result = vsldoi t, t, 1
    if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
3292 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG, dl);
    }
3295 // t = vsplti c, result = vsldoi t, t, 2
    if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
3297 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG, dl);
    }
3300 // t = vsplti c, result = vsldoi t, t, 3
    if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
3302 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
      return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG, dl);
    }
  }
3307 // Three instruction sequences.
3309 // Odd, in range [17,31]: (vsplti C)-(vsplti -16).
3310 if (SextVal >= 0 && SextVal <= 31) {
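    // e.g. 27 = 11 - (-16), and both 11 and -16 fit the 5-bit immediate.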
3311 SDValue LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG, dl);
3312 SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl);
3313 LHS = DAG.getNode(ISD::SUB, dl, LHS.getValueType(), LHS, RHS);
    return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), LHS);
  }
3316 // Odd, in range [-31,-17]: (vsplti C)+(vsplti -16).
3317 if (SextVal >= -31 && SextVal <= 0) {
3318 SDValue LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG, dl);
3319 SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl);
3320 LHS = DAG.getNode(ISD::ADD, dl, LHS.getValueType(), LHS, RHS);
    return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), LHS);
  }

  return SDValue();
}
3327 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
3328 /// the specified operations to build the shuffle.
3329 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
                                      SDValue RHS, SelectionDAG &DAG,
                                      DebugLoc dl) {
3332 unsigned OpNum = (PFEntry >> 26) & 0x0F;
3333 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
3334 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
3337 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
3349 if (OpNum == OP_COPY) {
3350 if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }
3355 SDValue OpLHS, OpRHS;
3356 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
3357 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
3361 default: assert(0 && "Unknown i32 permute!");
3363 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3;
3364 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
3365 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7;
3366 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
3369 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
3370 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
3371 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
3372 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
3375 for (unsigned i = 0; i != 16; ++i)
3376 ShufIdxs[i] = (i&3)+0;
3379 for (unsigned i = 0; i != 16; ++i)
3380 ShufIdxs[i] = (i&3)+4;
3383 for (unsigned i = 0; i != 16; ++i)
3384 ShufIdxs[i] = (i&3)+8;
3387 for (unsigned i = 0; i != 16; ++i)
3388 ShufIdxs[i] = (i&3)+12;
3391 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
3393 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
3395 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
3397 MVT VT = OpLHS.getValueType();
3398 OpLHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OpLHS);
3399 OpRHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OpRHS);
3400 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
3401 return DAG.getNode(ISD::BIT_CONVERT, dl, VT, T);
3404 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this
3405 /// is a shuffle we can handle in a single instruction, return it. Otherwise,
3406 /// return the code it can be lowered into. Worst case, it can always be
3407 /// lowered into a vperm.
3408 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
3409 SelectionDAG &DAG) {
3410 DebugLoc dl = Op.getDebugLoc();
3411 SDValue V1 = Op.getOperand(0);
3412 SDValue V2 = Op.getOperand(1);
3413 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
3414 MVT VT = Op.getValueType();
3416 // Cases that are handled by instructions that take permute immediates
3417 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
3418 // selected by the instruction selector.
3419 if (V2.getOpcode() == ISD::UNDEF) {
3420 if (PPC::isSplatShuffleMask(SVOp, 1) ||
3421 PPC::isSplatShuffleMask(SVOp, 2) ||
3422 PPC::isSplatShuffleMask(SVOp, 4) ||
3423 PPC::isVPKUWUMShuffleMask(SVOp, true) ||
3424 PPC::isVPKUHUMShuffleMask(SVOp, true) ||
3425 PPC::isVSLDOIShuffleMask(SVOp, true) != -1 ||
3426 PPC::isVMRGLShuffleMask(SVOp, 1, true) ||
3427 PPC::isVMRGLShuffleMask(SVOp, 2, true) ||
3428 PPC::isVMRGLShuffleMask(SVOp, 4, true) ||
3429 PPC::isVMRGHShuffleMask(SVOp, 1, true) ||
3430 PPC::isVMRGHShuffleMask(SVOp, 2, true) ||
        PPC::isVMRGHShuffleMask(SVOp, 4, true)) {
      return Op;
    }
  }
3436 // Altivec has a variety of "shuffle immediates" that take two vector inputs
  // and produce a fixed permutation.  If any of these match, do not lower to
  // VPERM.
3439 if (PPC::isVPKUWUMShuffleMask(SVOp, false) ||
3440 PPC::isVPKUHUMShuffleMask(SVOp, false) ||
3441 PPC::isVSLDOIShuffleMask(SVOp, false) != -1 ||
3442 PPC::isVMRGLShuffleMask(SVOp, 1, false) ||
3443 PPC::isVMRGLShuffleMask(SVOp, 2, false) ||
3444 PPC::isVMRGLShuffleMask(SVOp, 4, false) ||
3445 PPC::isVMRGHShuffleMask(SVOp, 1, false) ||
3446 PPC::isVMRGHShuffleMask(SVOp, 2, false) ||
      PPC::isVMRGHShuffleMask(SVOp, 4, false))
    return Op;
3450 // Check to see if this is a shuffle of 4-byte values. If so, we can use our
3451 // perfect shuffle table to emit an optimal matching sequence.
3452 SmallVector<int, 16> PermMask;
3453 SVOp->getMask(PermMask);
3455 unsigned PFIndexes[4];
3456 bool isFourElementShuffle = true;
3457 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
3458 unsigned EltNo = 8; // Start out undef.
3459 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte.
3460 if (PermMask[i*4+j] < 0)
3461 continue; // Undef, ignore it.
3463 unsigned ByteSource = PermMask[i*4+j];
3464 if ((ByteSource & 3) != j) {
        isFourElementShuffle = false;
        break;
      }

      if (EltNo == 8) {
        EltNo = ByteSource/4;
3471 } else if (EltNo != ByteSource/4) {
        isFourElementShuffle = false;
        break;
      }
    }
    PFIndexes[i] = EltNo;
  }
3479 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
3480 // perfect shuffle vector to determine if it is cost effective to do this as
3481 // discrete instructions, or whether we should use a vperm.
3482 if (isFourElementShuffle) {
3483 // Compute the index in the perfect shuffle table.
3484 unsigned PFTableIndex =
3485 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
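    // Each PFIndex is in [0,8]: 0-3 select an element of V1, 4-7 an element
    // of V2, and 8 means undef, so the four indexes form one base-9 number.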
3487 unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
3488 unsigned Cost = (PFEntry >> 30);
3490 // Determining when to avoid vperm is tricky. Many things affect the cost
3491 // of vperm, particularly how many times the perm mask needs to be computed.
3492 // For example, if the perm mask can be hoisted out of a loop or is already
3493 // used (perhaps because there are multiple permutes with the same shuffle
3494 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of
3495 // the loop requires an extra register.
3497 // As a compromise, we only emit discrete instructions if the shuffle can be
3498 // generated in 3 or fewer operations. When we have loop information
3499 // available, if this block is within a loop, we should avoid using vperm
3500 // for 3-operation perms and use a constant pool load instead.
3502 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
3505 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
3506 // vector that will get spilled to the constant pool.
3507 if (V2.getOpcode() == ISD::UNDEF) V2 = V1;
3509 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
3510 // that it is in input element units, not in bytes. Convert now.
3511 MVT EltVT = V1.getValueType().getVectorElementType();
3512 unsigned BytesPerElement = EltVT.getSizeInBits()/8;
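  // e.g. for v4i32 a mask entry of 5 (element 1 of V2) expands to byte
  // indexes 20,21,22,23 of the concatenated V1:V2 input, the form vperm's
  // control vector expects.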
3514 SmallVector<SDValue, 16> ResultMask;
3515 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
3516 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
3518 for (unsigned j = 0; j != BytesPerElement; ++j)
      ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
                                           MVT::i32));
  }
3523 SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
3524 &ResultMask[0], ResultMask.size());
  return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), V1, V2, VPermMask);
}
3528 /// getAltivecCompareInfo - Given an intrinsic, return false if it is not an
3529 /// altivec comparison. If it is, return true and fill in Opc/isDot with
3530 /// information about the intrinsic.
static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc,
                                  bool &isDot) {
3533 unsigned IntrinsicID =
3534 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
3537 switch (IntrinsicID) {
3538 default: return false;
3539 // Comparison predicates.
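  // The CompareOpc values below are the extended-opcode encodings of the
  // corresponding vcmp* instructions (e.g. 966 encodes vcmpbfp); the dot
  // (record) forms additionally set CR6.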
3540 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break;
3541 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break;
3542 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break;
3543 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break;
3544 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break;
3545 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break;
3546 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break;
3547 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break;
3548 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break;
3549 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break;
3550 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break;
3551 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break;
3552 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break;
3554 // Normal Comparisons.
3555 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break;
3556 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break;
3557 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break;
3558 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break;
3559 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break;
3560 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break;
3561 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break;
3562 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break;
3563 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break;
3564 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break;
3565 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break;
3566 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtuw:   CompareOpc = 646; isDot = 0; break;
  }
  return true;
}
3572 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
3573 /// lower, do it, otherwise return null.
3574 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3575 SelectionDAG &DAG) {
3576 // If this is a lowered altivec predicate compare, CompareOpc is set to the
3577 // opcode number of the comparison.
3578 DebugLoc dl = Op.getDebugLoc();
  int CompareOpc;
  bool isDot;
  if (!getAltivecCompareInfo(Op, CompareOpc, isDot))
3582 return SDValue(); // Don't custom lower most intrinsics.
3584 // If this is a non-dot comparison, make the VCMP node and we are done.
  if (!isDot) {
    SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
                              Op.getOperand(1), Op.getOperand(2),
                              DAG.getConstant(CompareOpc, MVT::i32));
    return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Tmp);
  }
3592 // Create the PPCISD altivec 'dot' comparison node.
  SDValue Ops[] = {
    Op.getOperand(2),  // LHS
    Op.getOperand(3),  // RHS
    DAG.getConstant(CompareOpc, MVT::i32)
  };
3598 std::vector<MVT> VTs;
3599 VTs.push_back(Op.getOperand(2).getValueType());
3600 VTs.push_back(MVT::Flag);
3601 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3);
3603 // Now that we have the comparison, emit a copy from the CR to a GPR.
3604 // This is flagged to the above dot comparison.
3605 SDValue Flags = DAG.getNode(PPCISD::MFCR, dl, MVT::i32,
3606 DAG.getRegister(PPC::CR6, MVT::i32),
3607 CompNode.getValue(1));
3609 // Unpack the result based on how the target uses it.
3610 unsigned BitNo; // Bit # of CR6.
3611 bool InvertBit; // Invert result?
  switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
  default:  // Can't happen, don't crash on invalid number though.
  case 0:   // Return the value of the EQ bit of CR6.
    BitNo = 0; InvertBit = false;
    break;
  case 1:   // Return the inverted value of the EQ bit of CR6.
    BitNo = 0; InvertBit = true;
    break;
  case 2:   // Return the value of the LT bit of CR6.
    BitNo = 2; InvertBit = false;
    break;
  case 3:   // Return the inverted value of the LT bit of CR6.
    BitNo = 2; InvertBit = true;
    break;
  }
3628 // Shift the bit into the low position.
3629 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
3630 DAG.getConstant(8-(3-BitNo), MVT::i32));
3632 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
3633 DAG.getConstant(1, MVT::i32));
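  // The selected CR6 bit sits at bit position 8-(3-BitNo) of the MFCR
  // result, so the shift above moves it into bit 0 and the AND strips off
  // the rest of the field.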
3635 // If we are supposed to, toggle the bit.
  if (InvertBit)
    Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
                        DAG.getConstant(1, MVT::i32));
  return Flags;
}
3642 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
3643 SelectionDAG &DAG) {
3644 DebugLoc dl = Op.getDebugLoc();
3645 // Create a stack slot that is 16-byte aligned.
3646 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
3647 int FrameIdx = FrameInfo->CreateStackObject(16, 16);
3648 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3649 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
3651 // Store the input value into Value#0 of the stack slot.
3652 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl,
3653 Op.getOperand(0), FIdx, NULL, 0);
  return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, NULL, 0);
}
3658 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) {
3659 DebugLoc dl = Op.getDebugLoc();
3660 if (Op.getValueType() == MVT::v4i32) {
3661 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
3663 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl);
3664 SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt.
3666 SDValue RHSSwap = // = vrlw RHS, 16
3667 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
3669 // Shrinkify inputs to v8i16.
3670 LHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, LHS);
3671 RHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, RHS);
3672 RHSSwap = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, RHSSwap);
    // Low parts multiplied together, generating 32-bit results (we ignore the
    // top parts).
3676 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
3677 LHS, RHS, DAG, dl, MVT::v4i32);
3679 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
3680 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
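    // Writing each 32-bit element of LHS as ah*2^16 + al (and bh, bl for
    // RHS), LoProd = al*bl and HiProd = ah*bl + al*bh, so
    // LoProd + (HiProd << 16) is the low 32 bits of the full product.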
3681 // Shift the high parts up 16 bits.
    HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
                              Neg16, DAG, dl);
3684 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
3685 } else if (Op.getValueType() == MVT::v8i16) {
3686 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
3688 SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl);
3690 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
3691 LHS, RHS, Zero, DAG, dl);
3692 } else if (Op.getValueType() == MVT::v16i8) {
3693 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
    // Multiply the even 8-bit parts, producing 16-bit products.
3696 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
3697 LHS, RHS, DAG, dl, MVT::v8i16);
3698 EvenParts = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, EvenParts);
    // Multiply the odd 8-bit parts, producing 16-bit products.
3701 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
3702 LHS, RHS, DAG, dl, MVT::v8i16);
3703 OddParts = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OddParts);
3705 // Merge the results together.
    int Ops[16];
    for (unsigned i = 0; i != 8; ++i) {
      Ops[i*2  ] = 2*i+1;
      Ops[i*2+1] = 2*i+1+16;
    }
3711 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
  } else {
    assert(0 && "Unknown mul to lower!");
    abort();
  }
}
3718 /// LowerOperation - Provide custom lowering hooks for some operations.
3720 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
3721 switch (Op.getOpcode()) {
3722 default: assert(0 && "Wasn't expecting to be able to lower this!");
3723 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3724 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
3725 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
3726 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
3727 case ISD::SETCC: return LowerSETCC(Op, DAG);
3728 case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG, VarArgsFrameIndex, VarArgsStackOffset,
3731 VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget);
  case ISD::VAARG:
    return LowerVAARG(Op, DAG, VarArgsFrameIndex, VarArgsStackOffset,
3735 VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget);
3737 case ISD::FORMAL_ARGUMENTS:
3738 return LowerFORMAL_ARGUMENTS(Op, DAG, VarArgsFrameIndex,
3739 VarArgsStackOffset, VarArgsNumGPR,
3740 VarArgsNumFPR, PPCSubTarget);
3742 case ISD::CALL: return LowerCALL(Op, DAG, PPCSubTarget,
3743 getTargetMachine());
3744 case ISD::RET: return LowerRET(Op, DAG, getTargetMachine());
3745 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, PPCSubTarget);
3746 case ISD::DYNAMIC_STACKALLOC:
3747 return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget);
3749 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
3750 case ISD::FP_TO_UINT:
  case ISD::FP_TO_SINT:     return LowerFP_TO_INT(Op, DAG,
                                                  Op.getDebugLoc());
3753 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
3754 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
3756 // Lower 64-bit shifts.
3757 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG);
3758 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG);
3759 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG);
3761 // Vector-related lowering.
3762 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
3763 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
3764 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3765 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
3766 case ISD::MUL: return LowerMUL(Op, DAG);
3768 // Frame & Return address.
3769 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
  }
  return SDValue();
}
3775 void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
3776 SmallVectorImpl<SDValue>&Results,
3777 SelectionDAG &DAG) {
3778 DebugLoc dl = N->getDebugLoc();
3779 switch (N->getOpcode()) {
  default:
    assert(false && "Do not know how to custom type legalize this operation!");
    return;
3783 case ISD::FP_ROUND_INREG: {
3784 assert(N->getValueType(0) == MVT::ppcf128);
3785 assert(N->getOperand(0).getValueType() == MVT::ppcf128);
3786 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
3787 MVT::f64, N->getOperand(0),
3788 DAG.getIntPtrConstant(0));
3789 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
3790 MVT::f64, N->getOperand(0),
3791 DAG.getIntPtrConstant(1));
3793 // This sequence changes FPSCR to do round-to-zero, adds the two halves
3794 // of the long double, and puts FPSCR back the way it was. We do not
3795 // actually model FPSCR.
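    // FPSCR bits 30:31 form the RN (rounding mode) field; the MTFSB1 on bit
    // 31 and MTFSB0 on bit 30 below select RN=01, round toward zero.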
3796 std::vector<MVT> NodeTys;
3797 SDValue Ops[4], Result, MFFSreg, InFlag, FPreg;
3799 NodeTys.push_back(MVT::f64); // Return register
3800 NodeTys.push_back(MVT::Flag); // Returns a flag for later insns
3801 Result = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0);
3802 MFFSreg = Result.getValue(0);
3803 InFlag = Result.getValue(1);
    NodeTys.clear();
    NodeTys.push_back(MVT::Flag);   // Returns a flag
    Ops[0] = DAG.getConstant(31, MVT::i32);
    Ops[1] = InFlag;
    Result = DAG.getNode(PPCISD::MTFSB1, dl, NodeTys, Ops, 2);
3810 InFlag = Result.getValue(0);
    NodeTys.clear();
    NodeTys.push_back(MVT::Flag);   // Returns a flag
    Ops[0] = DAG.getConstant(30, MVT::i32);
    Ops[1] = InFlag;
    Result = DAG.getNode(PPCISD::MTFSB0, dl, NodeTys, Ops, 2);
3817 InFlag = Result.getValue(0);
    NodeTys.clear();
    NodeTys.push_back(MVT::f64);    // result of add
    NodeTys.push_back(MVT::Flag);   // Returns a flag
    Ops[0] = Lo;
    Ops[1] = Hi;
    Ops[2] = InFlag;
    Result = DAG.getNode(PPCISD::FADDRTZ, dl, NodeTys, Ops, 3);
3826 FPreg = Result.getValue(0);
3827 InFlag = Result.getValue(1);
    NodeTys.clear();
    NodeTys.push_back(MVT::f64);
    Ops[0] = DAG.getConstant(1, MVT::i32);
    Ops[1] = MFFSreg;
    Ops[2] = FPreg;
    Ops[3] = InFlag;
    Result = DAG.getNode(PPCISD::MTFSF, dl, NodeTys, Ops, 4);
3836 FPreg = Result.getValue(0);
    // We know the low half is about to be thrown away, so just use something
    // convenient.
    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128,
                                  FPreg, FPreg));
    return;
  }
3844 case ISD::FP_TO_SINT:
    Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
    return;
  }
}
3851 //===----------------------------------------------------------------------===//
3852 // Other Lowering Code
3853 //===----------------------------------------------------------------------===//
MachineBasicBlock *
PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
3857 bool is64bit, unsigned BinOpcode) const {
3858 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
3859 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
3861 const BasicBlock *LLVM_BB = BB->getBasicBlock();
3862 MachineFunction *F = BB->getParent();
  MachineFunction::iterator It = BB;
  ++It;
3866 unsigned dest = MI->getOperand(0).getReg();
3867 unsigned ptrA = MI->getOperand(1).getReg();
3868 unsigned ptrB = MI->getOperand(2).getReg();
3869 unsigned incr = MI->getOperand(3).getReg();
3870 DebugLoc dl = MI->getDebugLoc();
3872 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
3873 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
3874 F->insert(It, loopMBB);
3875 F->insert(It, exitMBB);
3876 exitMBB->transferSuccessors(BB);
3878 MachineRegisterInfo &RegInfo = F->getRegInfo();
3879 unsigned TmpReg = (!BinOpcode) ? incr :
3880 RegInfo.createVirtualRegister(
3881 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass :
3882 (const TargetRegisterClass *) &PPC::GPRCRegClass);
3886 // fallthrough --> loopMBB
3887 BB->addSuccessor(loopMBB);
3890 // l[wd]arx dest, ptr
3891 // add r0, dest, incr
3892 // st[wd]cx. r0, ptr
3894 // fallthrough --> exitMBB
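  // The st[wd]cx. fails, and the bne- retries the loop, if another processor
  // touched the reservation granule between the l[wd]arx and the
  // store-conditional.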
  BB = loopMBB;
  BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest)
3897 .addReg(ptrA).addReg(ptrB);
  if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
3900 BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
3901 .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
3902 BuildMI(BB, dl, TII->get(PPC::BCC))
3903 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
3904 BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  BB = exitMBB;
  return BB;
}
MachineBasicBlock *
PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI,
3915 MachineBasicBlock *BB,
3916 bool is8bit, // operation
3917 unsigned BinOpcode) const {
3918 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
3919 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  // In 64-bit mode we have to use 64 bits for addresses, even though the
3921 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address
3922 // registers without caring whether they're 32 or 64, but here we're
3923 // doing actual arithmetic on the addresses.
3924 bool is64bit = PPCSubTarget.isPPC64();
3926 const BasicBlock *LLVM_BB = BB->getBasicBlock();
3927 MachineFunction *F = BB->getParent();
  MachineFunction::iterator It = BB;
  ++It;
3931 unsigned dest = MI->getOperand(0).getReg();
3932 unsigned ptrA = MI->getOperand(1).getReg();
3933 unsigned ptrB = MI->getOperand(2).getReg();
3934 unsigned incr = MI->getOperand(3).getReg();
3935 DebugLoc dl = MI->getDebugLoc();
3937 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
3938 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
3939 F->insert(It, loopMBB);
3940 F->insert(It, exitMBB);
3941 exitMBB->transferSuccessors(BB);
3943 MachineRegisterInfo &RegInfo = F->getRegInfo();
3944 const TargetRegisterClass *RC =
3945 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass :
3946 (const TargetRegisterClass *) &PPC::GPRCRegClass;
3947 unsigned PtrReg = RegInfo.createVirtualRegister(RC);
3948 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
3949 unsigned ShiftReg = RegInfo.createVirtualRegister(RC);
3950 unsigned Incr2Reg = RegInfo.createVirtualRegister(RC);
3951 unsigned MaskReg = RegInfo.createVirtualRegister(RC);
3952 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
3953 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
3954 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
3955 unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC);
3956 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
3957 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
3959 unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC);
3963 // fallthrough --> loopMBB
3964 BB->addSuccessor(loopMBB);
3966 // The 4-byte load must be aligned, while a char or short may be
3967 // anywhere in the word. Hence all this nasty bookkeeping code.
3968 // add ptr1, ptrA, ptrB [copy if ptrA==0]
3969 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
3970 // xori shift, shift1, 24 [16]
3971 // rlwinm ptr, ptr1, 0, 0, 29
3972 // slw incr2, incr, shift
3973 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
3974 // slw mask, mask2, shift
3976 // lwarx tmpDest, ptr
3977 // add tmp, tmpDest, incr2
3978 // andc tmp2, tmpDest, mask
3979 // and tmp3, tmp, mask
3980 // or tmp4, tmp3, tmp2
3983 // fallthrough --> exitMBB
3984 // srw dest, tmpDest, shift
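  // Worked example for a byte at (ptr1 & 3) == 1 on this big-endian target:
  // shift1 = 8 and shift = 8 xor 24 = 16, so the byte lives in bits 23:16 of
  // the aligned word, and incr and the mask are slw'd into that position.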
  unsigned Ptr1Reg;
  if (ptrA != PPC::R0) {
3987 Ptr1Reg = RegInfo.createVirtualRegister(RC);
3988 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
      .addReg(ptrA).addReg(ptrB);
  } else {
    Ptr1Reg = ptrB;
  }
3993 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
3994 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
3995 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
3996 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
  if (is64bit)
    BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
3999 .addReg(Ptr1Reg).addImm(0).addImm(61);
  else
    BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
4002 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
4003 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg)
4004 .addReg(incr).addReg(ShiftReg);
  if (is8bit)
    BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
  else {
    BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
    BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
      .addReg(Mask3Reg).addImm(65535);
  }
4011 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
4012 .addReg(Mask2Reg).addReg(ShiftReg);
  BB = loopMBB;
  BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
4016 .addReg(PPC::R0).addReg(PtrReg);
  if (BinOpcode)
    BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
4019 .addReg(Incr2Reg).addReg(TmpDestReg);
4020 BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg)
4021 .addReg(TmpDestReg).addReg(MaskReg);
4022 BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg)
4023 .addReg(TmpReg).addReg(MaskReg);
4024 BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg)
4025 .addReg(Tmp3Reg).addReg(Tmp2Reg);
4026 BuildMI(BB, dl, TII->get(PPC::STWCX))
4027 .addReg(Tmp4Reg).addReg(PPC::R0).addReg(PtrReg);
4028 BuildMI(BB, dl, TII->get(PPC::BCC))
4029 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
4030 BB->addSuccessor(loopMBB);
4031 BB->addSuccessor(exitMBB);
  BB = exitMBB;
  BuildMI(BB, dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg).addReg(ShiftReg);
  return BB;
}
MachineBasicBlock *
PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
4042 MachineBasicBlock *BB) const {
4043 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
4045 // To "insert" these instructions we actually have to insert their
4046 // control-flow patterns.
4047 const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;
4051 MachineFunction *F = BB->getParent();
4053 if (MI->getOpcode() == PPC::SELECT_CC_I4 ||
4054 MI->getOpcode() == PPC::SELECT_CC_I8 ||
4055 MI->getOpcode() == PPC::SELECT_CC_F4 ||
4056 MI->getOpcode() == PPC::SELECT_CC_F8 ||
4057 MI->getOpcode() == PPC::SELECT_CC_VRRC) {
4059 // The incoming instruction knows the destination vreg to set, the
4060 // condition code register to branch on, the true/false values to
4061 // select between, and a branch opcode to use.
4066 // cmpTY ccX, r1, r2
4068 // fallthrough --> copy0MBB
4069 MachineBasicBlock *thisMBB = BB;
4070 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
4071 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
4072 unsigned SelectPred = MI->getOperand(4).getImm();
4073 DebugLoc dl = MI->getDebugLoc();
4074 BuildMI(BB, dl, TII->get(PPC::BCC))
4075 .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
4076 F->insert(It, copy0MBB);
4077 F->insert(It, sinkMBB);
4078 // Update machine-CFG edges by transferring all successors of the current
4079 // block to the new block which will contain the Phi node for the select.
4080 sinkMBB->transferSuccessors(BB);
4081 // Next, add the true and fallthrough blocks as its successors.
4082 BB->addSuccessor(copy0MBB);
4083 BB->addSuccessor(sinkMBB);
4086 // %FalseValue = ...
4087 // # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
4091 BB->addSuccessor(sinkMBB);
4094 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    BB = sinkMBB;
    BuildMI(BB, dl, TII->get(PPC::PHI), MI->getOperand(0).getReg())
4098 .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
      .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
  }
4101 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
4102 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
4103 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
4104 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
4105 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
4106 BB = EmitAtomicBinary(MI, BB, false, PPC::ADD4);
4107 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
4108 BB = EmitAtomicBinary(MI, BB, true, PPC::ADD8);
4110 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
4111 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
4112 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
4113 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
4114 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
4115 BB = EmitAtomicBinary(MI, BB, false, PPC::AND);
4116 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
4117 BB = EmitAtomicBinary(MI, BB, true, PPC::AND8);
4119 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
4120 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
4121 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
4122 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
4123 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
4124 BB = EmitAtomicBinary(MI, BB, false, PPC::OR);
4125 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
4126 BB = EmitAtomicBinary(MI, BB, true, PPC::OR8);
4128 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
4129 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
4130 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
4131 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
4132 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
4133 BB = EmitAtomicBinary(MI, BB, false, PPC::XOR);
4134 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
4135 BB = EmitAtomicBinary(MI, BB, true, PPC::XOR8);
4137 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
4138 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ANDC);
4139 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
4140 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ANDC);
4141 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
4142 BB = EmitAtomicBinary(MI, BB, false, PPC::ANDC);
4143 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
4144 BB = EmitAtomicBinary(MI, BB, true, PPC::ANDC8);
4146 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
4147 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
4148 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
4149 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
4150 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
4151 BB = EmitAtomicBinary(MI, BB, false, PPC::SUBF);
4152 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
4153 BB = EmitAtomicBinary(MI, BB, true, PPC::SUBF8);
4155 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I8)
4156 BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
4157 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I16)
4158 BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
4159 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I32)
4160 BB = EmitAtomicBinary(MI, BB, false, 0);
4161 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I64)
4162 BB = EmitAtomicBinary(MI, BB, true, 0);
4164 else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
4165 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64) {
4166 bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
4168 unsigned dest = MI->getOperand(0).getReg();
4169 unsigned ptrA = MI->getOperand(1).getReg();
4170 unsigned ptrB = MI->getOperand(2).getReg();
4171 unsigned oldval = MI->getOperand(3).getReg();
4172 unsigned newval = MI->getOperand(4).getReg();
4173 DebugLoc dl = MI->getDebugLoc();
4175 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
4176 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
4177 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
4178 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
4179 F->insert(It, loop1MBB);
4180 F->insert(It, loop2MBB);
4181 F->insert(It, midMBB);
4182 F->insert(It, exitMBB);
4183 exitMBB->transferSuccessors(BB);
4187 // fallthrough --> loopMBB
4188 BB->addSuccessor(loop1MBB);
4191 // l[wd]arx dest, ptr
4192 // cmp[wd] dest, oldval
4195 // st[wd]cx. newval, ptr
4199 // st[wd]cx. dest, ptr
    BB = loop1MBB;
    BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest)
4203 .addReg(ptrA).addReg(ptrB);
4204 BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
4205 .addReg(oldval).addReg(dest);
4206 BuildMI(BB, dl, TII->get(PPC::BCC))
4207 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
4208 BB->addSuccessor(loop2MBB);
4209 BB->addSuccessor(midMBB);
    BB = loop2MBB;
    BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
4213 .addReg(newval).addReg(ptrA).addReg(ptrB);
4214 BuildMI(BB, dl, TII->get(PPC::BCC))
4215 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
4216 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
4217 BB->addSuccessor(loop1MBB);
4218 BB->addSuccessor(exitMBB);
    BB = midMBB;
    BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
4222 .addReg(dest).addReg(ptrA).addReg(ptrB);
    BB->addSuccessor(exitMBB);

    BB = exitMBB;
4228 } else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
4229 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
4230 // We must use 64-bit registers for addresses when targeting 64-bit,
    // since we're actually doing arithmetic on them.  Other registers
    // can be 32-bit.
    bool is64bit = PPCSubTarget.isPPC64();
4234 bool is8bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
4236 unsigned dest = MI->getOperand(0).getReg();
4237 unsigned ptrA = MI->getOperand(1).getReg();
4238 unsigned ptrB = MI->getOperand(2).getReg();
4239 unsigned oldval = MI->getOperand(3).getReg();
4240 unsigned newval = MI->getOperand(4).getReg();
4241 DebugLoc dl = MI->getDebugLoc();
4243 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
4244 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
4245 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
4246 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
4247 F->insert(It, loop1MBB);
4248 F->insert(It, loop2MBB);
4249 F->insert(It, midMBB);
4250 F->insert(It, exitMBB);
4251 exitMBB->transferSuccessors(BB);
4253 MachineRegisterInfo &RegInfo = F->getRegInfo();
4254 const TargetRegisterClass *RC =
4255 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass :
4256 (const TargetRegisterClass *) &PPC::GPRCRegClass;
4257 unsigned PtrReg = RegInfo.createVirtualRegister(RC);
4258 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
4259 unsigned ShiftReg = RegInfo.createVirtualRegister(RC);
4260 unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC);
4261 unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC);
4262 unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC);
4263 unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC);
4264 unsigned MaskReg = RegInfo.createVirtualRegister(RC);
4265 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
4266 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
4267 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
4268 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
4269 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
4271 unsigned TmpReg = RegInfo.createVirtualRegister(RC);
4274 // fallthrough --> loopMBB
4275 BB->addSuccessor(loop1MBB);
4277 // The 4-byte load must be aligned, while a char or short may be
4278 // anywhere in the word. Hence all this nasty bookkeeping code.
4279 // add ptr1, ptrA, ptrB [copy if ptrA==0]
4280 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
4281 // xori shift, shift1, 24 [16]
4282 // rlwinm ptr, ptr1, 0, 0, 29
4283 // slw newval2, newval, shift
    //   slw oldval2, oldval, shift
4285 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
4286 // slw mask, mask2, shift
4287 // and newval3, newval2, mask
4288 // and oldval3, oldval2, mask
4290 // lwarx tmpDest, ptr
4291 // and tmp, tmpDest, mask
4292 // cmpw tmp, oldval3
4295 // andc tmp2, tmpDest, mask
4296 // or tmp4, tmp2, newval3
4301 // stwcx. tmpDest, ptr
4303 // srw dest, tmpDest, shift
    unsigned Ptr1Reg;
    if (ptrA != PPC::R0) {
4305 Ptr1Reg = RegInfo.createVirtualRegister(RC);
4306 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
        .addReg(ptrA).addReg(ptrB);
    } else {
      Ptr1Reg = ptrB;
    }
4311 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
4312 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
4313 BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
4314 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
    if (is64bit)
      BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
4317 .addReg(Ptr1Reg).addImm(0).addImm(61);
    else
      BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
4320 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
4321 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
4322 .addReg(newval).addReg(ShiftReg);
4323 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
4324 .addReg(oldval).addReg(ShiftReg);
    if (is8bit)
      BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
    else {
      BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
      BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
        .addReg(Mask3Reg).addImm(65535);
    }
4332 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
4333 .addReg(Mask2Reg).addReg(ShiftReg);
4334 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
4335 .addReg(NewVal2Reg).addReg(MaskReg);
4336 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
4337 .addReg(OldVal2Reg).addReg(MaskReg);
    BB = loop1MBB;
    BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
4341 .addReg(PPC::R0).addReg(PtrReg);
    BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
4343 .addReg(TmpDestReg).addReg(MaskReg);
4344 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
4345 .addReg(TmpReg).addReg(OldVal3Reg);
4346 BuildMI(BB, dl, TII->get(PPC::BCC))
4347 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
4348 BB->addSuccessor(loop2MBB);
4349 BB->addSuccessor(midMBB);
    BB = loop2MBB;
    BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
4353 .addReg(TmpDestReg).addReg(MaskReg);
    BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
4355 .addReg(Tmp2Reg).addReg(NewVal3Reg);
4356 BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg)
4357 .addReg(PPC::R0).addReg(PtrReg);
4358 BuildMI(BB, dl, TII->get(PPC::BCC))
4359 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
4360 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
4361 BB->addSuccessor(loop1MBB);
4362 BB->addSuccessor(exitMBB);
    BB = midMBB;
    BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg)
4366 .addReg(PPC::R0).addReg(PtrReg);
4367 BB->addSuccessor(exitMBB);
    BB = exitMBB;
    BuildMI(BB, dl, TII->get(PPC::SRW), dest).addReg(TmpReg).addReg(ShiftReg);
  } else {
    assert(0 && "Unexpected instr type to insert");
  }
  F->DeleteMachineInstr(MI);   // The pseudo instruction is gone now.
  return BB;
}
4381 //===----------------------------------------------------------------------===//
4382 // Target Optimization Hooks
4383 //===----------------------------------------------------------------------===//
4385 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
4386 DAGCombinerInfo &DCI) const {
4387 TargetMachine &TM = getTargetMachine();
4388 SelectionDAG &DAG = DCI.DAG;
4389 DebugLoc dl = N->getDebugLoc();
4390 switch (N->getOpcode()) {
  default: break;
  case ISD::SHL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
4394 if (C->getZExtValue() == 0) // 0 << V -> 0.
4395 return N->getOperand(0);
    }
    break;
  case ISD::SRL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
4400 if (C->getZExtValue() == 0) // 0 >>u V -> 0.
4401 return N->getOperand(0);
    }
    break;
  case ISD::SRA:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
4406 if (C->getZExtValue() == 0 || // 0 >>s V -> 0.
4407 C->isAllOnesValue()) // -1 >>s V -> -1.
        return N->getOperand(0);
    }
    break;
4412 case ISD::SINT_TO_FP:
4413 if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
4414 if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) {
4415 // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores.
4416 // We allow the src/dst to be either f32/f64, but the intermediate
4417 // type must be i64.
4418 if (N->getOperand(0).getValueType() == MVT::i64 &&
4419 N->getOperand(0).getOperand(0).getValueType() != MVT::ppcf128) {
4420 SDValue Val = N->getOperand(0).getOperand(0);
4421 if (Val.getValueType() == MVT::f32) {
4422 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
4423 DCI.AddToWorklist(Val.getNode());
4426 Val = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Val);
4427 DCI.AddToWorklist(Val.getNode());
4428 Val = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Val);
4429 DCI.AddToWorklist(Val.getNode());
4430 if (N->getValueType(0) == MVT::f32) {
4431 Val = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Val,
4432 DAG.getIntPtrConstant(0));
            DCI.AddToWorklist(Val.getNode());
          }
          return Val;
4436 } else if (N->getOperand(0).getValueType() == MVT::i32) {
          // If the intermediate type is i32, we can avoid the load/store here
          // too.
        }
      }
    }
    break;
  case ISD::STORE:
4444 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
4445 if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() &&
4446 !cast<StoreSDNode>(N)->isTruncatingStore() &&
4447 N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
4448 N->getOperand(1).getValueType() == MVT::i32 &&
4449 N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) {
4450 SDValue Val = N->getOperand(1).getOperand(0);
4451 if (Val.getValueType() == MVT::f32) {
4452 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
4453 DCI.AddToWorklist(Val.getNode());
4455 Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val);
4456 DCI.AddToWorklist(Val.getNode());
4458 Val = DAG.getNode(PPCISD::STFIWX, dl, MVT::Other, N->getOperand(0), Val,
4459 N->getOperand(2), N->getOperand(3));
      DCI.AddToWorklist(Val.getNode());
      return Val;
    }
4464 // Turn STORE (BSWAP) -> sthbrx/stwbrx.
4465 if (N->getOperand(1).getOpcode() == ISD::BSWAP &&
4466 N->getOperand(1).getNode()->hasOneUse() &&
4467 (N->getOperand(1).getValueType() == MVT::i32 ||
4468 N->getOperand(1).getValueType() == MVT::i16)) {
4469 SDValue BSwapOp = N->getOperand(1).getOperand(0);
4470 // Do an any-extend to 32-bits if this is a half-word input.
4471 if (BSwapOp.getValueType() == MVT::i16)
4472 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
4474 return DAG.getNode(PPCISD::STBRX, dl, MVT::Other, N->getOperand(0),
4475 BSwapOp, N->getOperand(2), N->getOperand(3),
                         DAG.getValueType(N->getOperand(1).getValueType()));
    }
    break;
  case ISD::BSWAP:
4480 // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
4481 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
4482 N->getOperand(0).hasOneUse() &&
4483 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16)) {
4484 SDValue Load = N->getOperand(0);
4485 LoadSDNode *LD = cast<LoadSDNode>(Load);
4486 // Create the byte-swapping load.
4487 std::vector<MVT> VTs;
4488 VTs.push_back(MVT::i32);
4489 VTs.push_back(MVT::Other);
4490 SDValue MO = DAG.getMemOperand(LD->getMemOperand());
      SDValue Ops[] = {
        LD->getChain(),    // Chain
        LD->getBasePtr(),  // Ptr
        MO,                // MemOperand
        DAG.getValueType(N->getValueType(0)) // VT
      };
4497 SDValue BSLoad = DAG.getNode(PPCISD::LBRX, dl, VTs, Ops, 4);
4499 // If this is an i16 load, insert the truncate.
4500 SDValue ResVal = BSLoad;
4501 if (N->getValueType(0) == MVT::i16)
4502 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
      // First, combine the bswap away.  This makes the value produced by the
      // load dead.
4506 DCI.CombineTo(N, ResVal);
      // Next, combine the load away; we give it a bogus result value but a real
4509 // chain result. The result value is dead because the bswap is dead.
4510 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
4512 // Return N so it doesn't get rechecked!
      return SDValue(N, 0);
    }
    break;
4517 case PPCISD::VCMP: {
4518 // If a VCMPo node already exists with exactly the same operands as this
4519 // node, use its result instead of this node (VCMPo computes both a CR6 and
4520 // a normal output).
4522 if (!N->getOperand(0).hasOneUse() &&
4523 !N->getOperand(1).hasOneUse() &&
4524 !N->getOperand(2).hasOneUse()) {
4526 // Scan all of the users of the LHS, looking for VCMPo's that match.
4527 SDNode *VCMPoNode = 0;
4529 SDNode *LHSN = N->getOperand(0).getNode();
4530 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
           UI != E; ++UI)
        if (UI->getOpcode() == PPCISD::VCMPo &&
4533 UI->getOperand(1) == N->getOperand(1) &&
4534 UI->getOperand(2) == N->getOperand(2) &&
            UI->getOperand(0) == N->getOperand(0)) {
          VCMPoNode = *UI;
          break;
        }
      // If there is no VCMPo node, or if the flag value has a single use,
      // don't transform this.
      if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
        break;
4545 // Look at the (necessarily single) use of the flag value. If it has a
4546 // chain, this transformation is more complex. Note that multiple things
4547 // could use the value result, which we should ignore.
4548 SDNode *FlagUser = 0;
4549 for (SDNode::use_iterator UI = VCMPoNode->use_begin();
4550 FlagUser == 0; ++UI) {
4551 assert(UI != VCMPoNode->use_end() && "Didn't find user!");
        SDNode *User = *UI;
        for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
          if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
            FlagUser = User;
            break;
          }
        }
      }
4561 // If the user is a MFCR instruction, we know this is safe. Otherwise we
4562 // give up for right now.
      if (FlagUser->getOpcode() == PPCISD::MFCR)
        return SDValue(VCMPoNode, 0);
    }
    break;
  }
  case ISD::BR_CC: {
4569 // If this is a branch on an altivec predicate comparison, lower this so
4570 // that we don't have to do a MFCR: instead, branch directly on CR6. This
4571 // lowering is done pre-legalize, because the legalizer lowers the predicate
4572 // compare down to code that is difficult to reassemble.
4573 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
4574 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
    int CompareOpc;
    bool isDot;

    if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
4579 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
4580 getAltivecCompareInfo(LHS, CompareOpc, isDot)) {
4581 assert(isDot && "Can't compare against a vector result!");
4583 // If this is a comparison against something other than 0/1, then we know
4584 // that the condition is never/always true.
4585 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
4586 if (Val != 0 && Val != 1) {
4587 if (CC == ISD::SETEQ) // Cond never true, remove branch.
4588 return N->getOperand(0);
4589 // Always !=, turn it into an unconditional branch.
4590 return DAG.getNode(ISD::BR, dl, MVT::Other,
                           N->getOperand(0), N->getOperand(4));
      }
4594 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
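      // seteq against 1 (or setne against 0) means "branch when the
      // predicate bit is set"; the XOR above folds the four combinations
      // into a single polarity flag.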
4596 // Create the PPCISD altivec 'dot' comparison node.
4597 std::vector<MVT> VTs;
      SDValue Ops[] = {
        LHS.getOperand(2),  // LHS of compare
        LHS.getOperand(3),  // RHS of compare
        DAG.getConstant(CompareOpc, MVT::i32)
      };
4603 VTs.push_back(LHS.getOperand(2).getValueType());
4604 VTs.push_back(MVT::Flag);
4605 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3);
4607 // Unpack the result based on how the target uses it.
4608 PPC::Predicate CompOpc;
      switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
      default:  // Can't happen, don't crash on invalid number though.
      case 0:   // Branch on the value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
        break;
      case 1:   // Branch on the inverted value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
        break;
      case 2:   // Branch on the value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
        break;
      case 3:   // Branch on the inverted value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
        break;
      }
4625 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
4626 DAG.getConstant(CompOpc, MVT::i32),
4627 DAG.getRegister(PPC::CR6, MVT::i32),
                         N->getOperand(4), CompNode.getValue(1));
    }
    break;
  }
  }

  return SDValue();
}
4637 //===----------------------------------------------------------------------===//
4638 // Inline Assembly Support
4639 //===----------------------------------------------------------------------===//
void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                       const APInt &Mask,
                                                       APInt &KnownZero,
                                                       APInt &KnownOne,
                                                       const SelectionDAG &DAG,
4646 unsigned Depth) const {
4647 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
4648 switch (Op.getOpcode()) {
  default: break;
  case PPCISD::LBRX: {
4651 // lhbrx is known to have the top bits cleared out.
4652 if (cast<VTSDNode>(Op.getOperand(3))->getVT() == MVT::i16)
      KnownZero = 0xFFFF0000;
    break;
  }
4656 case ISD::INTRINSIC_WO_CHAIN: {
4657 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
4660 case Intrinsic::ppc_altivec_vcmpeqfp_p:
4661 case Intrinsic::ppc_altivec_vcmpequb_p:
4662 case Intrinsic::ppc_altivec_vcmpequh_p:
4663 case Intrinsic::ppc_altivec_vcmpequw_p:
4664 case Intrinsic::ppc_altivec_vcmpgefp_p:
4665 case Intrinsic::ppc_altivec_vcmpgtfp_p:
4666 case Intrinsic::ppc_altivec_vcmpgtsb_p:
4667 case Intrinsic::ppc_altivec_vcmpgtsh_p:
4668 case Intrinsic::ppc_altivec_vcmpgtsw_p:
4669 case Intrinsic::ppc_altivec_vcmpgtub_p:
4670 case Intrinsic::ppc_altivec_vcmpgtuh_p:
4671 case Intrinsic::ppc_altivec_vcmpgtuw_p:
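      // Illustrative note: each predicate intrinsic above materializes 0 or 1
      // in a GPR, so once these zero bits are reported a following
      // (and X, 1) or a setcc against 0/1 can be folded away.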
      KnownZero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}

/// getConstraintType - Given a constraint, return the type of
/// constraint it is for this target.
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'b':
    case 'r':
    case 'f':
    case 'v':
    case 'y':
      return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

std::pair<unsigned, const TargetRegisterClass*>
PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
    case 'r':   // R0-R31
      if (VT == MVT::i64 && PPCSubTarget.isPPC64())
        return std::make_pair(0U, PPC::G8RCRegisterClass);
      return std::make_pair(0U, PPC::GPRCRegisterClass);
    case 'f':
      if (VT == MVT::f32)
        return std::make_pair(0U, PPC::F4RCRegisterClass);
      else if (VT == MVT::f64)
        return std::make_pair(0U, PPC::F8RCRegisterClass);
      break;
    case 'v':
      return std::make_pair(0U, PPC::VRRCRegisterClass);
    case 'y':   // crrc
      return std::make_pair(0U, PPC::CRRCRegisterClass);
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

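// Illustrative GCC-style uses of the letters handled above (hypothetical
// examples, not from the original source):
//   asm("add %0,%1,%2"    : "=r"(d) : "r"(a), "r"(b));  // 'r' -> GPRC/G8RC
//   asm("fadd %0,%1,%2"   : "=f"(x) : "f"(y), "f"(z));  // 'f' -> F4RC/F8RC
//   asm("vaddfp %0,%1,%2" : "=v"(u) : "v"(v), "v"(w));  // 'v' -> VRRC
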
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.  If hasMemory is
/// true it means one of the asm constraints of the inline asm instruction
/// being processed is 'm'.
void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, char Letter,
                                                     bool hasMemory,
                                                     std::vector<SDValue>&Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result(0,0);
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
    if (!CST) return; // Must be an immediate to match.
    unsigned Value = CST->getZExtValue();
    switch (Letter) {
    default: assert(0 && "Unknown constraint letter!");
    case 'I':  // "I" is a signed 16-bit constant.
      if ((short)Value == (int)Value)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
    case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
      if ((short)Value == 0)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
      if ((Value >> 16) == 0)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'M':  // "M" is a constant that is greater than 31.
      if (Value > 31)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'N':  // "N" is a positive constant that is an exact power of two.
      if ((int)Value > 0 && isPowerOf2_32(Value))
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'O':  // "O" is the constant zero.
      if (Value == 0)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
      if ((short)-Value == (int)-Value)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    }
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Handle standard constraint letters.
  TargetLowering::LowerAsmOperandForConstraint(Op, Letter, hasMemory, Ops, DAG);
}

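// Illustrative sketch (hypothetical source, not from this file): for
//   asm("addi %0,%1,%2" : "=r"(d) : "r"(a), "I"(42));
// the 'I' path above checks that 42 fits in a signed 16-bit field and emits a
// target constant; a value such as 0x12345 fails the check, leaves Result
// null, and falls through to the generic constraint handling.
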
// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              const Type *Ty) const {
  // FIXME: PPC does not allow r+i addressing modes for vectors!

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r,
  switch (AM.Scale) {
  case 0:  // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

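// Illustrative examples of the rules above: "lwz r3, 8(r4)" (r+i) and
// "lwzx r3, r4, r5" (r+r) are both representable, while r+r+i and any scaled
// index other than 2*r (which is re-expressed as r+r) are rejected.
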
/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for load / store of the
/// given type.
bool PPCTargetLowering::isLegalAddressImmediate(int64_t V,
                                                const Type *Ty) const {
  // PPC allows a sign-extended 16-bit immediate field.
  return (V > -(1 << 16) && V < (1 << 16)-1);
}

bool PPCTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
  return false;
}

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) {
  DebugLoc dl = Op.getDebugLoc();
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  // Just load the return address off the stack.
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);

  // Make sure the function really does not optimize away the store of the RA
  // to the stack.
  FuncInfo->setLRStoreRequired();
  return DAG.getLoad(getPointerTy(), dl,
                     DAG.getEntryNode(), RetAddrFI, NULL, 0);
}

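// Illustrative note: this services __builtin_return_address(0); calling
// setLRStoreRequired() above keeps the prologue's store of LR to its stack
// slot alive so the load returns a meaningful value.
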
SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
  DebugLoc dl = Op.getDebugLoc();
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool is31 = (NoFramePointerElim || MFI->hasVarSizedObjects())
                  && MFI->getStackSize();

  if (isPPC64)
    return DAG.getCopyFromReg(DAG.getEntryNode(), dl, is31 ? PPC::X31 : PPC::X1,
                              MVT::i64);
  else
    return DAG.getCopyFromReg(DAG.getEntryNode(), dl, is31 ? PPC::R31 : PPC::R1,
                              MVT::i32);
}

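// Illustrative note: this services __builtin_frame_address(0).  When no
// dedicated frame pointer exists, the stack pointer (R1/X1) stands in for it;
// otherwise R31/X31 holds the frame pointer.
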
bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The PowerPC target isn't yet aware of offsets.
  return false;
}