1 //===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the PPCISelLowering class.
12 //===----------------------------------------------------------------------===//
14 #include "PPCISelLowering.h"
15 #include "PPCMachineFunctionInfo.h"
16 #include "PPCPredicates.h"
17 #include "PPCTargetMachine.h"
18 #include "PPCPerfectShuffle.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/VectorExtras.h"
21 #include "llvm/CodeGen/CallingConvLower.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/CodeGen/MachineFunction.h"
24 #include "llvm/CodeGen/MachineInstrBuilder.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/PseudoSourceValue.h"
27 #include "llvm/CodeGen/SelectionDAG.h"
28 #include "llvm/CallingConv.h"
29 #include "llvm/Constants.h"
30 #include "llvm/Function.h"
31 #include "llvm/Intrinsics.h"
32 #include "llvm/Support/MathExtras.h"
33 #include "llvm/Target/TargetOptions.h"
34 #include "llvm/Support/CommandLine.h"
37 static cl::opt<bool> EnablePPCPreinc("enable-ppc-preinc",
38 cl::desc("enable preincrement load/store generation on PPC (experimental)"),
41 PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
42 : TargetLowering(TM), PPCSubTarget(*TM.getSubtargetImpl()) {
46 // Use _setjmp/_longjmp instead of setjmp/longjmp.
47 setUseUnderscoreSetJmp(true);
48 setUseUnderscoreLongJmp(true);
50 // Set up the register classes.
51 addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
52 addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
53 addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);
55 // PowerPC has an i16 but no i8 (or i1) SEXTLOAD
56 setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
57 setLoadXAction(ISD::SEXTLOAD, MVT::i8, Expand);
59 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
61 // PowerPC has pre-inc load and store's.
62 setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
63 setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
64 setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
65 setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
66 setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
67 setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
68 setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
69 setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
70 setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
71 setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
73 // Shortening conversions involving ppcf128 get expanded (2 regs -> 1 reg)
74 setConvertAction(MVT::ppcf128, MVT::f64, Expand);
75 setConvertAction(MVT::ppcf128, MVT::f32, Expand);
76 // This is used in the ppcf128->int sequence. Note it has different semantics
77 // from FP_ROUND: that rounds to nearest, this rounds to zero.
78 setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);
80 // PowerPC has no SREM/UREM instructions
81 setOperationAction(ISD::SREM, MVT::i32, Expand);
82 setOperationAction(ISD::UREM, MVT::i32, Expand);
83 setOperationAction(ISD::SREM, MVT::i64, Expand);
84 setOperationAction(ISD::UREM, MVT::i64, Expand);
86 // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
87 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
88 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
89 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
90 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
91 setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
92 setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
93 setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
94 setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
96 // We don't support sin/cos/sqrt/fmod/pow
97 setOperationAction(ISD::FSIN , MVT::f64, Expand);
98 setOperationAction(ISD::FCOS , MVT::f64, Expand);
99 setOperationAction(ISD::FREM , MVT::f64, Expand);
100 setOperationAction(ISD::FPOW , MVT::f64, Expand);
101 setOperationAction(ISD::FSIN , MVT::f32, Expand);
102 setOperationAction(ISD::FCOS , MVT::f32, Expand);
103 setOperationAction(ISD::FREM , MVT::f32, Expand);
104 setOperationAction(ISD::FPOW , MVT::f32, Expand);
106 setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
108 // If we're enabling GP optimizations, use hardware square root
109 if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) {
110 setOperationAction(ISD::FSQRT, MVT::f64, Expand);
111 setOperationAction(ISD::FSQRT, MVT::f32, Expand);
114 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
115 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
117 // PowerPC does not have BSWAP, CTPOP or CTTZ
118 setOperationAction(ISD::BSWAP, MVT::i32 , Expand);
119 setOperationAction(ISD::CTPOP, MVT::i32 , Expand);
120 setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
121 setOperationAction(ISD::BSWAP, MVT::i64 , Expand);
122 setOperationAction(ISD::CTPOP, MVT::i64 , Expand);
123 setOperationAction(ISD::CTTZ , MVT::i64 , Expand);
125 // PowerPC does not have ROTR
126 setOperationAction(ISD::ROTR, MVT::i32 , Expand);
127 setOperationAction(ISD::ROTR, MVT::i64 , Expand);
129 // PowerPC does not have Select
130 setOperationAction(ISD::SELECT, MVT::i32, Expand);
131 setOperationAction(ISD::SELECT, MVT::i64, Expand);
132 setOperationAction(ISD::SELECT, MVT::f32, Expand);
133 setOperationAction(ISD::SELECT, MVT::f64, Expand);
135 // PowerPC wants to turn select_cc of FP into fsel when possible.
136 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
137 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
139 // PowerPC wants to optimize integer setcc a bit
140 setOperationAction(ISD::SETCC, MVT::i32, Custom);
142 // PowerPC does not have BRCOND which requires SetCC
143 setOperationAction(ISD::BRCOND, MVT::Other, Expand);
145 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
147 // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
148 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
150 // PowerPC does not have [U|S]INT_TO_FP
151 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
152 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
154 setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
155 setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
156 setOperationAction(ISD::BIT_CONVERT, MVT::i64, Expand);
157 setOperationAction(ISD::BIT_CONVERT, MVT::f64, Expand);
159 // We cannot sextinreg(i1). Expand to shifts.
160 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
162 // Support label based line numbers.
163 setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
164 setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
166 setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
167 setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);
168 setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
169 setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
172 // We want to legalize GlobalAddress and ConstantPool nodes into the
173 // appropriate instructions to materialize the address.
174 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
175 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
176 setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
177 setOperationAction(ISD::JumpTable, MVT::i32, Custom);
178 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
179 setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
180 setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
181 setOperationAction(ISD::JumpTable, MVT::i64, Custom);
183 // RET must be custom lowered, to meet ABI requirements.
184 setOperationAction(ISD::RET , MVT::Other, Custom);
187 setOperationAction(ISD::TRAP, MVT::Other, Legal);
189 // TRAMPOLINE is custom lowered.
190 setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);
192 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
193 setOperationAction(ISD::VASTART , MVT::Other, Custom);
195 // VAARG is custom lowered with ELF 32 ABI
196 if (TM.getSubtarget<PPCSubtarget>().isELF32_ABI())
197 setOperationAction(ISD::VAARG, MVT::Other, Custom);
199 setOperationAction(ISD::VAARG, MVT::Other, Expand);
201 // Use the default implementation.
202 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
203 setOperationAction(ISD::VAEND , MVT::Other, Expand);
204 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
205 setOperationAction(ISD::STACKRESTORE , MVT::Other, Custom);
206 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
207 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom);
209 // We want to custom lower some of our intrinsics.
210 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
212 if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
213 // They also have instructions for converting between i64 and fp.
214 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
215 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
216 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
217 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
218 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
220 // FIXME: disable this lowered code. This generates 64-bit register values,
221 // and we don't model the fact that the top part is clobbered by calls. We
222 // need to flag these together so that the value isn't live across a call.
223 //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
225 // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT
226 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
228 // PowerPC does not have FP_TO_UINT on 32-bit implementations.
229 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
232 if (TM.getSubtarget<PPCSubtarget>().use64BitRegs()) {
233 // 64-bit PowerPC implementations can support i64 types directly
234 addRegisterClass(MVT::i64, PPC::G8RCRegisterClass);
235 // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
236 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
237 // 64-bit PowerPC wants to expand i128 shifts itself.
238 setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
239 setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
240 setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
242 // 32-bit PowerPC wants to expand i64 shifts itself.
243 setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
244 setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
245 setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
248 if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) {
249 // First set operation action for all vector types to expand. Then we
250 // will selectively turn on ones that can be effectively codegen'd.
251 for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
252 i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
253 MVT VT = (MVT::SimpleValueType)i;
255 // add/sub are legal for all supported vector VT's.
256 setOperationAction(ISD::ADD , VT, Legal);
257 setOperationAction(ISD::SUB , VT, Legal);
259 // We promote all shuffles to v16i8.
260 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
261 AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);
263 // We promote all non-typed operations to v4i32.
264 setOperationAction(ISD::AND , VT, Promote);
265 AddPromotedToType (ISD::AND , VT, MVT::v4i32);
266 setOperationAction(ISD::OR , VT, Promote);
267 AddPromotedToType (ISD::OR , VT, MVT::v4i32);
268 setOperationAction(ISD::XOR , VT, Promote);
269 AddPromotedToType (ISD::XOR , VT, MVT::v4i32);
270 setOperationAction(ISD::LOAD , VT, Promote);
271 AddPromotedToType (ISD::LOAD , VT, MVT::v4i32);
272 setOperationAction(ISD::SELECT, VT, Promote);
273 AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
274 setOperationAction(ISD::STORE, VT, Promote);
275 AddPromotedToType (ISD::STORE, VT, MVT::v4i32);
277 // No other operations are legal.
278 setOperationAction(ISD::MUL , VT, Expand);
279 setOperationAction(ISD::SDIV, VT, Expand);
280 setOperationAction(ISD::SREM, VT, Expand);
281 setOperationAction(ISD::UDIV, VT, Expand);
282 setOperationAction(ISD::UREM, VT, Expand);
283 setOperationAction(ISD::FDIV, VT, Expand);
284 setOperationAction(ISD::FNEG, VT, Expand);
285 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
286 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
287 setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
288 setOperationAction(ISD::UMUL_LOHI, VT, Expand);
289 setOperationAction(ISD::SMUL_LOHI, VT, Expand);
290 setOperationAction(ISD::UDIVREM, VT, Expand);
291 setOperationAction(ISD::SDIVREM, VT, Expand);
292 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
293 setOperationAction(ISD::FPOW, VT, Expand);
294 setOperationAction(ISD::CTPOP, VT, Expand);
295 setOperationAction(ISD::CTLZ, VT, Expand);
296 setOperationAction(ISD::CTTZ, VT, Expand);
299 // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
300 // with merges, splats, etc.
301 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
303 setOperationAction(ISD::AND , MVT::v4i32, Legal);
304 setOperationAction(ISD::OR , MVT::v4i32, Legal);
305 setOperationAction(ISD::XOR , MVT::v4i32, Legal);
306 setOperationAction(ISD::LOAD , MVT::v4i32, Legal);
307 setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
308 setOperationAction(ISD::STORE , MVT::v4i32, Legal);
310 addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass);
311 addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass);
312 addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass);
313 addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass);
315 setOperationAction(ISD::MUL, MVT::v4f32, Legal);
316 setOperationAction(ISD::MUL, MVT::v4i32, Custom);
317 setOperationAction(ISD::MUL, MVT::v8i16, Custom);
318 setOperationAction(ISD::MUL, MVT::v16i8, Custom);
320 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
321 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);
323 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
324 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
325 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
326 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
329 setShiftAmountType(MVT::i32);
330 setSetCCResultContents(ZeroOrOneSetCCResult);
332 if (TM.getSubtarget<PPCSubtarget>().isPPC64()) {
333 setStackPointerRegisterToSaveRestore(PPC::X1);
334 setExceptionPointerRegister(PPC::X3);
335 setExceptionSelectorRegister(PPC::X4);
337 setStackPointerRegisterToSaveRestore(PPC::R1);
338 setExceptionPointerRegister(PPC::R3);
339 setExceptionSelectorRegister(PPC::R4);
342 // We have target-specific dag combine patterns for the following nodes:
343 setTargetDAGCombine(ISD::SINT_TO_FP);
344 setTargetDAGCombine(ISD::STORE);
345 setTargetDAGCombine(ISD::BR_CC);
346 setTargetDAGCombine(ISD::BSWAP);
348 // Darwin long double math library functions have $LDBL128 appended.
349 if (TM.getSubtarget<PPCSubtarget>().isDarwin()) {
350 setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
351 setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
352 setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
353 setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
354 setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
355 setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
356 setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
357 setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
358 setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
359 setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
362 computeRegisterProperties();
365 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
366 /// function arguments in the caller parameter area.
367 unsigned PPCTargetLowering::getByValTypeAlignment(const Type *Ty) const {
368 TargetMachine &TM = getTargetMachine();
369 // Darwin passes everything on 4 byte boundary.
370 if (TM.getSubtarget<PPCSubtarget>().isDarwin())
376 const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
379 case PPCISD::FSEL: return "PPCISD::FSEL";
380 case PPCISD::FCFID: return "PPCISD::FCFID";
381 case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
382 case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
383 case PPCISD::STFIWX: return "PPCISD::STFIWX";
384 case PPCISD::VMADDFP: return "PPCISD::VMADDFP";
385 case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP";
386 case PPCISD::VPERM: return "PPCISD::VPERM";
387 case PPCISD::Hi: return "PPCISD::Hi";
388 case PPCISD::Lo: return "PPCISD::Lo";
389 case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
390 case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
391 case PPCISD::SRL: return "PPCISD::SRL";
392 case PPCISD::SRA: return "PPCISD::SRA";
393 case PPCISD::SHL: return "PPCISD::SHL";
394 case PPCISD::EXTSW_32: return "PPCISD::EXTSW_32";
395 case PPCISD::STD_32: return "PPCISD::STD_32";
396 case PPCISD::CALL_ELF: return "PPCISD::CALL_ELF";
397 case PPCISD::CALL_Macho: return "PPCISD::CALL_Macho";
398 case PPCISD::MTCTR: return "PPCISD::MTCTR";
399 case PPCISD::BCTRL_Macho: return "PPCISD::BCTRL_Macho";
400 case PPCISD::BCTRL_ELF: return "PPCISD::BCTRL_ELF";
401 case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
402 case PPCISD::MFCR: return "PPCISD::MFCR";
403 case PPCISD::VCMP: return "PPCISD::VCMP";
404 case PPCISD::VCMPo: return "PPCISD::VCMPo";
405 case PPCISD::LBRX: return "PPCISD::LBRX";
406 case PPCISD::STBRX: return "PPCISD::STBRX";
407 case PPCISD::LARX: return "PPCISD::LARX";
408 case PPCISD::STCX: return "PPCISD::STCX";
409 case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
410 case PPCISD::MFFS: return "PPCISD::MFFS";
411 case PPCISD::MTFSB0: return "PPCISD::MTFSB0";
412 case PPCISD::MTFSB1: return "PPCISD::MTFSB1";
413 case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
414 case PPCISD::MTFSF: return "PPCISD::MTFSF";
415 case PPCISD::TAILCALL: return "PPCISD::TAILCALL";
416 case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
421 MVT PPCTargetLowering::getSetCCResultType(const SDValue &) const {
426 //===----------------------------------------------------------------------===//
427 // Node matching predicates, for use by the tblgen matching code.
428 //===----------------------------------------------------------------------===//
430 /// isFloatingPointZero - Return true if this is 0.0 or -0.0.
431 static bool isFloatingPointZero(SDValue Op) {
432 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
433 return CFP->getValueAPF().isZero();
434 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
435 // Maybe this has already been legalized into the constant pool?
436 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
437 if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
438 return CFP->getValueAPF().isZero();
443 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
444 /// true if Op is undef or if it matches the specified value.
445 static bool isConstantOrUndef(SDValue Op, unsigned Val) {
446 return Op.getOpcode() == ISD::UNDEF ||
447 cast<ConstantSDNode>(Op)->getZExtValue() == Val;
450 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
451 /// VPKUHUM instruction.
452 bool PPC::isVPKUHUMShuffleMask(SDNode *N, bool isUnary) {
454 for (unsigned i = 0; i != 16; ++i)
455 if (!isConstantOrUndef(N->getOperand(i), i*2+1))
458 for (unsigned i = 0; i != 8; ++i)
459 if (!isConstantOrUndef(N->getOperand(i), i*2+1) ||
460 !isConstantOrUndef(N->getOperand(i+8), i*2+1))
466 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
467 /// VPKUWUM instruction.
468 bool PPC::isVPKUWUMShuffleMask(SDNode *N, bool isUnary) {
470 for (unsigned i = 0; i != 16; i += 2)
471 if (!isConstantOrUndef(N->getOperand(i ), i*2+2) ||
472 !isConstantOrUndef(N->getOperand(i+1), i*2+3))
475 for (unsigned i = 0; i != 8; i += 2)
476 if (!isConstantOrUndef(N->getOperand(i ), i*2+2) ||
477 !isConstantOrUndef(N->getOperand(i+1), i*2+3) ||
478 !isConstantOrUndef(N->getOperand(i+8), i*2+2) ||
479 !isConstantOrUndef(N->getOperand(i+9), i*2+3))
485 /// isVMerge - Common function, used to match vmrg* shuffles.
487 static bool isVMerge(SDNode *N, unsigned UnitSize,
488 unsigned LHSStart, unsigned RHSStart) {
489 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
490 N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
491 assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
492 "Unsupported merge size!");
494 for (unsigned i = 0; i != 8/UnitSize; ++i) // Step over units
495 for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit
496 if (!isConstantOrUndef(N->getOperand(i*UnitSize*2+j),
497 LHSStart+j+i*UnitSize) ||
498 !isConstantOrUndef(N->getOperand(i*UnitSize*2+UnitSize+j),
499 RHSStart+j+i*UnitSize))
505 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
506 /// a VRGL* instruction with the specified unit size (1,2 or 4 bytes).
507 bool PPC::isVMRGLShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
509 return isVMerge(N, UnitSize, 8, 24);
510 return isVMerge(N, UnitSize, 8, 8);
513 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
514 /// a VRGH* instruction with the specified unit size (1,2 or 4 bytes).
515 bool PPC::isVMRGHShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
517 return isVMerge(N, UnitSize, 0, 16);
518 return isVMerge(N, UnitSize, 0, 0);
522 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
523 /// amount, otherwise return -1.
524 int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
525 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
526 N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
527 // Find the first non-undef value in the shuffle mask.
529 for (i = 0; i != 16 && N->getOperand(i).getOpcode() == ISD::UNDEF; ++i)
532 if (i == 16) return -1; // all undef.
534 // Otherwise, check to see if the rest of the elements are consequtively
535 // numbered from this value.
536 unsigned ShiftAmt = cast<ConstantSDNode>(N->getOperand(i))->getZExtValue();
537 if (ShiftAmt < i) return -1;
541 // Check the rest of the elements to see if they are consequtive.
542 for (++i; i != 16; ++i)
543 if (!isConstantOrUndef(N->getOperand(i), ShiftAmt+i))
546 // Check the rest of the elements to see if they are consequtive.
547 for (++i; i != 16; ++i)
548 if (!isConstantOrUndef(N->getOperand(i), (ShiftAmt+i) & 15))
555 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
556 /// specifies a splat of a single element that is suitable for input to
557 /// VSPLTB/VSPLTH/VSPLTW.
558 bool PPC::isSplatShuffleMask(SDNode *N, unsigned EltSize) {
559 assert(N->getOpcode() == ISD::BUILD_VECTOR &&
560 N->getNumOperands() == 16 &&
561 (EltSize == 1 || EltSize == 2 || EltSize == 4));
563 // This is a splat operation if each element of the permute is the same, and
564 // if the value doesn't reference the second vector.
565 unsigned ElementBase = 0;
566 SDValue Elt = N->getOperand(0);
567 if (ConstantSDNode *EltV = dyn_cast<ConstantSDNode>(Elt))
568 ElementBase = EltV->getZExtValue();
570 return false; // FIXME: Handle UNDEF elements too!
572 if (cast<ConstantSDNode>(Elt)->getZExtValue() >= 16)
575 // Check that they are consequtive.
576 for (unsigned i = 1; i != EltSize; ++i) {
577 if (!isa<ConstantSDNode>(N->getOperand(i)) ||
578 cast<ConstantSDNode>(N->getOperand(i))->getZExtValue() != i+ElementBase)
582 assert(isa<ConstantSDNode>(Elt) && "Invalid VECTOR_SHUFFLE mask!");
583 for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
584 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
585 assert(isa<ConstantSDNode>(N->getOperand(i)) &&
586 "Invalid VECTOR_SHUFFLE mask!");
587 for (unsigned j = 0; j != EltSize; ++j)
588 if (N->getOperand(i+j) != N->getOperand(j))
595 /// isAllNegativeZeroVector - Returns true if all elements of build_vector
597 bool PPC::isAllNegativeZeroVector(SDNode *N) {
598 assert(N->getOpcode() == ISD::BUILD_VECTOR);
599 if (PPC::isSplatShuffleMask(N, N->getNumOperands()))
600 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N))
601 return CFP->getValueAPF().isNegZero();
605 /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
606 /// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
607 unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
608 assert(isSplatShuffleMask(N, EltSize));
609 return cast<ConstantSDNode>(N->getOperand(0))->getZExtValue() / EltSize;
612 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
613 /// by using a vspltis[bhw] instruction of the specified element size, return
614 /// the constant being splatted. The ByteSize field indicates the number of
615 /// bytes of each element [124] -> [bhw].
616 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
619 // If ByteSize of the splat is bigger than the element size of the
620 // build_vector, then we have a case where we are checking for a splat where
621 // multiple elements of the buildvector are folded together into a single
622 // logical element of the splat (e.g. "vsplish 1" to splat {0,1}*8).
623 unsigned EltSize = 16/N->getNumOperands();
624 if (EltSize < ByteSize) {
625 unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval.
626 SDValue UniquedVals[4];
627 assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
629 // See if all of the elements in the buildvector agree across.
630 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
631 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
632 // If the element isn't a constant, bail fully out.
633 if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
636 if (UniquedVals[i&(Multiple-1)].getNode() == 0)
637 UniquedVals[i&(Multiple-1)] = N->getOperand(i);
638 else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
639 return SDValue(); // no match.
642 // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
643 // either constant or undef values that are identical for each chunk. See
644 // if these chunks can form into a larger vspltis*.
646 // Check to see if all of the leading entries are either 0 or -1. If
647 // neither, then this won't fit into the immediate field.
648 bool LeadingZero = true;
649 bool LeadingOnes = true;
650 for (unsigned i = 0; i != Multiple-1; ++i) {
651 if (UniquedVals[i].getNode() == 0) continue; // Must have been undefs.
653 LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
654 LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
656 // Finally, check the least significant entry.
658 if (UniquedVals[Multiple-1].getNode() == 0)
659 return DAG.getTargetConstant(0, MVT::i32); // 0,0,0,undef
660 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
662 return DAG.getTargetConstant(Val, MVT::i32); // 0,0,0,4 -> vspltisw(4)
665 if (UniquedVals[Multiple-1].getNode() == 0)
666 return DAG.getTargetConstant(~0U, MVT::i32); // -1,-1,-1,undef
667 int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
668 if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2)
669 return DAG.getTargetConstant(Val, MVT::i32);
675 // Check to see if this buildvec has a single non-undef value in its elements.
676 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
677 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
678 if (OpVal.getNode() == 0)
679 OpVal = N->getOperand(i);
680 else if (OpVal != N->getOperand(i))
684 if (OpVal.getNode() == 0) return SDValue(); // All UNDEF: use implicit def.
686 unsigned ValSizeInBytes = 0;
688 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
689 Value = CN->getZExtValue();
690 ValSizeInBytes = CN->getValueType(0).getSizeInBits()/8;
691 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
692 assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
693 Value = FloatToBits(CN->getValueAPF().convertToFloat());
697 // If the splat value is larger than the element value, then we can never do
698 // this splat. The only case that we could fit the replicated bits into our
699 // immediate field for would be zero, and we prefer to use vxor for it.
700 if (ValSizeInBytes < ByteSize) return SDValue();
702 // If the element value is larger than the splat value, cut it in half and
703 // check to see if the two halves are equal. Continue doing this until we
704 // get to ByteSize. This allows us to handle 0x01010101 as 0x01.
705 while (ValSizeInBytes > ByteSize) {
706 ValSizeInBytes >>= 1;
708 // If the top half equals the bottom half, we're still ok.
709 if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
710 (Value & ((1 << (8*ValSizeInBytes))-1)))
714 // Properly sign extend the value.
715 int ShAmt = (4-ByteSize)*8;
716 int MaskVal = ((int)Value << ShAmt) >> ShAmt;
718 // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
719 if (MaskVal == 0) return SDValue();
721 // Finally, if this value fits in a 5 bit sext field, return it
722 if (((MaskVal << (32-5)) >> (32-5)) == MaskVal)
723 return DAG.getTargetConstant(MaskVal, MVT::i32);
727 //===----------------------------------------------------------------------===//
728 // Addressing Mode Selection
729 //===----------------------------------------------------------------------===//
731 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit
732 /// or 64-bit immediate, and if the value can be accurately represented as a
733 /// sign extension from a 16-bit value. If so, this returns true and the
735 static bool isIntS16Immediate(SDNode *N, short &Imm) {
736 if (N->getOpcode() != ISD::Constant)
739 Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
740 if (N->getValueType(0) == MVT::i32)
741 return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
743 return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
745 static bool isIntS16Immediate(SDValue Op, short &Imm) {
746 return isIntS16Immediate(Op.getNode(), Imm);
750 /// SelectAddressRegReg - Given the specified addressed, check to see if it
751 /// can be represented as an indexed [r+r] operation. Returns false if it
752 /// can be more efficiently represented with [r+imm].
753 bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
757 if (N.getOpcode() == ISD::ADD) {
758 if (isIntS16Immediate(N.getOperand(1), imm))
760 if (N.getOperand(1).getOpcode() == PPCISD::Lo)
763 Base = N.getOperand(0);
764 Index = N.getOperand(1);
766 } else if (N.getOpcode() == ISD::OR) {
767 if (isIntS16Immediate(N.getOperand(1), imm))
768 return false; // r+i can fold it if we can.
770 // If this is an or of disjoint bitfields, we can codegen this as an add
771 // (for better address arithmetic) if the LHS and RHS of the OR are provably
773 APInt LHSKnownZero, LHSKnownOne;
774 APInt RHSKnownZero, RHSKnownOne;
775 DAG.ComputeMaskedBits(N.getOperand(0),
776 APInt::getAllOnesValue(N.getOperand(0)
777 .getValueSizeInBits()),
778 LHSKnownZero, LHSKnownOne);
780 if (LHSKnownZero.getBoolValue()) {
781 DAG.ComputeMaskedBits(N.getOperand(1),
782 APInt::getAllOnesValue(N.getOperand(1)
783 .getValueSizeInBits()),
784 RHSKnownZero, RHSKnownOne);
785 // If all of the bits are known zero on the LHS or RHS, the add won't
787 if (~(LHSKnownZero | RHSKnownZero) == 0) {
788 Base = N.getOperand(0);
789 Index = N.getOperand(1);
798 /// Returns true if the address N can be represented by a base register plus
799 /// a signed 16-bit displacement [r+imm], and if it is not better
800 /// represented as reg+reg.
801 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
802 SDValue &Base, SelectionDAG &DAG){
803 // If this can be more profitably realized as r+r, fail.
804 if (SelectAddressRegReg(N, Disp, Base, DAG))
807 if (N.getOpcode() == ISD::ADD) {
809 if (isIntS16Immediate(N.getOperand(1), imm)) {
810 Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
811 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
812 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
814 Base = N.getOperand(0);
816 return true; // [r+i]
817 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
818 // Match LOAD (ADD (X, Lo(G))).
819 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
820 && "Cannot handle constant offsets yet!");
821 Disp = N.getOperand(1).getOperand(0); // The global address.
822 assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
823 Disp.getOpcode() == ISD::TargetConstantPool ||
824 Disp.getOpcode() == ISD::TargetJumpTable);
825 Base = N.getOperand(0);
826 return true; // [&g+r]
828 } else if (N.getOpcode() == ISD::OR) {
830 if (isIntS16Immediate(N.getOperand(1), imm)) {
831 // If this is an or of disjoint bitfields, we can codegen this as an add
832 // (for better address arithmetic) if the LHS and RHS of the OR are
833 // provably disjoint.
834 APInt LHSKnownZero, LHSKnownOne;
835 DAG.ComputeMaskedBits(N.getOperand(0),
836 APInt::getAllOnesValue(N.getOperand(0)
837 .getValueSizeInBits()),
838 LHSKnownZero, LHSKnownOne);
840 if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
841 // If all of the bits are known zero on the LHS or RHS, the add won't
843 Base = N.getOperand(0);
844 Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
848 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
849 // Loading from a constant address.
851 // If this address fits entirely in a 16-bit sext immediate field, codegen
854 if (isIntS16Immediate(CN, Imm)) {
855 Disp = DAG.getTargetConstant(Imm, CN->getValueType(0));
856 Base = DAG.getRegister(PPC::R0, CN->getValueType(0));
860 // Handle 32-bit sext immediates with LIS + addr mode.
861 if (CN->getValueType(0) == MVT::i32 ||
862 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) {
863 int Addr = (int)CN->getZExtValue();
865 // Otherwise, break this down into an LIS + disp.
866 Disp = DAG.getTargetConstant((short)Addr, MVT::i32);
868 Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32);
869 unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
870 Base = SDValue(DAG.getTargetNode(Opc, CN->getValueType(0), Base), 0);
875 Disp = DAG.getTargetConstant(0, getPointerTy());
876 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
877 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
880 return true; // [r+0]
883 /// SelectAddressRegRegOnly - Given the specified addressed, force it to be
884 /// represented as an indexed [r+r] operation.
885 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
888 // Check to see if we can easily represent this as an [r+r] address. This
889 // will fail if it thinks that the address is more profitably represented as
890 // reg+imm, e.g. where imm = 0.
891 if (SelectAddressRegReg(N, Base, Index, DAG))
894 // If the operand is an addition, always emit this as [r+r], since this is
895 // better (for code size, and execution, as the memop does the add for free)
896 // than emitting an explicit add.
897 if (N.getOpcode() == ISD::ADD) {
898 Base = N.getOperand(0);
899 Index = N.getOperand(1);
903 // Otherwise, do it the hard way, using R0 as the base register.
904 Base = DAG.getRegister(PPC::R0, N.getValueType());
909 /// SelectAddressRegImmShift - Returns true if the address N can be
910 /// represented by a base register plus a signed 14-bit displacement
911 /// [r+imm*4]. Suitable for use by STD and friends.
912 bool PPCTargetLowering::SelectAddressRegImmShift(SDValue N, SDValue &Disp,
915 // If this can be more profitably realized as r+r, fail.
916 if (SelectAddressRegReg(N, Disp, Base, DAG))
919 if (N.getOpcode() == ISD::ADD) {
921 if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
922 Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
923 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
924 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
926 Base = N.getOperand(0);
928 return true; // [r+i]
929 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
930 // Match LOAD (ADD (X, Lo(G))).
931 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
932 && "Cannot handle constant offsets yet!");
933 Disp = N.getOperand(1).getOperand(0); // The global address.
934 assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
935 Disp.getOpcode() == ISD::TargetConstantPool ||
936 Disp.getOpcode() == ISD::TargetJumpTable);
937 Base = N.getOperand(0);
938 return true; // [&g+r]
940 } else if (N.getOpcode() == ISD::OR) {
942 if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
943 // If this is an or of disjoint bitfields, we can codegen this as an add
944 // (for better address arithmetic) if the LHS and RHS of the OR are
945 // provably disjoint.
946 APInt LHSKnownZero, LHSKnownOne;
947 DAG.ComputeMaskedBits(N.getOperand(0),
948 APInt::getAllOnesValue(N.getOperand(0)
949 .getValueSizeInBits()),
950 LHSKnownZero, LHSKnownOne);
951 if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
952 // If all of the bits are known zero on the LHS or RHS, the add won't
954 Base = N.getOperand(0);
955 Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
959 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
960 // Loading from a constant address. Verify low two bits are clear.
961 if ((CN->getZExtValue() & 3) == 0) {
962 // If this address fits entirely in a 14-bit sext immediate field, codegen
965 if (isIntS16Immediate(CN, Imm)) {
966 Disp = DAG.getTargetConstant((unsigned short)Imm >> 2, getPointerTy());
967 Base = DAG.getRegister(PPC::R0, CN->getValueType(0));
971 // Fold the low-part of 32-bit absolute addresses into addr mode.
972 if (CN->getValueType(0) == MVT::i32 ||
973 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) {
974 int Addr = (int)CN->getZExtValue();
976 // Otherwise, break this down into an LIS + disp.
977 Disp = DAG.getTargetConstant((short)Addr >> 2, MVT::i32);
979 Base = DAG.getTargetConstant((Addr-(signed short)Addr) >> 16, MVT::i32);
980 unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
981 Base = SDValue(DAG.getTargetNode(Opc, CN->getValueType(0), Base), 0);
987 Disp = DAG.getTargetConstant(0, getPointerTy());
988 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
989 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
992 return true; // [r+0]
996 /// getPreIndexedAddressParts - returns true by value, base pointer and
997 /// offset pointer and addressing mode by reference if the node's address
998 /// can be legally represented as pre-indexed load / store address.
999 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
1001 ISD::MemIndexedMode &AM,
1002 SelectionDAG &DAG) {
1003 // Disabled by default for now.
1004 if (!EnablePPCPreinc) return false;
1008 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
1009 Ptr = LD->getBasePtr();
1010 VT = LD->getMemoryVT();
1012 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
1014 Ptr = ST->getBasePtr();
1015 VT = ST->getMemoryVT();
1019 // PowerPC doesn't have preinc load/store instructions for vectors.
1023 // TODO: Check reg+reg first.
1025 // LDU/STU use reg+imm*4, others use reg+imm.
1026 if (VT != MVT::i64) {
1028 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG))
1032 if (!SelectAddressRegImmShift(Ptr, Offset, Base, DAG))
1036 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
1037 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of
1038 // sext i32 to i64 when addr mode is r+i.
1039 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
1040 LD->getExtensionType() == ISD::SEXTLOAD &&
1041 isa<ConstantSDNode>(Offset))
1049 //===----------------------------------------------------------------------===//
1050 // LowerOperation implementation
1051 //===----------------------------------------------------------------------===//
1053 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
1054 SelectionDAG &DAG) {
1055 MVT PtrVT = Op.getValueType();
1056 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
1057 Constant *C = CP->getConstVal();
1058 SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
1059 SDValue Zero = DAG.getConstant(0, PtrVT);
1061 const TargetMachine &TM = DAG.getTarget();
1063 SDValue Hi = DAG.getNode(PPCISD::Hi, PtrVT, CPI, Zero);
1064 SDValue Lo = DAG.getNode(PPCISD::Lo, PtrVT, CPI, Zero);
1066 // If this is a non-darwin platform, we don't support non-static relo models
1068 if (TM.getRelocationModel() == Reloc::Static ||
1069 !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
1070 // Generate non-pic code that has direct accesses to the constant pool.
1071 // The address of the global is just (hi(&g)+lo(&g)).
1072 return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
1075 if (TM.getRelocationModel() == Reloc::PIC_) {
1076 // With PIC, the first instruction is actually "GR+hi(&G)".
1077 Hi = DAG.getNode(ISD::ADD, PtrVT,
1078 DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
1081 Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
1085 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) {
1086 MVT PtrVT = Op.getValueType();
1087 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
1088 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
1089 SDValue Zero = DAG.getConstant(0, PtrVT);
1091 const TargetMachine &TM = DAG.getTarget();
1093 SDValue Hi = DAG.getNode(PPCISD::Hi, PtrVT, JTI, Zero);
1094 SDValue Lo = DAG.getNode(PPCISD::Lo, PtrVT, JTI, Zero);
1096 // If this is a non-darwin platform, we don't support non-static relo models
1098 if (TM.getRelocationModel() == Reloc::Static ||
1099 !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
1100 // Generate non-pic code that has direct accesses to the constant pool.
1101 // The address of the global is just (hi(&g)+lo(&g)).
1102 return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
1105 if (TM.getRelocationModel() == Reloc::PIC_) {
1106 // With PIC, the first instruction is actually "GR+hi(&G)".
1107 Hi = DAG.getNode(ISD::ADD, PtrVT,
1108 DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
1111 Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
1115 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
1116 SelectionDAG &DAG) {
1117 assert(0 && "TLS not implemented for PPC.");
1118 return SDValue(); // Not reached
1121 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
1122 SelectionDAG &DAG) {
1123 MVT PtrVT = Op.getValueType();
1124 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
1125 GlobalValue *GV = GSDN->getGlobal();
1126 SDValue GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset());
1127 // If it's a debug information descriptor, don't mess with it.
1128 if (DAG.isVerifiedDebugInfoDesc(Op))
1130 SDValue Zero = DAG.getConstant(0, PtrVT);
1132 const TargetMachine &TM = DAG.getTarget();
1134 SDValue Hi = DAG.getNode(PPCISD::Hi, PtrVT, GA, Zero);
1135 SDValue Lo = DAG.getNode(PPCISD::Lo, PtrVT, GA, Zero);
1137 // If this is a non-darwin platform, we don't support non-static relo models
1139 if (TM.getRelocationModel() == Reloc::Static ||
1140 !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
1141 // Generate non-pic code that has direct accesses to globals.
1142 // The address of the global is just (hi(&g)+lo(&g)).
1143 return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
1146 if (TM.getRelocationModel() == Reloc::PIC_) {
1147 // With PIC, the first instruction is actually "GR+hi(&G)".
1148 Hi = DAG.getNode(ISD::ADD, PtrVT,
1149 DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
1152 Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
1154 if (!TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV))
1157 // If the global is weak or external, we have to go through the lazy
1159 return DAG.getLoad(PtrVT, DAG.getEntryNode(), Lo, NULL, 0);
1162 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) {
1163 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
1165 // If we're comparing for equality to zero, expose the fact that this is
1166 // implented as a ctlz/srl pair on ppc, so that the dag combiner can
1167 // fold the new nodes.
1168 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1169 if (C->isNullValue() && CC == ISD::SETEQ) {
1170 MVT VT = Op.getOperand(0).getValueType();
1171 SDValue Zext = Op.getOperand(0);
1172 if (VT.bitsLT(MVT::i32)) {
1174 Zext = DAG.getNode(ISD::ZERO_EXTEND, VT, Op.getOperand(0));
1176 unsigned Log2b = Log2_32(VT.getSizeInBits());
1177 SDValue Clz = DAG.getNode(ISD::CTLZ, VT, Zext);
1178 SDValue Scc = DAG.getNode(ISD::SRL, VT, Clz,
1179 DAG.getConstant(Log2b, MVT::i32));
1180 return DAG.getNode(ISD::TRUNCATE, MVT::i32, Scc);
1182 // Leave comparisons against 0 and -1 alone for now, since they're usually
1183 // optimized. FIXME: revisit this when we can custom lower all setcc
1185 if (C->isAllOnesValue() || C->isNullValue())
1189 // If we have an integer seteq/setne, turn it into a compare against zero
1190 // by xor'ing the rhs with the lhs, which is faster than setting a
1191 // condition register, reading it back out, and masking the correct bit. The
1192 // normal approach here uses sub to do this instead of xor. Using xor exposes
1193 // the result to other bit-twiddling opportunities.
1194 MVT LHSVT = Op.getOperand(0).getValueType();
1195 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
1196 MVT VT = Op.getValueType();
1197 SDValue Sub = DAG.getNode(ISD::XOR, LHSVT, Op.getOperand(0),
1199 return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC);
1204 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
1205 int VarArgsFrameIndex,
1206 int VarArgsStackOffset,
1207 unsigned VarArgsNumGPR,
1208 unsigned VarArgsNumFPR,
1209 const PPCSubtarget &Subtarget) {
1211 assert(0 && "VAARG in ELF32 ABI not implemented yet!");
1212 return SDValue(); // Not reached
1215 SDValue PPCTargetLowering::LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
1216 SDValue Chain = Op.getOperand(0);
1217 SDValue Trmp = Op.getOperand(1); // trampoline
1218 SDValue FPtr = Op.getOperand(2); // nested function
1219 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
1221 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1222 bool isPPC64 = (PtrVT == MVT::i64);
1223 const Type *IntPtrTy =
1224 DAG.getTargetLoweringInfo().getTargetData()->getIntPtrType();
1226 TargetLowering::ArgListTy Args;
1227 TargetLowering::ArgListEntry Entry;
1229 Entry.Ty = IntPtrTy;
1230 Entry.Node = Trmp; Args.push_back(Entry);
1232 // TrampSize == (isPPC64 ? 48 : 40);
1233 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40,
1234 isPPC64 ? MVT::i64 : MVT::i32);
1235 Args.push_back(Entry);
1237 Entry.Node = FPtr; Args.push_back(Entry);
1238 Entry.Node = Nest; Args.push_back(Entry);
1240 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
1241 std::pair<SDValue, SDValue> CallResult =
1242 LowerCallTo(Chain, Op.getValueType().getTypeForMVT(), false, false,
1243 false, false, CallingConv::C, false,
1244 DAG.getExternalSymbol("__trampoline_setup", PtrVT),
1248 { CallResult.first, CallResult.second };
1250 return DAG.getMergeValues(Ops, 2, false);
1253 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
1254 int VarArgsFrameIndex,
1255 int VarArgsStackOffset,
1256 unsigned VarArgsNumGPR,
1257 unsigned VarArgsNumFPR,
1258 const PPCSubtarget &Subtarget) {
1260 if (Subtarget.isMachoABI()) {
1261 // vastart just stores the address of the VarArgsFrameIndex slot into the
1262 // memory location argument.
1263 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1264 SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
1265 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1266 return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV, 0);
1269 // For ELF 32 ABI we follow the layout of the va_list struct.
1270 // We suppose the given va_list is already allocated.
1273 // char gpr; /* index into the array of 8 GPRs
1274 // * stored in the register save area
1275 // * gpr=0 corresponds to r3,
1276 // * gpr=1 to r4, etc.
1278 // char fpr; /* index into the array of 8 FPRs
1279 // * stored in the register save area
1280 // * fpr=0 corresponds to f1,
1281 // * fpr=1 to f2, etc.
1283 // char *overflow_arg_area;
1284 // /* location on stack that holds
1285 // * the next overflow argument
1287 // char *reg_save_area;
1288 // /* where r3:r10 and f1:f8 (if saved)
1294 SDValue ArgGPR = DAG.getConstant(VarArgsNumGPR, MVT::i8);
1295 SDValue ArgFPR = DAG.getConstant(VarArgsNumFPR, MVT::i8);
1298 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1300 SDValue StackOffsetFI = DAG.getFrameIndex(VarArgsStackOffset, PtrVT);
1301 SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
1303 uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
1304 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT);
1306 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
1307 SDValue ConstStackOffset = DAG.getConstant(StackOffset, PtrVT);
1309 uint64_t FPROffset = 1;
1310 SDValue ConstFPROffset = DAG.getConstant(FPROffset, PtrVT);
1312 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1314 // Store first byte : number of int regs
1315 SDValue firstStore = DAG.getStore(Op.getOperand(0), ArgGPR,
1316 Op.getOperand(1), SV, 0);
1317 uint64_t nextOffset = FPROffset;
1318 SDValue nextPtr = DAG.getNode(ISD::ADD, PtrVT, Op.getOperand(1),
1321 // Store second byte : number of float regs
1322 SDValue secondStore =
1323 DAG.getStore(firstStore, ArgFPR, nextPtr, SV, nextOffset);
1324 nextOffset += StackOffset;
1325 nextPtr = DAG.getNode(ISD::ADD, PtrVT, nextPtr, ConstStackOffset);
1327 // Store second word : arguments given on stack
1328 SDValue thirdStore =
1329 DAG.getStore(secondStore, StackOffsetFI, nextPtr, SV, nextOffset);
1330 nextOffset += FrameOffset;
1331 nextPtr = DAG.getNode(ISD::ADD, PtrVT, nextPtr, ConstFrameOffset);
1333 // Store third word : arguments given in registers
1334 return DAG.getStore(thirdStore, FR, nextPtr, SV, nextOffset);
1338 #include "PPCGenCallingConv.inc"
1340 /// GetFPR - Get the set of FP registers that should be allocated for arguments,
1341 /// depending on which subtarget is selected.
1342 static const unsigned *GetFPR(const PPCSubtarget &Subtarget) {
1343 if (Subtarget.isMachoABI()) {
1344 static const unsigned FPR[] = {
1345 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
1346 PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
1352 static const unsigned FPR[] = {
1353 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
1359 /// CalculateStackSlotSize - Calculates the size reserved for this argument on
1361 static unsigned CalculateStackSlotSize(SDValue Arg, ISD::ArgFlagsTy Flags,
1362 bool isVarArg, unsigned PtrByteSize) {
1363 MVT ArgVT = Arg.getValueType();
1364 unsigned ArgSize =ArgVT.getSizeInBits()/8;
1365 if (Flags.isByVal())
1366 ArgSize = Flags.getByValSize();
1367 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
1373 PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op,
1375 int &VarArgsFrameIndex,
1376 int &VarArgsStackOffset,
1377 unsigned &VarArgsNumGPR,
1378 unsigned &VarArgsNumFPR,
1379 const PPCSubtarget &Subtarget) {
1380 // TODO: add description of PPC stack frame format, or at least some docs.
1382 MachineFunction &MF = DAG.getMachineFunction();
1383 MachineFrameInfo *MFI = MF.getFrameInfo();
1384 MachineRegisterInfo &RegInfo = MF.getRegInfo();
1385 SmallVector<SDValue, 8> ArgValues;
1386 SDValue Root = Op.getOperand(0);
1387 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
1389 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1390 bool isPPC64 = PtrVT == MVT::i64;
1391 bool isMachoABI = Subtarget.isMachoABI();
1392 bool isELF32_ABI = Subtarget.isELF32_ABI();
1393 // Potential tail calls could cause overwriting of argument stack slots.
1394 unsigned CC = MF.getFunction()->getCallingConv();
1395 bool isImmutable = !(PerformTailCallOpt && (CC==CallingConv::Fast));
1396 unsigned PtrByteSize = isPPC64 ? 8 : 4;
1398 unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI);
1399 // Area that is at least reserved in caller of this function.
1400 unsigned MinReservedArea = ArgOffset;
1402 static const unsigned GPR_32[] = { // 32-bit registers.
1403 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
1404 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
1406 static const unsigned GPR_64[] = { // 64-bit registers.
1407 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
1408 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
1411 static const unsigned *FPR = GetFPR(Subtarget);
1413 static const unsigned VR[] = {
1414 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
1415 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
1418 const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
1419 const unsigned Num_FPR_Regs = isMachoABI ? 13 : 8;
1420 const unsigned Num_VR_Regs = array_lengthof( VR);
1422 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
1424 const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;
1426 // In 32-bit non-varargs functions, the stack space for vectors is after the
1427 // stack space for non-vectors. We do not use this space unless we have
1428 // too many vectors to fit in registers, something that only occurs in
1429 // constructed examples:), but we have to walk the arglist to figure
1430 // that out...for the pathological case, compute VecArgOffset as the
1431 // start of the vector parameter area. Computing VecArgOffset is the
1432 // entire point of the following loop.
1433 // Altivec is not mentioned in the ppc32 Elf Supplement, so I'm not trying
1434 // to handle Elf here.
1435 unsigned VecArgOffset = ArgOffset;
1436 if (!isVarArg && !isPPC64) {
1437 for (unsigned ArgNo = 0, e = Op.getNode()->getNumValues()-1; ArgNo != e;
1439 MVT ObjectVT = Op.getValue(ArgNo).getValueType();
1440 unsigned ObjSize = ObjectVT.getSizeInBits()/8;
1441 ISD::ArgFlagsTy Flags =
1442 cast<ARG_FLAGSSDNode>(Op.getOperand(ArgNo+3))->getArgFlags();
1444 if (Flags.isByVal()) {
1445 // ObjSize is the true size, ArgSize rounded up to multiple of regs.
1446 ObjSize = Flags.getByValSize();
1448 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
1449 VecArgOffset += ArgSize;
1453 switch(ObjectVT.getSimpleVT()) {
1454 default: assert(0 && "Unhandled argument type!");
1457 VecArgOffset += isPPC64 ? 8 : 4;
1459 case MVT::i64: // PPC64
1467 // Nothing to do, we're only looking at Nonvector args here.
1472 // We've found where the vector parameter area in memory is. Skip the
1473 // first 12 parameters; these don't use that memory.
1474 VecArgOffset = ((VecArgOffset+15)/16)*16;
1475 VecArgOffset += 12*16;
1477 // Add DAG nodes to load the arguments or copy them out of registers. On
1478 // entry to a function on PPC, the arguments start after the linkage area,
1479 // although the first ones are often in registers.
1481 // In the ELF 32 ABI, GPRs and stack are double word align: an argument
1482 // represented with two words (long long or double) must be copied to an
1483 // even GPR_idx value or to an even ArgOffset value.
1485 SmallVector<SDValue, 8> MemOps;
1486 unsigned nAltivecParamsAtEnd = 0;
1487 for (unsigned ArgNo = 0, e = Op.getNode()->getNumValues() - 1;
1488 ArgNo != e; ++ArgNo) {
1490 bool needsLoad = false;
1491 MVT ObjectVT = Op.getValue(ArgNo).getValueType();
1492 unsigned ObjSize = ObjectVT.getSizeInBits()/8;
1493 unsigned ArgSize = ObjSize;
1494 ISD::ArgFlagsTy Flags =
1495 cast<ARG_FLAGSSDNode>(Op.getOperand(ArgNo+3))->getArgFlags();
1496 // See if next argument requires stack alignment in ELF
1497 bool Align = Flags.isSplit();
1499 unsigned CurArgOffset = ArgOffset;
1501 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.
1502 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
1503 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
1504 if (isVarArg || isPPC64) {
1505 MinReservedArea = ((MinReservedArea+15)/16)*16;
1506 MinReservedArea += CalculateStackSlotSize(Op.getValue(ArgNo),
1510 } else nAltivecParamsAtEnd++;
1512 // Calculate min reserved area.
1513 MinReservedArea += CalculateStackSlotSize(Op.getValue(ArgNo),
1518 // FIXME alignment for ELF may not be right
1519 // FIXME the codegen can be much improved in some cases.
1520 // We do not have to keep everything in memory.
1521 if (Flags.isByVal()) {
1522 // ObjSize is the true size, ArgSize rounded up to multiple of registers.
1523 ObjSize = Flags.getByValSize();
1524 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
1525 // Double word align in ELF
1526 if (Align && isELF32_ABI) GPR_idx += (GPR_idx % 2);
1527 // Objects of size 1 and 2 are right justified, everything else is
1528 // left justified. This means the memory address is adjusted forwards.
1529 if (ObjSize==1 || ObjSize==2) {
1530 CurArgOffset = CurArgOffset + (4 - ObjSize);
1532 // The value of the object is its address.
1533 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset);
1534 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
1535 ArgValues.push_back(FIN);
1536 if (ObjSize==1 || ObjSize==2) {
1537 if (GPR_idx != Num_GPR_Regs) {
1538 unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
1539 RegInfo.addLiveIn(GPR[GPR_idx], VReg);
1540 SDValue Val = DAG.getCopyFromReg(Root, VReg, PtrVT);
1541 SDValue Store = DAG.getTruncStore(Val.getValue(1), Val, FIN,
1542 NULL, 0, ObjSize==1 ? MVT::i8 : MVT::i16 );
1543 MemOps.push_back(Store);
1545 if (isMachoABI) ArgOffset += PtrByteSize;
1547 ArgOffset += PtrByteSize;
1551 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
1552 // Store whatever pieces of the object are in registers
1553 // to memory. ArgVal will be address of the beginning of
1555 if (GPR_idx != Num_GPR_Regs) {
1556 unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
1557 RegInfo.addLiveIn(GPR[GPR_idx], VReg);
1558 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset);
1559 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
1560 SDValue Val = DAG.getCopyFromReg(Root, VReg, PtrVT);
1561 SDValue Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
1562 MemOps.push_back(Store);
1564 if (isMachoABI) ArgOffset += PtrByteSize;
1566 ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
1573 switch (ObjectVT.getSimpleVT()) {
1574 default: assert(0 && "Unhandled argument type!");
1577 // Double word align in ELF
1578 if (Align && isELF32_ABI) GPR_idx += (GPR_idx % 2);
1580 if (GPR_idx != Num_GPR_Regs) {
1581 unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
1582 RegInfo.addLiveIn(GPR[GPR_idx], VReg);
1583 ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i32);
1587 ArgSize = PtrByteSize;
1589 // Stack align in ELF
1590 if (needsLoad && Align && isELF32_ABI)
1591 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize;
1592 // All int arguments reserve stack space in Macho ABI.
1593 if (isMachoABI || needsLoad) ArgOffset += PtrByteSize;
1597 case MVT::i64: // PPC64
1598 if (GPR_idx != Num_GPR_Regs) {
1599 unsigned VReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
1600 RegInfo.addLiveIn(GPR[GPR_idx], VReg);
1601 ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i64);
1603 if (ObjectVT == MVT::i32) {
1604 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
1605 // value to MVT::i64 and then truncate to the correct register size.
1607 ArgVal = DAG.getNode(ISD::AssertSext, MVT::i64, ArgVal,
1608 DAG.getValueType(ObjectVT));
1609 else if (Flags.isZExt())
1610 ArgVal = DAG.getNode(ISD::AssertZext, MVT::i64, ArgVal,
1611 DAG.getValueType(ObjectVT));
1613 ArgVal = DAG.getNode(ISD::TRUNCATE, MVT::i32, ArgVal);
1619 ArgSize = PtrByteSize;
1621 // All int arguments reserve stack space in Macho ABI.
1622 if (isMachoABI || needsLoad) ArgOffset += 8;
1627 // Every 4 bytes of argument space consumes one of the GPRs available for
1628 // argument passing.
1629 if (GPR_idx != Num_GPR_Regs && isMachoABI) {
1631 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
1634 if (FPR_idx != Num_FPR_Regs) {
1636 if (ObjectVT == MVT::f32)
1637 VReg = RegInfo.createVirtualRegister(&PPC::F4RCRegClass);
1639 VReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
1640 RegInfo.addLiveIn(FPR[FPR_idx], VReg);
1641 ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT);
1647 // Stack align in ELF
1648 if (needsLoad && Align && isELF32_ABI)
1649 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize;
1650 // All FP arguments reserve stack space in Macho ABI.
1651 if (isMachoABI || needsLoad) ArgOffset += isPPC64 ? 8 : ObjSize;
1657 // Note that vector arguments in registers don't reserve stack space,
1658 // except in varargs functions.
1659 if (VR_idx != Num_VR_Regs) {
1660 unsigned VReg = RegInfo.createVirtualRegister(&PPC::VRRCRegClass);
1661 RegInfo.addLiveIn(VR[VR_idx], VReg);
1662 ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT);
1664 while ((ArgOffset % 16) != 0) {
1665 ArgOffset += PtrByteSize;
1666 if (GPR_idx != Num_GPR_Regs)
1670 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs);
1674 if (!isVarArg && !isPPC64) {
1675 // Vectors go after all the nonvectors.
1676 CurArgOffset = VecArgOffset;
1679 // Vectors are aligned.
1680 ArgOffset = ((ArgOffset+15)/16)*16;
1681 CurArgOffset = ArgOffset;
1689 // We need to load the argument to a virtual register if we determined above
1690 // that we ran out of physical registers of the appropriate type.
1692 int FI = MFI->CreateFixedObject(ObjSize,
1693 CurArgOffset + (ArgSize - ObjSize),
1695 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
1696 ArgVal = DAG.getLoad(ObjectVT, Root, FIN, NULL, 0);
1699 ArgValues.push_back(ArgVal);
1702 // Set the size that is at least reserved in caller of this function. Tail
1703 // call optimized function's reserved stack space needs to be aligned so that
1704 // taking the difference between two stack areas will result in an aligned
1706 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
1707 // Add the Altivec parameters at the end, if needed.
1708 if (nAltivecParamsAtEnd) {
1709 MinReservedArea = ((MinReservedArea+15)/16)*16;
1710 MinReservedArea += 16*nAltivecParamsAtEnd;
1713 std::max(MinReservedArea,
1714 PPCFrameInfo::getMinCallFrameSize(isPPC64, isMachoABI));
1715 unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameInfo()->
1716 getStackAlignment();
1717 unsigned AlignMask = TargetAlign-1;
1718 MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask;
1719 FI->setMinReservedArea(MinReservedArea);
1721 // If the function takes variable number of arguments, make a frame index for
1722 // the start of the first vararg value... for expansion of llvm.va_start.
1727 VarArgsNumGPR = GPR_idx;
1728 VarArgsNumFPR = FPR_idx;
1730 // Make room for Num_GPR_Regs, Num_FPR_Regs and for a possible frame
1732 depth = -(Num_GPR_Regs * PtrVT.getSizeInBits()/8 +
1733 Num_FPR_Regs * MVT(MVT::f64).getSizeInBits()/8 +
1734 PtrVT.getSizeInBits()/8);
1736 VarArgsStackOffset = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
1743 VarArgsFrameIndex = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
1745 SDValue FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
1747 // In ELF 32 ABI, the fixed integer arguments of a variadic function are
1748 // stored to the VarArgsFrameIndex on the stack.
1750 for (GPR_idx = 0; GPR_idx != VarArgsNumGPR; ++GPR_idx) {
1751 SDValue Val = DAG.getRegister(GPR[GPR_idx], PtrVT);
1752 SDValue Store = DAG.getStore(Root, Val, FIN, NULL, 0);
1753 MemOps.push_back(Store);
1754 // Increment the address by four for the next argument to store
1755 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
1756 FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
1760 // If this function is vararg, store any remaining integer argument regs
1761 // to their spots on the stack so that they may be loaded by deferencing the
1762 // result of va_next.
1763 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
1766 VReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
1768 VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
1770 RegInfo.addLiveIn(GPR[GPR_idx], VReg);
1771 SDValue Val = DAG.getCopyFromReg(Root, VReg, PtrVT);
1772 SDValue Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
1773 MemOps.push_back(Store);
1774 // Increment the address by four for the next argument to store
1775 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
1776 FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
1779 // In ELF 32 ABI, the double arguments are stored to the VarArgsFrameIndex
1782 for (FPR_idx = 0; FPR_idx != VarArgsNumFPR; ++FPR_idx) {
1783 SDValue Val = DAG.getRegister(FPR[FPR_idx], MVT::f64);
1784 SDValue Store = DAG.getStore(Root, Val, FIN, NULL, 0);
1785 MemOps.push_back(Store);
1786 // Increment the address by eight for the next argument to store
1787 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8,
1789 FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
1792 for (; FPR_idx != Num_FPR_Regs; ++FPR_idx) {
1794 VReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
1796 RegInfo.addLiveIn(FPR[FPR_idx], VReg);
1797 SDValue Val = DAG.getCopyFromReg(Root, VReg, MVT::f64);
1798 SDValue Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
1799 MemOps.push_back(Store);
1800 // Increment the address by eight for the next argument to store
1801 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8,
1803 FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
1808 if (!MemOps.empty())
1809 Root = DAG.getNode(ISD::TokenFactor, MVT::Other,&MemOps[0],MemOps.size());
1811 ArgValues.push_back(Root);
1813 // Return the new list of results.
1814 return DAG.getMergeValues(Op.getNode()->getVTList(), &ArgValues[0],
1818 /// CalculateParameterAndLinkageAreaSize - Get the size of the paramter plus
1821 CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG,
1826 CallSDNode *TheCall,
1827 unsigned &nAltivecParamsAtEnd) {
1828 // Count how many bytes are to be pushed on the stack, including the linkage
1829 // area, and parameter passing area. We start with 24/48 bytes, which is
1830 // prereserved space for [SP][CR][LR][3 x unused].
1831 unsigned NumBytes = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI);
1832 unsigned NumOps = TheCall->getNumArgs();
1833 unsigned PtrByteSize = isPPC64 ? 8 : 4;
1835 // Add up all the space actually used.
1836 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
1837 // they all go in registers, but we must reserve stack space for them for
1838 // possible use by the caller. In varargs or 64-bit calls, parameters are
1839 // assigned stack space in order, with padding so Altivec parameters are
1841 nAltivecParamsAtEnd = 0;
1842 for (unsigned i = 0; i != NumOps; ++i) {
1843 SDValue Arg = TheCall->getArg(i);
1844 ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
1845 MVT ArgVT = Arg.getValueType();
1846 // Varargs Altivec parameters are padded to a 16 byte boundary.
1847 if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 ||
1848 ArgVT==MVT::v8i16 || ArgVT==MVT::v16i8) {
1849 if (!isVarArg && !isPPC64) {
1850 // Non-varargs Altivec parameters go after all the non-Altivec
1851 // parameters; handle those later so we know how much padding we need.
1852 nAltivecParamsAtEnd++;
1855 // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary.
1856 NumBytes = ((NumBytes+15)/16)*16;
1858 NumBytes += CalculateStackSlotSize(Arg, Flags, isVarArg, PtrByteSize);
1861 // Allow for Altivec parameters at the end, if needed.
1862 if (nAltivecParamsAtEnd) {
1863 NumBytes = ((NumBytes+15)/16)*16;
1864 NumBytes += 16*nAltivecParamsAtEnd;
1867 // The prolog code of the callee may store up to 8 GPR argument registers to
1868 // the stack, allowing va_start to index over them in memory if its varargs.
1869 // Because we cannot tell if this is needed on the caller side, we have to
1870 // conservatively assume that it is needed. As such, make sure we have at
1871 // least enough stack space for the caller to store the 8 GPRs.
1872 NumBytes = std::max(NumBytes,
1873 PPCFrameInfo::getMinCallFrameSize(isPPC64, isMachoABI));
1875 // Tail call needs the stack to be aligned.
1876 if (CC==CallingConv::Fast && PerformTailCallOpt) {
1877 unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameInfo()->
1878 getStackAlignment();
1879 unsigned AlignMask = TargetAlign-1;
1880 NumBytes = (NumBytes + AlignMask) & ~AlignMask;
1886 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
1887 /// adjusted to accomodate the arguments for the tailcall.
1888 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool IsTailCall,
1889 unsigned ParamSize) {
1891 if (!IsTailCall) return 0;
1893 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
1894 unsigned CallerMinReservedArea = FI->getMinReservedArea();
1895 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
1896 // Remember only if the new adjustement is bigger.
1897 if (SPDiff < FI->getTailCallSPDelta())
1898 FI->setTailCallSPDelta(SPDiff);
1903 /// IsEligibleForTailCallElimination - Check to see whether the next instruction
1904 /// following the call is a return. A function is eligible if caller/callee
1905 /// calling conventions match, currently only fastcc supports tail calls, and
1906 /// the function CALL is immediatly followed by a RET.
1908 PPCTargetLowering::IsEligibleForTailCallOptimization(CallSDNode *TheCall,
1910 SelectionDAG& DAG) const {
1911 // Variable argument functions are not supported.
1912 if (!PerformTailCallOpt || TheCall->isVarArg())
1915 if (CheckTailCallReturnConstraints(TheCall, Ret)) {
1916 MachineFunction &MF = DAG.getMachineFunction();
1917 unsigned CallerCC = MF.getFunction()->getCallingConv();
1918 unsigned CalleeCC = TheCall->getCallingConv();
1919 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
1920 // Functions containing by val parameters are not supported.
1921 for (unsigned i = 0; i != TheCall->getNumArgs(); i++) {
1922 ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
1923 if (Flags.isByVal()) return false;
1926 SDValue Callee = TheCall->getCallee();
1927 // Non PIC/GOT tail calls are supported.
1928 if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
1931 // At the moment we can only do local tail calls (in same module, hidden
1932 // or protected) if we are generating PIC.
1933 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1934 return G->getGlobal()->hasHiddenVisibility()
1935 || G->getGlobal()->hasProtectedVisibility();
1942 /// isCallCompatibleAddress - Return the immediate to use if the specified
1943 /// 32-bit value is representable in the immediate field of a BxA instruction.
1944 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
1945 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
1948 int Addr = C->getZExtValue();
1949 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
1950 (Addr << 6 >> 6) != Addr)
1951 return 0; // Top 6 bits have to be sext of immediate.
1953 return DAG.getConstant((int)C->getZExtValue() >> 2,
1954 DAG.getTargetLoweringInfo().getPointerTy()).getNode();
1959 struct TailCallArgumentInfo {
1964 TailCallArgumentInfo() : FrameIdx(0) {}
1969 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
1971 StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG,
1973 const SmallVector<TailCallArgumentInfo, 8> &TailCallArgs,
1974 SmallVector<SDValue, 8> &MemOpChains) {
1975 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
1976 SDValue Arg = TailCallArgs[i].Arg;
1977 SDValue FIN = TailCallArgs[i].FrameIdxOp;
1978 int FI = TailCallArgs[i].FrameIdx;
1979 // Store relative to framepointer.
1980 MemOpChains.push_back(DAG.getStore(Chain, Arg, FIN,
1981 PseudoSourceValue::getFixedStack(FI),
1986 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
1987 /// the appropriate stack slot for the tail call optimized function call.
1988 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG,
1989 MachineFunction &MF,
1997 // Calculate the new stack slot for the return address.
1998 int SlotSize = isPPC64 ? 8 : 4;
1999 int NewRetAddrLoc = SPDiff + PPCFrameInfo::getReturnSaveOffset(isPPC64,
2001 int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize,
2003 int NewFPLoc = SPDiff + PPCFrameInfo::getFramePointerSaveOffset(isPPC64,
2005 int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc);
2007 MVT VT = isPPC64 ? MVT::i64 : MVT::i32;
2008 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
2009 Chain = DAG.getStore(Chain, OldRetAddr, NewRetAddrFrIdx,
2010 PseudoSourceValue::getFixedStack(NewRetAddr), 0);
2011 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
2012 Chain = DAG.getStore(Chain, OldFP, NewFramePtrIdx,
2013 PseudoSourceValue::getFixedStack(NewFPIdx), 0);
2018 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate
2019 /// the position of the argument.
2021 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
2022 SDValue Arg, int SPDiff, unsigned ArgOffset,
2023 SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) {
2024 int Offset = ArgOffset + SPDiff;
2025 uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8;
2026 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset);
2027 MVT VT = isPPC64 ? MVT::i64 : MVT::i32;
2028 SDValue FIN = DAG.getFrameIndex(FI, VT);
2029 TailCallArgumentInfo Info;
2031 Info.FrameIdxOp = FIN;
2033 TailCallArguments.push_back(Info);
2036 /// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address
2037 /// stack slot. Returns the chain as result and the loaded frame pointers in
2038 /// LROpOut/FPOpout. Used when tail calling.
2039 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
2045 // Load the LR and FP stack slot for later adjusting.
2046 MVT VT = PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32;
2047 LROpOut = getReturnAddrFrameIndex(DAG);
2048 LROpOut = DAG.getLoad(VT, Chain, LROpOut, NULL, 0);
2049 Chain = SDValue(LROpOut.getNode(), 1);
2050 FPOpOut = getFramePointerFrameIndex(DAG);
2051 FPOpOut = DAG.getLoad(VT, Chain, FPOpOut, NULL, 0);
2052 Chain = SDValue(FPOpOut.getNode(), 1);
2057 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
2058 /// by "Src" to address "Dst" of size "Size". Alignment information is
2059 /// specified by the specific parameter attribute. The copy will be passed as
2060 /// a byval function parameter.
2061 /// Sometimes what we are copying is the end of a larger object, the part that
2062 /// does not fit in registers.
2064 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
2065 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
2067 SDValue SizeNode = DAG.getConstant(Size, MVT::i32);
2068 return DAG.getMemcpy(Chain, Dst, Src, SizeNode, Flags.getByValAlign(), false,
2072 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
2075 LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain,
2076 SDValue Arg, SDValue PtrOff, int SPDiff,
2077 unsigned ArgOffset, bool isPPC64, bool isTailCall,
2078 bool isVector, SmallVector<SDValue, 8> &MemOpChains,
2079 SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) {
2080 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2085 StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
2087 StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
2088 PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr,
2089 DAG.getConstant(ArgOffset, PtrVT));
2091 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
2092 // Calculate and remember argument location.
2093 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
2097 SDValue PPCTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG,
2098 const PPCSubtarget &Subtarget,
2099 TargetMachine &TM) {
2100 CallSDNode *TheCall = cast<CallSDNode>(Op.getNode());
2101 SDValue Chain = TheCall->getChain();
2102 bool isVarArg = TheCall->isVarArg();
2103 unsigned CC = TheCall->getCallingConv();
2104 bool isTailCall = TheCall->isTailCall()
2105 && CC == CallingConv::Fast && PerformTailCallOpt;
2106 SDValue Callee = TheCall->getCallee();
2107 unsigned NumOps = TheCall->getNumArgs();
2109 bool isMachoABI = Subtarget.isMachoABI();
2110 bool isELF32_ABI = Subtarget.isELF32_ABI();
2112 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2113 bool isPPC64 = PtrVT == MVT::i64;
2114 unsigned PtrByteSize = isPPC64 ? 8 : 4;
2116 MachineFunction &MF = DAG.getMachineFunction();
2118 // args_to_use will accumulate outgoing args for the PPCISD::CALL case in
2119 // SelectExpr to use to put the arguments in the appropriate registers.
2120 std::vector<SDValue> args_to_use;
2122 // Mark this function as potentially containing a function that contains a
2123 // tail call. As a consequence the frame pointer will be used for dynamicalloc
2124 // and restoring the callers stack pointer in this functions epilog. This is
2125 // done because by tail calling the called function might overwrite the value
2126 // in this function's (MF) stack pointer stack slot 0(SP).
2127 if (PerformTailCallOpt && CC==CallingConv::Fast)
2128 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
2130 unsigned nAltivecParamsAtEnd = 0;
2132 // Count how many bytes are to be pushed on the stack, including the linkage
2133 // area, and parameter passing area. We start with 24/48 bytes, which is
2134 // prereserved space for [SP][CR][LR][3 x unused].
2136 CalculateParameterAndLinkageAreaSize(DAG, isPPC64, isMachoABI, isVarArg, CC,
2137 TheCall, nAltivecParamsAtEnd);
2139 // Calculate by how many bytes the stack has to be adjusted in case of tail
2140 // call optimization.
2141 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
2143 // Adjust the stack pointer for the new arguments...
2144 // These operations are automatically eliminated by the prolog/epilog pass
2145 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
2146 SDValue CallSeqStart = Chain;
2148 // Load the return address and frame pointer so it can be move somewhere else
2151 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp);
2153 // Set up a copy of the stack pointer for use loading and storing any
2154 // arguments that may not fit in the registers available for argument
2158 StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
2160 StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
2162 // Figure out which arguments are going to go in registers, and which in
2163 // memory. Also, if this is a vararg function, floating point operations
2164 // must be stored to our stack, and loaded into integer regs as well, if
2165 // any integer regs are available for argument passing.
2166 unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI);
2167 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
2169 static const unsigned GPR_32[] = { // 32-bit registers.
2170 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
2171 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
2173 static const unsigned GPR_64[] = { // 64-bit registers.
2174 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
2175 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
2177 static const unsigned *FPR = GetFPR(Subtarget);
2179 static const unsigned VR[] = {
2180 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
2181 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
2183 const unsigned NumGPRs = array_lengthof(GPR_32);
2184 const unsigned NumFPRs = isMachoABI ? 13 : 8;
2185 const unsigned NumVRs = array_lengthof( VR);
2187 const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;
2189 std::vector<std::pair<unsigned, SDValue> > RegsToPass;
2190 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
2192 SmallVector<SDValue, 8> MemOpChains;
2193 for (unsigned i = 0; i != NumOps; ++i) {
2195 SDValue Arg = TheCall->getArg(i);
2196 ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
2197 // See if next argument requires stack alignment in ELF
2198 bool Align = Flags.isSplit();
2200 // PtrOff will be used to store the current argument to the stack if a
2201 // register cannot be found for it.
2204 // Stack align in ELF 32
2205 if (isELF32_ABI && Align)
2206 PtrOff = DAG.getConstant(ArgOffset + ((ArgOffset/4) % 2) * PtrByteSize,
2207 StackPtr.getValueType());
2209 PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
2211 PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, PtrOff);
2213 // On PPC64, promote integers to 64-bit values.
2214 if (isPPC64 && Arg.getValueType() == MVT::i32) {
2215 // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
2216 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
2217 Arg = DAG.getNode(ExtOp, MVT::i64, Arg);
2220 // FIXME Elf untested, what are alignment rules?
2221 // FIXME memcpy is used way more than necessary. Correctness first.
2222 if (Flags.isByVal()) {
2223 unsigned Size = Flags.getByValSize();
2224 if (isELF32_ABI && Align) GPR_idx += (GPR_idx % 2);
2225 if (Size==1 || Size==2) {
2226 // Very small objects are passed right-justified.
2227 // Everything else is passed left-justified.
2228 MVT VT = (Size==1) ? MVT::i8 : MVT::i16;
2229 if (GPR_idx != NumGPRs) {
2230 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, PtrVT, Chain, Arg,
2232 MemOpChains.push_back(Load.getValue(1));
2233 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
2235 ArgOffset += PtrByteSize;
2237 SDValue Const = DAG.getConstant(4 - Size, PtrOff.getValueType());
2238 SDValue AddPtr = DAG.getNode(ISD::ADD, PtrVT, PtrOff, Const);
2239 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, AddPtr,
2240 CallSeqStart.getNode()->getOperand(0),
2242 // This must go outside the CALLSEQ_START..END.
2243 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
2244 CallSeqStart.getNode()->getOperand(1));
2245 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
2246 NewCallSeqStart.getNode());
2247 Chain = CallSeqStart = NewCallSeqStart;
2248 ArgOffset += PtrByteSize;
2252 // Copy entire object into memory. There are cases where gcc-generated
2253 // code assumes it is there, even if it could be put entirely into
2254 // registers. (This is not what the doc says.)
2255 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
2256 CallSeqStart.getNode()->getOperand(0),
2258 // This must go outside the CALLSEQ_START..END.
2259 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
2260 CallSeqStart.getNode()->getOperand(1));
2261 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), NewCallSeqStart.getNode());
2262 Chain = CallSeqStart = NewCallSeqStart;
2263 // And copy the pieces of it that fit into registers.
2264 for (unsigned j=0; j<Size; j+=PtrByteSize) {
2265 SDValue Const = DAG.getConstant(j, PtrOff.getValueType());
2266 SDValue AddArg = DAG.getNode(ISD::ADD, PtrVT, Arg, Const);
2267 if (GPR_idx != NumGPRs) {
2268 SDValue Load = DAG.getLoad(PtrVT, Chain, AddArg, NULL, 0);
2269 MemOpChains.push_back(Load.getValue(1));
2270 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
2272 ArgOffset += PtrByteSize;
2274 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
2281 switch (Arg.getValueType().getSimpleVT()) {
2282 default: assert(0 && "Unexpected ValueType for argument!");
2285 // Double word align in ELF
2286 if (isELF32_ABI && Align) GPR_idx += (GPR_idx % 2);
2287 if (GPR_idx != NumGPRs) {
2288 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
2290 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
2291 isPPC64, isTailCall, false, MemOpChains,
2295 if (inMem || isMachoABI) {
2296 // Stack align in ELF
2297 if (isELF32_ABI && Align)
2298 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize;
2300 ArgOffset += PtrByteSize;
2305 if (FPR_idx != NumFPRs) {
2306 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
2309 SDValue Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0);
2310 MemOpChains.push_back(Store);
2312 // Float varargs are always shadowed in available integer registers
2313 if (GPR_idx != NumGPRs) {
2314 SDValue Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0);
2315 MemOpChains.push_back(Load.getValue(1));
2316 if (isMachoABI) RegsToPass.push_back(std::make_pair(GPR[GPR_idx++],
2319 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
2320 SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType());
2321 PtrOff = DAG.getNode(ISD::ADD, PtrVT, PtrOff, ConstFour);
2322 SDValue Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0);
2323 MemOpChains.push_back(Load.getValue(1));
2324 if (isMachoABI) RegsToPass.push_back(std::make_pair(GPR[GPR_idx++],
2328 // If we have any FPRs remaining, we may also have GPRs remaining.
2329 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
2332 if (GPR_idx != NumGPRs)
2334 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
2335 !isPPC64) // PPC64 has 64-bit GPR's obviously :)
2340 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
2341 isPPC64, isTailCall, false, MemOpChains,
2345 if (inMem || isMachoABI) {
2346 // Stack align in ELF
2347 if (isELF32_ABI && Align)
2348 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize;
2352 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
2360 // These go aligned on the stack, or in the corresponding R registers
2361 // when within range. The Darwin PPC ABI doc claims they also go in
2362 // V registers; in fact gcc does this only for arguments that are
2363 // prototyped, not for those that match the ... We do it for all
2364 // arguments, seems to work.
2365 while (ArgOffset % 16 !=0) {
2366 ArgOffset += PtrByteSize;
2367 if (GPR_idx != NumGPRs)
2370 // We could elide this store in the case where the object fits
2371 // entirely in R registers. Maybe later.
2372 PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr,
2373 DAG.getConstant(ArgOffset, PtrVT));
2374 SDValue Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0);
2375 MemOpChains.push_back(Store);
2376 if (VR_idx != NumVRs) {
2377 SDValue Load = DAG.getLoad(MVT::v4f32, Store, PtrOff, NULL, 0);
2378 MemOpChains.push_back(Load.getValue(1));
2379 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
2382 for (unsigned i=0; i<16; i+=PtrByteSize) {
2383 if (GPR_idx == NumGPRs)
2385 SDValue Ix = DAG.getNode(ISD::ADD, PtrVT, PtrOff,
2386 DAG.getConstant(i, PtrVT));
2387 SDValue Load = DAG.getLoad(PtrVT, Store, Ix, NULL, 0);
2388 MemOpChains.push_back(Load.getValue(1));
2389 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
2394 // Non-varargs Altivec params generally go in registers, but have
2395 // stack space allocated at the end.
2396 if (VR_idx != NumVRs) {
2397 // Doesn't have GPR space allocated.
2398 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
2399 } else if (nAltivecParamsAtEnd==0) {
2400 // We are emitting Altivec params in order.
2401 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
2402 isPPC64, isTailCall, true, MemOpChains,
2409 // If all Altivec parameters fit in registers, as they usually do,
2410 // they get stack space following the non-Altivec parameters. We
2411 // don't track this here because nobody below needs it.
2412 // If there are more Altivec parameters than fit in registers emit
2414 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) {
2416 // Offset is aligned; skip 1st 12 params which go in V registers.
2417 ArgOffset = ((ArgOffset+15)/16)*16;
2419 for (unsigned i = 0; i != NumOps; ++i) {
2420 SDValue Arg = TheCall->getArg(i);
2421 MVT ArgType = Arg.getValueType();
2422 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
2423 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
2426 // We are emitting Altivec params in order.
2427 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
2428 isPPC64, isTailCall, true, MemOpChains,
2436 if (!MemOpChains.empty())
2437 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
2438 &MemOpChains[0], MemOpChains.size());
2440 // Build a sequence of copy-to-reg nodes chained together with token chain
2441 // and flag operands which copy the outgoing args into the appropriate regs.
2443 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
2444 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
2446 InFlag = Chain.getValue(1);
2449 // With the ELF 32 ABI, set CR6 to true if this is a vararg call.
2450 if (isVarArg && isELF32_ABI) {
2451 SDValue SetCR(DAG.getTargetNode(PPC::CRSET, MVT::i32), 0);
2452 Chain = DAG.getCopyToReg(Chain, PPC::CR1EQ, SetCR, InFlag);
2453 InFlag = Chain.getValue(1);
2456 // Emit a sequence of copyto/copyfrom virtual registers for arguments that
2457 // might overwrite each other in case of tail call optimization.
2459 SmallVector<SDValue, 8> MemOpChains2;
2460 // Do not flag preceeding copytoreg stuff together with the following stuff.
2462 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
2464 if (!MemOpChains2.empty())
2465 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
2466 &MemOpChains2[0], MemOpChains2.size());
2468 // Store the return address to the appropriate stack slot.
2469 Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff,
2470 isPPC64, isMachoABI);
2473 // Emit callseq_end just before tailcall node.
2475 SmallVector<SDValue, 8> CallSeqOps;
2476 SDVTList CallSeqNodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
2477 CallSeqOps.push_back(Chain);
2478 CallSeqOps.push_back(DAG.getIntPtrConstant(NumBytes, true));
2479 CallSeqOps.push_back(DAG.getIntPtrConstant(0, true));
2480 if (InFlag.getNode())
2481 CallSeqOps.push_back(InFlag);
2482 Chain = DAG.getNode(ISD::CALLSEQ_END, CallSeqNodeTys, &CallSeqOps[0],
2484 InFlag = Chain.getValue(1);
2487 std::vector<MVT> NodeTys;
2488 NodeTys.push_back(MVT::Other); // Returns a chain
2489 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
2491 SmallVector<SDValue, 8> Ops;
2492 unsigned CallOpc = isMachoABI? PPCISD::CALL_Macho : PPCISD::CALL_ELF;
2494 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
2495 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
2496 // node so that legalize doesn't hack it.
2497 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
2498 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), Callee.getValueType());
2499 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
2500 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType());
2501 else if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
2502 // If this is an absolute destination address, use the munged value.
2503 Callee = SDValue(Dest, 0);
2505 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair
2506 // to do the call, we can't use PPCISD::CALL.
2507 SDValue MTCTROps[] = {Chain, Callee, InFlag};
2508 Chain = DAG.getNode(PPCISD::MTCTR, NodeTys, MTCTROps,
2509 2 + (InFlag.getNode() != 0));
2510 InFlag = Chain.getValue(1);
2512 // Copy the callee address into R12/X12 on darwin.
2514 unsigned Reg = Callee.getValueType() == MVT::i32 ? PPC::R12 : PPC::X12;
2515 Chain = DAG.getCopyToReg(Chain, Reg, Callee, InFlag);
2516 InFlag = Chain.getValue(1);
2520 NodeTys.push_back(MVT::Other);
2521 NodeTys.push_back(MVT::Flag);
2522 Ops.push_back(Chain);
2523 CallOpc = isMachoABI ? PPCISD::BCTRL_Macho : PPCISD::BCTRL_ELF;
2525 // Add CTR register as callee so a bctr can be emitted later.
2527 Ops.push_back(DAG.getRegister(PPC::CTR, getPointerTy()));
2530 // If this is a direct call, pass the chain and the callee.
2531 if (Callee.getNode()) {
2532 Ops.push_back(Chain);
2533 Ops.push_back(Callee);
2535 // If this is a tail call add stack pointer delta.
2537 Ops.push_back(DAG.getConstant(SPDiff, MVT::i32));
2539 // Add argument registers to the end of the list so that they are known live
2541 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
2542 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
2543 RegsToPass[i].second.getValueType()));
2545 // When performing tail call optimization the callee pops its arguments off
2546 // the stack. Account for this here so these bytes can be pushed back on in
2547 // PPCRegisterInfo::eliminateCallFramePseudoInstr.
2548 int BytesCalleePops =
2549 (CC==CallingConv::Fast && PerformTailCallOpt) ? NumBytes : 0;
2551 if (InFlag.getNode())
2552 Ops.push_back(InFlag);
2556 assert(InFlag.getNode() &&
2557 "Flag must be set. Depend on flag being set in LowerRET");
2558 Chain = DAG.getNode(PPCISD::TAILCALL,
2559 TheCall->getVTList(), &Ops[0], Ops.size());
2560 return SDValue(Chain.getNode(), Op.getResNo());
2563 Chain = DAG.getNode(CallOpc, NodeTys, &Ops[0], Ops.size());
2564 InFlag = Chain.getValue(1);
2566 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
2567 DAG.getIntPtrConstant(BytesCalleePops, true),
2569 if (TheCall->getValueType(0) != MVT::Other)
2570 InFlag = Chain.getValue(1);
2572 SmallVector<SDValue, 16> ResultVals;
2573 SmallVector<CCValAssign, 16> RVLocs;
2574 unsigned CallerCC = DAG.getMachineFunction().getFunction()->getCallingConv();
2575 CCState CCInfo(CallerCC, isVarArg, TM, RVLocs);
2576 CCInfo.AnalyzeCallResult(TheCall, RetCC_PPC);
2578 // Copy all of the result registers out of their specified physreg.
2579 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
2580 CCValAssign &VA = RVLocs[i];
2581 MVT VT = VA.getValVT();
2582 assert(VA.isRegLoc() && "Can only return in registers!");
2583 Chain = DAG.getCopyFromReg(Chain, VA.getLocReg(), VT, InFlag).getValue(1);
2584 ResultVals.push_back(Chain.getValue(0));
2585 InFlag = Chain.getValue(2);
2588 // If the function returns void, just return the chain.
2592 // Otherwise, merge everything together with a MERGE_VALUES node.
2593 ResultVals.push_back(Chain);
2594 SDValue Res = DAG.getMergeValues(TheCall->getVTList(), &ResultVals[0],
2596 return Res.getValue(Op.getResNo());
2599 SDValue PPCTargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG,
2600 TargetMachine &TM) {
2601 SmallVector<CCValAssign, 16> RVLocs;
2602 unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
2603 bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
2604 CCState CCInfo(CC, isVarArg, TM, RVLocs);
2605 CCInfo.AnalyzeReturn(Op.getNode(), RetCC_PPC);
2607 // If this is the first return lowered for this function, add the regs to the
2608 // liveout set for the function.
2609 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
2610 for (unsigned i = 0; i != RVLocs.size(); ++i)
2611 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
2614 SDValue Chain = Op.getOperand(0);
2616 Chain = GetPossiblePreceedingTailCall(Chain, PPCISD::TAILCALL);
2617 if (Chain.getOpcode() == PPCISD::TAILCALL) {
2618 SDValue TailCall = Chain;
2619 SDValue TargetAddress = TailCall.getOperand(1);
2620 SDValue StackAdjustment = TailCall.getOperand(2);
2622 assert(((TargetAddress.getOpcode() == ISD::Register &&
2623 cast<RegisterSDNode>(TargetAddress)->getReg() == PPC::CTR) ||
2624 TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
2625 TargetAddress.getOpcode() == ISD::TargetGlobalAddress ||
2626 isa<ConstantSDNode>(TargetAddress)) &&
2627 "Expecting an global address, external symbol, absolute value or register");
2629 assert(StackAdjustment.getOpcode() == ISD::Constant &&
2630 "Expecting a const value");
2632 SmallVector<SDValue,8> Operands;
2633 Operands.push_back(Chain.getOperand(0));
2634 Operands.push_back(TargetAddress);
2635 Operands.push_back(StackAdjustment);
2636 // Copy registers used by the call. Last operand is a flag so it is not
2638 for (unsigned i=3; i < TailCall.getNumOperands()-1; i++) {
2639 Operands.push_back(Chain.getOperand(i));
2641 return DAG.getNode(PPCISD::TC_RETURN, MVT::Other, &Operands[0],
2647 // Copy the result values into the output registers.
2648 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2649 CCValAssign &VA = RVLocs[i];
2650 assert(VA.isRegLoc() && "Can only return in registers!");
2651 Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), Op.getOperand(i*2+1), Flag);
2652 Flag = Chain.getValue(1);
2656 return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Chain, Flag);
2658 return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Chain);
2661 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
2662 const PPCSubtarget &Subtarget) {
2663 // When we pop the dynamic allocation we need to restore the SP link.
2665 // Get the corect type for pointers.
2666 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2668 // Construct the stack pointer operand.
2669 bool IsPPC64 = Subtarget.isPPC64();
2670 unsigned SP = IsPPC64 ? PPC::X1 : PPC::R1;
2671 SDValue StackPtr = DAG.getRegister(SP, PtrVT);
2673 // Get the operands for the STACKRESTORE.
2674 SDValue Chain = Op.getOperand(0);
2675 SDValue SaveSP = Op.getOperand(1);
2677 // Load the old link SP.
2678 SDValue LoadLinkSP = DAG.getLoad(PtrVT, Chain, StackPtr, NULL, 0);
2680 // Restore the stack pointer.
2681 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), SP, SaveSP);
2683 // Store the old link SP.
2684 return DAG.getStore(Chain, LoadLinkSP, StackPtr, NULL, 0);
2690 PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
2691 MachineFunction &MF = DAG.getMachineFunction();
2692 bool IsPPC64 = PPCSubTarget.isPPC64();
2693 bool isMachoABI = PPCSubTarget.isMachoABI();
2694 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2696 // Get current frame pointer save index. The users of this index will be
2697 // primarily DYNALLOC instructions.
2698 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
2699 int RASI = FI->getReturnAddrSaveIndex();
2701 // If the frame pointer save index hasn't been defined yet.
2703 // Find out what the fix offset of the frame pointer save area.
2704 int LROffset = PPCFrameInfo::getReturnSaveOffset(IsPPC64, isMachoABI);
2705 // Allocate the frame index for frame pointer save area.
2706 RASI = MF.getFrameInfo()->CreateFixedObject(IsPPC64? 8 : 4, LROffset);
2708 FI->setReturnAddrSaveIndex(RASI);
2710 return DAG.getFrameIndex(RASI, PtrVT);
2714 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
2715 MachineFunction &MF = DAG.getMachineFunction();
2716 bool IsPPC64 = PPCSubTarget.isPPC64();
2717 bool isMachoABI = PPCSubTarget.isMachoABI();
2718 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2720 // Get current frame pointer save index. The users of this index will be
2721 // primarily DYNALLOC instructions.
2722 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
2723 int FPSI = FI->getFramePointerSaveIndex();
2725 // If the frame pointer save index hasn't been defined yet.
2727 // Find out what the fix offset of the frame pointer save area.
2728 int FPOffset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64, isMachoABI);
2730 // Allocate the frame index for frame pointer save area.
2731 FPSI = MF.getFrameInfo()->CreateFixedObject(IsPPC64? 8 : 4, FPOffset);
2733 FI->setFramePointerSaveIndex(FPSI);
2735 return DAG.getFrameIndex(FPSI, PtrVT);
2738 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
2740 const PPCSubtarget &Subtarget) {
2742 SDValue Chain = Op.getOperand(0);
2743 SDValue Size = Op.getOperand(1);
2745 // Get the corect type for pointers.
2746 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2748 SDValue NegSize = DAG.getNode(ISD::SUB, PtrVT,
2749 DAG.getConstant(0, PtrVT), Size);
2750 // Construct a node for the frame pointer save index.
2751 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
2752 // Build a DYNALLOC node.
2753 SDValue Ops[3] = { Chain, NegSize, FPSIdx };
2754 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
2755 return DAG.getNode(PPCISD::DYNALLOC, VTs, Ops, 3);
2758 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
2760 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) {
2761 // Not FP? Not a fsel.
2762 if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
2763 !Op.getOperand(2).getValueType().isFloatingPoint())
2766 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2768 // Cannot handle SETEQ/SETNE.
2769 if (CC == ISD::SETEQ || CC == ISD::SETNE) return SDValue();
2771 MVT ResVT = Op.getValueType();
2772 MVT CmpVT = Op.getOperand(0).getValueType();
2773 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
2774 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
2776 // If the RHS of the comparison is a 0.0, we don't need to do the
2777 // subtraction at all.
2778 if (isFloatingPointZero(RHS))
2780 default: break; // SETUO etc aren't handled by fsel.
2783 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
2786 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
2787 LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
2788 return DAG.getNode(PPCISD::FSEL, ResVT, LHS, TV, FV);
2791 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
2794 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
2795 LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
2796 return DAG.getNode(PPCISD::FSEL, ResVT,
2797 DAG.getNode(ISD::FNEG, MVT::f64, LHS), TV, FV);
2802 default: break; // SETUO etc aren't handled by fsel.
2805 Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
2806 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
2807 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
2808 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
2811 Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
2812 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
2813 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
2814 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
2817 Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
2818 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
2819 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
2820 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
2823 Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
2824 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
2825 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
2826 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
2831 // FIXME: Split this code up when LegalizeDAGTypes lands.
2832 SDValue PPCTargetLowering::LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) {
2833 assert(Op.getOperand(0).getValueType().isFloatingPoint());
2834 SDValue Src = Op.getOperand(0);
2835 if (Src.getValueType() == MVT::f32)
2836 Src = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Src);
2839 switch (Op.getValueType().getSimpleVT()) {
2840 default: assert(0 && "Unhandled FP_TO_SINT type in custom expander!");
2842 Tmp = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Src);
2845 Tmp = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Src);
2849 // Convert the FP value to an int value through memory.
2850 SDValue FIPtr = DAG.CreateStackTemporary(MVT::f64);
2852 // Emit a store to the stack slot.
2853 SDValue Chain = DAG.getStore(DAG.getEntryNode(), Tmp, FIPtr, NULL, 0);
2855 // Result is a load from the stack slot. If loading 4 bytes, make sure to
2857 if (Op.getValueType() == MVT::i32)
2858 FIPtr = DAG.getNode(ISD::ADD, FIPtr.getValueType(), FIPtr,
2859 DAG.getConstant(4, FIPtr.getValueType()));
2860 return DAG.getLoad(Op.getValueType(), Chain, FIPtr, NULL, 0);
2863 SDValue PPCTargetLowering::LowerFP_ROUND_INREG(SDValue Op,
2864 SelectionDAG &DAG) {
2865 assert(Op.getValueType() == MVT::ppcf128);
2866 SDNode *Node = Op.getNode();
2867 assert(Node->getOperand(0).getValueType() == MVT::ppcf128);
2868 assert(Node->getOperand(0).getNode()->getOpcode() == ISD::BUILD_PAIR);
2869 SDValue Lo = Node->getOperand(0).getNode()->getOperand(0);
2870 SDValue Hi = Node->getOperand(0).getNode()->getOperand(1);
2872 // This sequence changes FPSCR to do round-to-zero, adds the two halves
2873 // of the long double, and puts FPSCR back the way it was. We do not
2874 // actually model FPSCR.
2875 std::vector<MVT> NodeTys;
2876 SDValue Ops[4], Result, MFFSreg, InFlag, FPreg;
2878 NodeTys.push_back(MVT::f64); // Return register
2879 NodeTys.push_back(MVT::Flag); // Returns a flag for later insns
2880 Result = DAG.getNode(PPCISD::MFFS, NodeTys, &InFlag, 0);
2881 MFFSreg = Result.getValue(0);
2882 InFlag = Result.getValue(1);
2885 NodeTys.push_back(MVT::Flag); // Returns a flag
2886 Ops[0] = DAG.getConstant(31, MVT::i32);
2888 Result = DAG.getNode(PPCISD::MTFSB1, NodeTys, Ops, 2);
2889 InFlag = Result.getValue(0);
2892 NodeTys.push_back(MVT::Flag); // Returns a flag
2893 Ops[0] = DAG.getConstant(30, MVT::i32);
2895 Result = DAG.getNode(PPCISD::MTFSB0, NodeTys, Ops, 2);
2896 InFlag = Result.getValue(0);
2899 NodeTys.push_back(MVT::f64); // result of add
2900 NodeTys.push_back(MVT::Flag); // Returns a flag
2904 Result = DAG.getNode(PPCISD::FADDRTZ, NodeTys, Ops, 3);
2905 FPreg = Result.getValue(0);
2906 InFlag = Result.getValue(1);
2909 NodeTys.push_back(MVT::f64);
2910 Ops[0] = DAG.getConstant(1, MVT::i32);
2914 Result = DAG.getNode(PPCISD::MTFSF, NodeTys, Ops, 4);
2915 FPreg = Result.getValue(0);
2917 // We know the low half is about to be thrown away, so just use something
2919 return DAG.getNode(ISD::BUILD_PAIR, Lo.getValueType(), FPreg, FPreg);
2922 SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
2923 // Don't handle ppc_fp128 here; let it be lowered to a libcall.
2924 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
2927 if (Op.getOperand(0).getValueType() == MVT::i64) {
2928 SDValue Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0));
2929 SDValue FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Bits);
2930 if (Op.getValueType() == MVT::f32)
2931 FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP, DAG.getIntPtrConstant(0));
2935 assert(Op.getOperand(0).getValueType() == MVT::i32 &&
2936 "Unhandled SINT_TO_FP type in custom expander!");
2937 // Since we only generate this in 64-bit mode, we can take advantage of
2938 // 64-bit registers. In particular, sign extend the input value into the
2939 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
2940 // then lfd it and fcfid it.
2941 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
2942 int FrameIdx = FrameInfo->CreateStackObject(8, 8);
2943 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2944 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
2946 SDValue Ext64 = DAG.getNode(PPCISD::EXTSW_32, MVT::i32,
2949 // STD the extended value into the stack slot.
2950 MachineMemOperand MO(PseudoSourceValue::getFixedStack(FrameIdx),
2951 MachineMemOperand::MOStore, 0, 8, 8);
2952 SDValue Store = DAG.getNode(PPCISD::STD_32, MVT::Other,
2953 DAG.getEntryNode(), Ext64, FIdx,
2954 DAG.getMemOperand(MO));
2955 // Load the value as a double.
2956 SDValue Ld = DAG.getLoad(MVT::f64, Store, FIdx, NULL, 0);
2958 // FCFID it and return it.
2959 SDValue FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Ld);
2960 if (Op.getValueType() == MVT::f32)
2961 FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP, DAG.getIntPtrConstant(0));
2965 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) {
2967 The rounding mode is in bits 30:31 of FPSR, and has the following
2974 FLT_ROUNDS, on the other hand, expects the following:
2981 To perform the conversion, we do:
2982 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
2985 MachineFunction &MF = DAG.getMachineFunction();
2986 MVT VT = Op.getValueType();
2987 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2988 std::vector<MVT> NodeTys;
2989 SDValue MFFSreg, InFlag;
2991 // Save FP Control Word to register
2992 NodeTys.push_back(MVT::f64); // return register
2993 NodeTys.push_back(MVT::Flag); // unused in this context
2994 SDValue Chain = DAG.getNode(PPCISD::MFFS, NodeTys, &InFlag, 0);
2996 // Save FP register to stack slot
2997 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
2998 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
2999 SDValue Store = DAG.getStore(DAG.getEntryNode(), Chain,
3000 StackSlot, NULL, 0);
3002 // Load FP Control Word from low 32 bits of stack slot.
3003 SDValue Four = DAG.getConstant(4, PtrVT);
3004 SDValue Addr = DAG.getNode(ISD::ADD, PtrVT, StackSlot, Four);
3005 SDValue CWD = DAG.getLoad(MVT::i32, Store, Addr, NULL, 0);
3007 // Transform as necessary
3009 DAG.getNode(ISD::AND, MVT::i32,
3010 CWD, DAG.getConstant(3, MVT::i32));
3012 DAG.getNode(ISD::SRL, MVT::i32,
3013 DAG.getNode(ISD::AND, MVT::i32,
3014 DAG.getNode(ISD::XOR, MVT::i32,
3015 CWD, DAG.getConstant(3, MVT::i32)),
3016 DAG.getConstant(3, MVT::i32)),
3017 DAG.getConstant(1, MVT::i8));
3020 DAG.getNode(ISD::XOR, MVT::i32, CWD1, CWD2);
3022 return DAG.getNode((VT.getSizeInBits() < 16 ?
3023 ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal);
3026 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) {
3027 MVT VT = Op.getValueType();
3028 unsigned BitWidth = VT.getSizeInBits();
3029 assert(Op.getNumOperands() == 3 &&
3030 VT == Op.getOperand(1).getValueType() &&
3033 // Expand into a bunch of logical ops. Note that these ops
3034 // depend on the PPC behavior for oversized shift amounts.
3035 SDValue Lo = Op.getOperand(0);
3036 SDValue Hi = Op.getOperand(1);
3037 SDValue Amt = Op.getOperand(2);
3038 MVT AmtVT = Amt.getValueType();
3040 SDValue Tmp1 = DAG.getNode(ISD::SUB, AmtVT,
3041 DAG.getConstant(BitWidth, AmtVT), Amt);
3042 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, VT, Hi, Amt);
3043 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, VT, Lo, Tmp1);
3044 SDValue Tmp4 = DAG.getNode(ISD::OR , VT, Tmp2, Tmp3);
3045 SDValue Tmp5 = DAG.getNode(ISD::ADD, AmtVT, Amt,
3046 DAG.getConstant(-BitWidth, AmtVT));
3047 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, VT, Lo, Tmp5);
3048 SDValue OutHi = DAG.getNode(ISD::OR, VT, Tmp4, Tmp6);
3049 SDValue OutLo = DAG.getNode(PPCISD::SHL, VT, Lo, Amt);
3050 SDValue OutOps[] = { OutLo, OutHi };
3051 return DAG.getMergeValues(OutOps, 2);
3054 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) {
3055 MVT VT = Op.getValueType();
3056 unsigned BitWidth = VT.getSizeInBits();
3057 assert(Op.getNumOperands() == 3 &&
3058 VT == Op.getOperand(1).getValueType() &&
3061 // Expand into a bunch of logical ops. Note that these ops
3062 // depend on the PPC behavior for oversized shift amounts.
3063 SDValue Lo = Op.getOperand(0);
3064 SDValue Hi = Op.getOperand(1);
3065 SDValue Amt = Op.getOperand(2);
3066 MVT AmtVT = Amt.getValueType();
3068 SDValue Tmp1 = DAG.getNode(ISD::SUB, AmtVT,
3069 DAG.getConstant(BitWidth, AmtVT), Amt);
3070 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, VT, Lo, Amt);
3071 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, VT, Hi, Tmp1);
3072 SDValue Tmp4 = DAG.getNode(ISD::OR , VT, Tmp2, Tmp3);
3073 SDValue Tmp5 = DAG.getNode(ISD::ADD, AmtVT, Amt,
3074 DAG.getConstant(-BitWidth, AmtVT));
3075 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, VT, Hi, Tmp5);
3076 SDValue OutLo = DAG.getNode(ISD::OR, VT, Tmp4, Tmp6);
3077 SDValue OutHi = DAG.getNode(PPCISD::SRL, VT, Hi, Amt);
3078 SDValue OutOps[] = { OutLo, OutHi };
3079 return DAG.getMergeValues(OutOps, 2);
3082 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) {
3083 MVT VT = Op.getValueType();
3084 unsigned BitWidth = VT.getSizeInBits();
3085 assert(Op.getNumOperands() == 3 &&
3086 VT == Op.getOperand(1).getValueType() &&
3089 // Expand into a bunch of logical ops, followed by a select_cc.
3090 SDValue Lo = Op.getOperand(0);
3091 SDValue Hi = Op.getOperand(1);
3092 SDValue Amt = Op.getOperand(2);
3093 MVT AmtVT = Amt.getValueType();
3095 SDValue Tmp1 = DAG.getNode(ISD::SUB, AmtVT,
3096 DAG.getConstant(BitWidth, AmtVT), Amt);
3097 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, VT, Lo, Amt);
3098 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, VT, Hi, Tmp1);
3099 SDValue Tmp4 = DAG.getNode(ISD::OR , VT, Tmp2, Tmp3);
3100 SDValue Tmp5 = DAG.getNode(ISD::ADD, AmtVT, Amt,
3101 DAG.getConstant(-BitWidth, AmtVT));
3102 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, VT, Hi, Tmp5);
3103 SDValue OutHi = DAG.getNode(PPCISD::SRA, VT, Hi, Amt);
3104 SDValue OutLo = DAG.getSelectCC(Tmp5, DAG.getConstant(0, AmtVT),
3105 Tmp4, Tmp6, ISD::SETLE);
3106 SDValue OutOps[] = { OutLo, OutHi };
3107 return DAG.getMergeValues(OutOps, 2);
3110 //===----------------------------------------------------------------------===//
3111 // Vector related lowering.
3114 // If this is a vector of constants or undefs, get the bits. A bit in
3115 // UndefBits is set if the corresponding element of the vector is an
3116 // ISD::UNDEF value. For undefs, the corresponding VectorBits values are
3117 // zero. Return true if this is not an array of constants, false if it is.
3119 static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t VectorBits[2],
3120 uint64_t UndefBits[2]) {
3121 // Start with zero'd results.
3122 VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0;
3124 unsigned EltBitSize = BV->getOperand(0).getValueType().getSizeInBits();
3125 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
3126 SDValue OpVal = BV->getOperand(i);
3128 unsigned PartNo = i >= e/2; // In the upper 128 bits?
3129 unsigned SlotNo = e/2 - (i & (e/2-1))-1; // Which subpiece of the uint64_t.
3131 uint64_t EltBits = 0;
3132 if (OpVal.getOpcode() == ISD::UNDEF) {
3133 uint64_t EltUndefBits = ~0U >> (32-EltBitSize);
3134 UndefBits[PartNo] |= EltUndefBits << (SlotNo*EltBitSize);
3136 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
3137 EltBits = CN->getZExtValue() & (~0U >> (32-EltBitSize));
3138 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
3139 assert(CN->getValueType(0) == MVT::f32 &&
3140 "Only one legal FP vector type!");
3141 EltBits = FloatToBits(CN->getValueAPF().convertToFloat());
3143 // Nonconstant element.
3147 VectorBits[PartNo] |= EltBits << (SlotNo*EltBitSize);
3150 //printf("%llx %llx %llx %llx\n",
3151 // VectorBits[0], VectorBits[1], UndefBits[0], UndefBits[1]);
3155 // If this is a splat (repetition) of a value across the whole vector, return
3156 // the smallest size that splats it. For example, "0x01010101010101..." is a
3157 // splat of 0x01, 0x0101, and 0x01010101. We return SplatBits = 0x01 and
3158 // SplatSize = 1 byte.
3159 static bool isConstantSplat(const uint64_t Bits128[2],
3160 const uint64_t Undef128[2],
3161 unsigned &SplatBits, unsigned &SplatUndef,
3162 unsigned &SplatSize) {
3164 // Don't let undefs prevent splats from matching. See if the top 64-bits are
3165 // the same as the lower 64-bits, ignoring undefs.
3166 if ((Bits128[0] & ~Undef128[1]) != (Bits128[1] & ~Undef128[0]))
3167 return false; // Can't be a splat if two pieces don't match.
3169 uint64_t Bits64 = Bits128[0] | Bits128[1];
3170 uint64_t Undef64 = Undef128[0] & Undef128[1];
3172 // Check that the top 32-bits are the same as the lower 32-bits, ignoring
3174 if ((Bits64 & (~Undef64 >> 32)) != ((Bits64 >> 32) & ~Undef64))
3175 return false; // Can't be a splat if two pieces don't match.
3177 uint32_t Bits32 = uint32_t(Bits64) | uint32_t(Bits64 >> 32);
3178 uint32_t Undef32 = uint32_t(Undef64) & uint32_t(Undef64 >> 32);
3180 // If the top 16-bits are different than the lower 16-bits, ignoring
3181 // undefs, we have an i32 splat.
3182 if ((Bits32 & (~Undef32 >> 16)) != ((Bits32 >> 16) & ~Undef32)) {
3184 SplatUndef = Undef32;
3189 uint16_t Bits16 = uint16_t(Bits32) | uint16_t(Bits32 >> 16);
3190 uint16_t Undef16 = uint16_t(Undef32) & uint16_t(Undef32 >> 16);
3192 // If the top 8-bits are different than the lower 8-bits, ignoring
3193 // undefs, we have an i16 splat.
3194 if ((Bits16 & (uint16_t(~Undef16) >> 8)) != ((Bits16 >> 8) & ~Undef16)) {
3196 SplatUndef = Undef16;
3201 // Otherwise, we have an 8-bit splat.
3202 SplatBits = uint8_t(Bits16) | uint8_t(Bits16 >> 8);
3203 SplatUndef = uint8_t(Undef16) & uint8_t(Undef16 >> 8);
3208 /// BuildSplatI - Build a canonical splati of Val with an element size of
3209 /// SplatSize. Cast the result to VT.
3210 static SDValue BuildSplatI(int Val, unsigned SplatSize, MVT VT,
3211 SelectionDAG &DAG) {
3212 assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");
3214 static const MVT VTys[] = { // canonical VT to use for each size.
3215 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
3218 MVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
3220 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize.
3224 MVT CanonicalVT = VTys[SplatSize-1];
3226 // Build a canonical splat for this value.
3227 SDValue Elt = DAG.getConstant(Val, CanonicalVT.getVectorElementType());
3228 SmallVector<SDValue, 8> Ops;
3229 Ops.assign(CanonicalVT.getVectorNumElements(), Elt);
3230 SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT,
3231 &Ops[0], Ops.size());
3232 return DAG.getNode(ISD::BIT_CONVERT, ReqVT, Res);
3235 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the
3236 /// specified intrinsic ID.
3237 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
3239 MVT DestVT = MVT::Other) {
3240 if (DestVT == MVT::Other) DestVT = LHS.getValueType();
3241 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT,
3242 DAG.getConstant(IID, MVT::i32), LHS, RHS);
3245 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
3246 /// specified intrinsic ID.
3247 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
3248 SDValue Op2, SelectionDAG &DAG,
3249 MVT DestVT = MVT::Other) {
3250 if (DestVT == MVT::Other) DestVT = Op0.getValueType();
3251 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT,
3252 DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2);
3256 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
3257 /// amount. The result has the specified value type.
3258 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt,
3259 MVT VT, SelectionDAG &DAG) {
3260 // Force LHS/RHS to be the right type.
3261 LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, LHS);
3262 RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, RHS);
3265 for (unsigned i = 0; i != 16; ++i)
3266 Ops[i] = DAG.getConstant(i+Amt, MVT::i8);
3267 SDValue T = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, LHS, RHS,
3268 DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops,16));
3269 return DAG.getNode(ISD::BIT_CONVERT, VT, T);
3272 // If this is a case we can't handle, return null and let the default
3273 // expansion code take care of it. If we CAN select this case, and if it
3274 // selects to a single instruction, return Op. Otherwise, if we can codegen
3275 // this case more efficiently than a constant pool load, lower it to the
3276 // sequence of ops that should be used.
3277 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
3278 SelectionDAG &DAG) {
3279 // If this is a vector of constants or undefs, get the bits. A bit in
3280 // UndefBits is set if the corresponding element of the vector is an
3281 // ISD::UNDEF value. For undefs, the corresponding VectorBits values are
3283 uint64_t VectorBits[2];
3284 uint64_t UndefBits[2];
3285 if (GetConstantBuildVectorBits(Op.getNode(), VectorBits, UndefBits))
3286 return SDValue(); // Not a constant vector.
3288 // If this is a splat (repetition) of a value across the whole vector, return
3289 // the smallest size that splats it. For example, "0x01010101010101..." is a
3290 // splat of 0x01, 0x0101, and 0x01010101. We return SplatBits = 0x01 and
3291 // SplatSize = 1 byte.
3292 unsigned SplatBits, SplatUndef, SplatSize;
3293 if (isConstantSplat(VectorBits, UndefBits, SplatBits, SplatUndef, SplatSize)){
3294 bool HasAnyUndefs = (UndefBits[0] | UndefBits[1]) != 0;
3296 // First, handle single instruction cases.
3299 if (SplatBits == 0) {
3300 // Canonicalize all zero vectors to be v4i32.
3301 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
3302 SDValue Z = DAG.getConstant(0, MVT::i32);
3303 Z = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Z, Z, Z, Z);
3304 Op = DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Z);
3309 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
3310 int32_t SextVal= int32_t(SplatBits << (32-8*SplatSize)) >> (32-8*SplatSize);
3311 if (SextVal >= -16 && SextVal <= 15)
3312 return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG);
3315 // Two instruction sequences.
3317 // If this value is in the range [-32,30] and is even, use:
3318 // tmp = VSPLTI[bhw], result = add tmp, tmp
3319 if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) {
3320 SDValue Res = BuildSplatI(SextVal >> 1, SplatSize, MVT::Other, DAG);
3321 Res = DAG.getNode(ISD::ADD, Res.getValueType(), Res, Res);
3322 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
3325 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is
3326 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important
3328 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
3329 // Make -1 and vspltisw -1:
3330 SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG);
3332 // Make the VSLW intrinsic, computing 0x8000_0000.
3333 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
3336 // xor by OnesV to invert it.
3337 Res = DAG.getNode(ISD::XOR, MVT::v4i32, Res, OnesV);
3338 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
3341 // Check to see if this is a wide variety of vsplti*, binop self cases.
3342 unsigned SplatBitSize = SplatSize*8;
3343 static const signed char SplatCsts[] = {
3344 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
3345 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
3348 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
3349 // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
3350 // cases which are ambiguous (e.g. formation of 0x8000_0000). 'vsplti -1'
3351 int i = SplatCsts[idx];
3353 // Figure out what shift amount will be used by altivec if shifted by i in
3355 unsigned TypeShiftAmt = i & (SplatBitSize-1);
3357 // vsplti + shl self.
3358 if (SextVal == (i << (int)TypeShiftAmt)) {
3359 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
3360 static const unsigned IIDs[] = { // Intrinsic to use for each size.
3361 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
3362 Intrinsic::ppc_altivec_vslw
3364 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
3365 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
3368 // vsplti + srl self.
3369 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
3370 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
3371 static const unsigned IIDs[] = { // Intrinsic to use for each size.
3372 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
3373 Intrinsic::ppc_altivec_vsrw
3375 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
3376 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
3379 // vsplti + sra self.
3380 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
3381 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
3382 static const unsigned IIDs[] = { // Intrinsic to use for each size.
3383 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
3384 Intrinsic::ppc_altivec_vsraw
3386 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
3387 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
3390 // vsplti + rol self.
3391 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
3392 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
3393 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
3394 static const unsigned IIDs[] = { // Intrinsic to use for each size.
3395 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
3396 Intrinsic::ppc_altivec_vrlw
3398 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
3399 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
3402 // t = vsplti c, result = vsldoi t, t, 1
3403 if (SextVal == ((i << 8) | (i >> (TypeShiftAmt-8)))) {
3404 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
3405 return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG);
3407 // t = vsplti c, result = vsldoi t, t, 2
3408 if (SextVal == ((i << 16) | (i >> (TypeShiftAmt-16)))) {
3409 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
3410 return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG);
3412 // t = vsplti c, result = vsldoi t, t, 3
3413 if (SextVal == ((i << 24) | (i >> (TypeShiftAmt-24)))) {
3414 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
3415 return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG);
3419 // Three instruction sequences.
3421 // Odd, in range [17,31]: (vsplti C)-(vsplti -16).
3422 if (SextVal >= 0 && SextVal <= 31) {
3423 SDValue LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG);
3424 SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG);
3425 LHS = DAG.getNode(ISD::SUB, LHS.getValueType(), LHS, RHS);
3426 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS);
3428 // Odd, in range [-31,-17]: (vsplti C)+(vsplti -16).
3429 if (SextVal >= -31 && SextVal <= 0) {
3430 SDValue LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG);
3431 SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG);
3432 LHS = DAG.getNode(ISD::ADD, LHS.getValueType(), LHS, RHS);
3433 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS);
3440 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
3441 /// the specified operations to build the shuffle.
3442 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
3443 SDValue RHS, SelectionDAG &DAG) {
3444 unsigned OpNum = (PFEntry >> 26) & 0x0F;
3445 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
3446 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
3449 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
3461 if (OpNum == OP_COPY) {
3462 if (LHSID == (1*9+2)*9+3) return LHS;
3463 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
3467 SDValue OpLHS, OpRHS;
3468 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG);
3469 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG);
3471 unsigned ShufIdxs[16];
3473 default: assert(0 && "Unknown i32 permute!");
3475 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3;
3476 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
3477 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7;
3478 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
3481 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
3482 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
3483 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
3484 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
3487 for (unsigned i = 0; i != 16; ++i)
3488 ShufIdxs[i] = (i&3)+0;
3491 for (unsigned i = 0; i != 16; ++i)
3492 ShufIdxs[i] = (i&3)+4;
3495 for (unsigned i = 0; i != 16; ++i)
3496 ShufIdxs[i] = (i&3)+8;
3499 for (unsigned i = 0; i != 16; ++i)
3500 ShufIdxs[i] = (i&3)+12;
3503 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG);
3505 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG);
3507 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG);
3510 for (unsigned i = 0; i != 16; ++i)
3511 Ops[i] = DAG.getConstant(ShufIdxs[i], MVT::i8);
3513 return DAG.getNode(ISD::VECTOR_SHUFFLE, OpLHS.getValueType(), OpLHS, OpRHS,
3514 DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16));
3517 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this
3518 /// is a shuffle we can handle in a single instruction, return it. Otherwise,
3519 /// return the code it can be lowered into. Worst case, it can always be
3520 /// lowered into a vperm.
3521 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
3522 SelectionDAG &DAG) {
3523 SDValue V1 = Op.getOperand(0);
3524 SDValue V2 = Op.getOperand(1);
3525 SDValue PermMask = Op.getOperand(2);
3527 // Cases that are handled by instructions that take permute immediates
3528 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
3529 // selected by the instruction selector.
3530 if (V2.getOpcode() == ISD::UNDEF) {
3531 if (PPC::isSplatShuffleMask(PermMask.getNode(), 1) ||
3532 PPC::isSplatShuffleMask(PermMask.getNode(), 2) ||
3533 PPC::isSplatShuffleMask(PermMask.getNode(), 4) ||
3534 PPC::isVPKUWUMShuffleMask(PermMask.getNode(), true) ||
3535 PPC::isVPKUHUMShuffleMask(PermMask.getNode(), true) ||
3536 PPC::isVSLDOIShuffleMask(PermMask.getNode(), true) != -1 ||
3537 PPC::isVMRGLShuffleMask(PermMask.getNode(), 1, true) ||
3538 PPC::isVMRGLShuffleMask(PermMask.getNode(), 2, true) ||
3539 PPC::isVMRGLShuffleMask(PermMask.getNode(), 4, true) ||
3540 PPC::isVMRGHShuffleMask(PermMask.getNode(), 1, true) ||
3541 PPC::isVMRGHShuffleMask(PermMask.getNode(), 2, true) ||
3542 PPC::isVMRGHShuffleMask(PermMask.getNode(), 4, true)) {
3547 // Altivec has a variety of "shuffle immediates" that take two vector inputs
3548 // and produce a fixed permutation. If any of these match, do not lower to
3550 if (PPC::isVPKUWUMShuffleMask(PermMask.getNode(), false) ||
3551 PPC::isVPKUHUMShuffleMask(PermMask.getNode(), false) ||
3552 PPC::isVSLDOIShuffleMask(PermMask.getNode(), false) != -1 ||
3553 PPC::isVMRGLShuffleMask(PermMask.getNode(), 1, false) ||
3554 PPC::isVMRGLShuffleMask(PermMask.getNode(), 2, false) ||
3555 PPC::isVMRGLShuffleMask(PermMask.getNode(), 4, false) ||
3556 PPC::isVMRGHShuffleMask(PermMask.getNode(), 1, false) ||
3557 PPC::isVMRGHShuffleMask(PermMask.getNode(), 2, false) ||
3558 PPC::isVMRGHShuffleMask(PermMask.getNode(), 4, false))
3561 // Check to see if this is a shuffle of 4-byte values. If so, we can use our
3562 // perfect shuffle table to emit an optimal matching sequence.
3563 unsigned PFIndexes[4];
3564 bool isFourElementShuffle = true;
3565 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
3566 unsigned EltNo = 8; // Start out undef.
3567 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte.
3568 if (PermMask.getOperand(i*4+j).getOpcode() == ISD::UNDEF)
3569 continue; // Undef, ignore it.
3571 unsigned ByteSource =
3572 cast<ConstantSDNode>(PermMask.getOperand(i*4+j))->getZExtValue();
3573 if ((ByteSource & 3) != j) {
3574 isFourElementShuffle = false;
3579 EltNo = ByteSource/4;
3580 } else if (EltNo != ByteSource/4) {
3581 isFourElementShuffle = false;
3585 PFIndexes[i] = EltNo;
3588 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
3589 // perfect shuffle vector to determine if it is cost effective to do this as
3590 // discrete instructions, or whether we should use a vperm.
3591 if (isFourElementShuffle) {
3592 // Compute the index in the perfect shuffle table.
3593 unsigned PFTableIndex =
3594 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
3596 unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
3597 unsigned Cost = (PFEntry >> 30);
3599 // Determining when to avoid vperm is tricky. Many things affect the cost
3600 // of vperm, particularly how many times the perm mask needs to be computed.
3601 // For example, if the perm mask can be hoisted out of a loop or is already
3602 // used (perhaps because there are multiple permutes with the same shuffle
3603 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of
3604 // the loop requires an extra register.
3606 // As a compromise, we only emit discrete instructions if the shuffle can be
3607 // generated in 3 or fewer operations. When we have loop information
3608 // available, if this block is within a loop, we should avoid using vperm
3609 // for 3-operation perms and use a constant pool load instead.
3611 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG);
3614 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
3615 // vector that will get spilled to the constant pool.
3616 if (V2.getOpcode() == ISD::UNDEF) V2 = V1;
3618 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
3619 // that it is in input element units, not in bytes. Convert now.
3620 MVT EltVT = V1.getValueType().getVectorElementType();
3621 unsigned BytesPerElement = EltVT.getSizeInBits()/8;
3623 SmallVector<SDValue, 16> ResultMask;
3624 for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) {
3626 if (PermMask.getOperand(i).getOpcode() == ISD::UNDEF)
3629 SrcElt = cast<ConstantSDNode>(PermMask.getOperand(i))->getZExtValue();
3631 for (unsigned j = 0; j != BytesPerElement; ++j)
3632 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
3636 SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8,
3637 &ResultMask[0], ResultMask.size());
3638 return DAG.getNode(PPCISD::VPERM, V1.getValueType(), V1, V2, VPermMask);
3641 /// getAltivecCompareInfo - Given an intrinsic, return false if it is not an
3642 /// altivec comparison. If it is, return true and fill in Opc/isDot with
3643 /// information about the intrinsic.
3644 static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc,
3646 unsigned IntrinsicID =
3647 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
3650 switch (IntrinsicID) {
3651 default: return false;
3652 // Comparison predicates.
3653 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break;
3654 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break;
3655 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break;
3656 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break;
3657 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break;
3658 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break;
3659 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break;
3660 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break;
3661 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break;
3662 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break;
3663 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break;
3664 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break;
3665 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break;
3667 // Normal Comparisons.
3668 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break;
3669 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break;
3670 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break;
3671 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break;
3672 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break;
3673 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break;
3674 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break;
3675 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break;
3676 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break;
3677 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break;
3678 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break;
3679 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break;
3680 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break;
3685 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
3686 /// lower, do it, otherwise return null.
3687 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3688 SelectionDAG &DAG) {
3689 // If this is a lowered altivec predicate compare, CompareOpc is set to the
3690 // opcode number of the comparison.
3693 if (!getAltivecCompareInfo(Op, CompareOpc, isDot))
3694 return SDValue(); // Don't custom lower most intrinsics.
3696 // If this is a non-dot comparison, make the VCMP node and we are done.
3698 SDValue Tmp = DAG.getNode(PPCISD::VCMP, Op.getOperand(2).getValueType(),
3699 Op.getOperand(1), Op.getOperand(2),
3700 DAG.getConstant(CompareOpc, MVT::i32));
3701 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Tmp);
3704 // Create the PPCISD altivec 'dot' comparison node.
3706 Op.getOperand(2), // LHS
3707 Op.getOperand(3), // RHS
3708 DAG.getConstant(CompareOpc, MVT::i32)
3710 std::vector<MVT> VTs;
3711 VTs.push_back(Op.getOperand(2).getValueType());
3712 VTs.push_back(MVT::Flag);
3713 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3);
3715 // Now that we have the comparison, emit a copy from the CR to a GPR.
3716 // This is flagged to the above dot comparison.
3717 SDValue Flags = DAG.getNode(PPCISD::MFCR, MVT::i32,
3718 DAG.getRegister(PPC::CR6, MVT::i32),
3719 CompNode.getValue(1));
3721 // Unpack the result based on how the target uses it.
3722 unsigned BitNo; // Bit # of CR6.
3723 bool InvertBit; // Invert result?
3724 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
3725 default: // Can't happen, don't crash on invalid number though.
3726 case 0: // Return the value of the EQ bit of CR6.
3727 BitNo = 0; InvertBit = false;
3729 case 1: // Return the inverted value of the EQ bit of CR6.
3730 BitNo = 0; InvertBit = true;
3732 case 2: // Return the value of the LT bit of CR6.
3733 BitNo = 2; InvertBit = false;
3735 case 3: // Return the inverted value of the LT bit of CR6.
3736 BitNo = 2; InvertBit = true;
3740 // Shift the bit into the low position.
3741 Flags = DAG.getNode(ISD::SRL, MVT::i32, Flags,
3742 DAG.getConstant(8-(3-BitNo), MVT::i32));
3744 Flags = DAG.getNode(ISD::AND, MVT::i32, Flags,
3745 DAG.getConstant(1, MVT::i32));
3747 // If we are supposed to, toggle the bit.
3749 Flags = DAG.getNode(ISD::XOR, MVT::i32, Flags,
3750 DAG.getConstant(1, MVT::i32));
3754 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
3755 SelectionDAG &DAG) {
3756 // Create a stack slot that is 16-byte aligned.
3757 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
3758 int FrameIdx = FrameInfo->CreateStackObject(16, 16);
3759 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3760 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
3762 // Store the input value into Value#0 of the stack slot.
3763 SDValue Store = DAG.getStore(DAG.getEntryNode(),
3764 Op.getOperand(0), FIdx, NULL, 0);
3766 return DAG.getLoad(Op.getValueType(), Store, FIdx, NULL, 0);
3769 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) {
3770 if (Op.getValueType() == MVT::v4i32) {
3771 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
3773 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG);
3774 SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG); // +16 as shift amt.
3776 SDValue RHSSwap = // = vrlw RHS, 16
3777 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG);
3779 // Shrinkify inputs to v8i16.
3780 LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, LHS);
3781 RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHS);
3782 RHSSwap = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHSSwap);
3784 // Low parts multiplied together, generating 32-bit results (we ignore the
3786 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
3787 LHS, RHS, DAG, MVT::v4i32);
3789 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
3790 LHS, RHSSwap, Zero, DAG, MVT::v4i32);
3791 // Shift the high parts up 16 bits.
3792 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, Neg16, DAG);
3793 return DAG.getNode(ISD::ADD, MVT::v4i32, LoProd, HiProd);
3794 } else if (Op.getValueType() == MVT::v8i16) {
3795 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
3797 SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG);
3799 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
3800 LHS, RHS, Zero, DAG);
3801 } else if (Op.getValueType() == MVT::v16i8) {
3802 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
3804 // Multiply the even 8-bit parts, producing 16-bit sums.
3805 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
3806 LHS, RHS, DAG, MVT::v8i16);
3807 EvenParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, EvenParts);
3809 // Multiply the odd 8-bit parts, producing 16-bit sums.
3810 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
3811 LHS, RHS, DAG, MVT::v8i16);
3812 OddParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, OddParts);
3814 // Merge the results together.
3816 for (unsigned i = 0; i != 8; ++i) {
3817 Ops[i*2 ] = DAG.getConstant(2*i+1, MVT::i8);
3818 Ops[i*2+1] = DAG.getConstant(2*i+1+16, MVT::i8);
3820 return DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, EvenParts, OddParts,
3821 DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16));
3823 assert(0 && "Unknown mul to lower!");
3828 /// LowerOperation - Provide custom lowering hooks for some operations.
3830 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
3831 switch (Op.getOpcode()) {
3832 default: assert(0 && "Wasn't expecting to be able to lower this!");
3833 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3834 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
3835 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
3836 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
3837 case ISD::SETCC: return LowerSETCC(Op, DAG);
3838 case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG);
3840 return LowerVASTART(Op, DAG, VarArgsFrameIndex, VarArgsStackOffset,
3841 VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget);
3844 return LowerVAARG(Op, DAG, VarArgsFrameIndex, VarArgsStackOffset,
3845 VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget);
3847 case ISD::FORMAL_ARGUMENTS:
3848 return LowerFORMAL_ARGUMENTS(Op, DAG, VarArgsFrameIndex,
3849 VarArgsStackOffset, VarArgsNumGPR,
3850 VarArgsNumFPR, PPCSubTarget);
3852 case ISD::CALL: return LowerCALL(Op, DAG, PPCSubTarget,
3853 getTargetMachine());
3854 case ISD::RET: return LowerRET(Op, DAG, getTargetMachine());
3855 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, PPCSubTarget);
3856 case ISD::DYNAMIC_STACKALLOC:
3857 return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget);
3859 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
3860 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
3861 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
3862 case ISD::FP_ROUND_INREG: return LowerFP_ROUND_INREG(Op, DAG);
3863 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
3865 // Lower 64-bit shifts.
3866 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG);
3867 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG);
3868 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG);
3870 // Vector-related lowering.
3871 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
3872 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
3873 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3874 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
3875 case ISD::MUL: return LowerMUL(Op, DAG);
3877 // Frame & Return address.
3878 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
3879 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
3884 SDNode *PPCTargetLowering::ReplaceNodeResults(SDNode *N, SelectionDAG &DAG) {
3885 switch (N->getOpcode()) {
3886 default: assert(0 && "Wasn't expecting to be able to lower this!");
3887 case ISD::FP_TO_SINT: {
3888 SDValue Res = LowerFP_TO_SINT(SDValue(N, 0), DAG);
3889 // Use MERGE_VALUES to drop the chain result value and get a node with one
3890 // result. This requires turning off getMergeValues simplification, since
3891 // otherwise it will give us Res back.
3892 return DAG.getMergeValues(&Res, 1, false).getNode();
3898 //===----------------------------------------------------------------------===//
3899 // Other Lowering Code
3900 //===----------------------------------------------------------------------===//
3903 PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
3904 bool is64bit, unsigned BinOpcode) {
3905 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
3906 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
3908 const BasicBlock *LLVM_BB = BB->getBasicBlock();
3909 MachineFunction *F = BB->getParent();
3910 MachineFunction::iterator It = BB;
3913 unsigned dest = MI->getOperand(0).getReg();
3914 unsigned ptrA = MI->getOperand(1).getReg();
3915 unsigned ptrB = MI->getOperand(2).getReg();
3916 unsigned incr = MI->getOperand(3).getReg();
3918 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
3919 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
3920 F->insert(It, loopMBB);
3921 F->insert(It, exitMBB);
3922 exitMBB->transferSuccessors(BB);
3924 MachineRegisterInfo &RegInfo = F->getRegInfo();
3925 unsigned TmpReg = (!BinOpcode) ? incr :
3926 RegInfo.createVirtualRegister(
3927 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass :
3928 (const TargetRegisterClass *) &PPC::GPRCRegClass);
3932 // fallthrough --> loopMBB
3933 BB->addSuccessor(loopMBB);
3936 // l[wd]arx dest, ptr
3937 // add r0, dest, incr
3938 // st[wd]cx. r0, ptr
3940 // fallthrough --> exitMBB
3942 BuildMI(BB, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest)
3943 .addReg(ptrA).addReg(ptrB);
3945 BuildMI(BB, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
3946 BuildMI(BB, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
3947 .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
3948 BuildMI(BB, TII->get(PPC::BCC))
3949 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
3950 BB->addSuccessor(loopMBB);
3951 BB->addSuccessor(exitMBB);
3960 PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI,
3961 MachineBasicBlock *BB,
3962 bool is8bit, // operation
3963 unsigned BinOpcode) {
3964 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
3965 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
3966 // In 64 bit mode we have to use 64 bits for addresses, even though the
3967 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address
3968 // registers without caring whether they're 32 or 64, but here we're
3969 // doing actual arithmetic on the addresses.
3970 bool is64bit = PPCSubTarget.isPPC64();
3972 const BasicBlock *LLVM_BB = BB->getBasicBlock();
3973 MachineFunction *F = BB->getParent();
3974 MachineFunction::iterator It = BB;
3977 unsigned dest = MI->getOperand(0).getReg();
3978 unsigned ptrA = MI->getOperand(1).getReg();
3979 unsigned ptrB = MI->getOperand(2).getReg();
3980 unsigned incr = MI->getOperand(3).getReg();
3982 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
3983 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
3984 F->insert(It, loopMBB);
3985 F->insert(It, exitMBB);
3986 exitMBB->transferSuccessors(BB);
3988 MachineRegisterInfo &RegInfo = F->getRegInfo();
3989 const TargetRegisterClass *RC =
3990 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass :
3991 (const TargetRegisterClass *) &PPC::GPRCRegClass;
3992 unsigned PtrReg = RegInfo.createVirtualRegister(RC);
3993 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
3994 unsigned ShiftReg = RegInfo.createVirtualRegister(RC);
3995 unsigned Incr2Reg = RegInfo.createVirtualRegister(RC);
3996 unsigned MaskReg = RegInfo.createVirtualRegister(RC);
3997 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
3998 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
3999 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
4000 unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC);
4001 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
4002 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
4004 unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC);
4008 // fallthrough --> loopMBB
4009 BB->addSuccessor(loopMBB);
4011 // The 4-byte load must be aligned, while a char or short may be
4012 // anywhere in the word. Hence all this nasty bookkeeping code.
4013 // add ptr1, ptrA, ptrB [copy if ptrA==0]
4014 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
4015 // xori shift, shift1, 24 [16]
4016 // rlwinm ptr, ptr1, 0, 0, 29
4017 // slw incr2, incr, shift
4018 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
4019 // slw mask, mask2, shift
4021 // lwarx tmpDest, ptr
4022 // add tmp, tmpDest, incr2
4023 // andc tmp2, tmpDest, mask
4024 // and tmp3, tmp, mask
4025 // or tmp4, tmp3, tmp2
4028 // fallthrough --> exitMBB
4029 // srw dest, tmpDest, shift
4031 if (ptrA!=PPC::R0) {
4032 Ptr1Reg = RegInfo.createVirtualRegister(RC);
4033 BuildMI(BB, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
4034 .addReg(ptrA).addReg(ptrB);
4038 BuildMI(BB, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
4039 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
4040 BuildMI(BB, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
4041 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
4043 BuildMI(BB, TII->get(PPC::RLDICR), PtrReg)
4044 .addReg(Ptr1Reg).addImm(0).addImm(61);
4046 BuildMI(BB, TII->get(PPC::RLWINM), PtrReg)
4047 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
4048 BuildMI(BB, TII->get(PPC::SLW), Incr2Reg)
4049 .addReg(incr).addReg(ShiftReg);
4051 BuildMI(BB, TII->get(PPC::LI), Mask2Reg).addImm(255);
4053 BuildMI(BB, TII->get(PPC::LI), Mask3Reg).addImm(0);
4054 BuildMI(BB, TII->get(PPC::ORI), Mask2Reg).addReg(Mask3Reg).addImm(65535);
4056 BuildMI(BB, TII->get(PPC::SLW), MaskReg)
4057 .addReg(Mask2Reg).addReg(ShiftReg);
4060 BuildMI(BB, TII->get(PPC::LWARX), TmpDestReg)
4061 .addReg(PPC::R0).addReg(PtrReg);
4063 BuildMI(BB, TII->get(BinOpcode), TmpReg)
4064 .addReg(Incr2Reg).addReg(TmpDestReg);
4065 BuildMI(BB, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg)
4066 .addReg(TmpDestReg).addReg(MaskReg);
4067 BuildMI(BB, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg)
4068 .addReg(TmpReg).addReg(MaskReg);
4069 BuildMI(BB, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg)
4070 .addReg(Tmp3Reg).addReg(Tmp2Reg);
4071 BuildMI(BB, TII->get(PPC::STWCX))
4072 .addReg(Tmp4Reg).addReg(PPC::R0).addReg(PtrReg);
4073 BuildMI(BB, TII->get(PPC::BCC))
4074 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
4075 BB->addSuccessor(loopMBB);
4076 BB->addSuccessor(exitMBB);
4081 BuildMI(BB, TII->get(PPC::SRW), dest).addReg(TmpDestReg).addReg(ShiftReg);
4086 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
4087 MachineBasicBlock *BB) {
4088 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
4090 // To "insert" these instructions we actually have to insert their
4091 // control-flow patterns.
4092 const BasicBlock *LLVM_BB = BB->getBasicBlock();
4093 MachineFunction::iterator It = BB;
4096 MachineFunction *F = BB->getParent();
4098 if (MI->getOpcode() == PPC::SELECT_CC_I4 ||
4099 MI->getOpcode() == PPC::SELECT_CC_I8 ||
4100 MI->getOpcode() == PPC::SELECT_CC_F4 ||
4101 MI->getOpcode() == PPC::SELECT_CC_F8 ||
4102 MI->getOpcode() == PPC::SELECT_CC_VRRC) {
4104 // The incoming instruction knows the destination vreg to set, the
4105 // condition code register to branch on, the true/false values to
4106 // select between, and a branch opcode to use.
4111 // cmpTY ccX, r1, r2
4113 // fallthrough --> copy0MBB
4114 MachineBasicBlock *thisMBB = BB;
4115 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
4116 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
4117 unsigned SelectPred = MI->getOperand(4).getImm();
4118 BuildMI(BB, TII->get(PPC::BCC))
4119 .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
4120 F->insert(It, copy0MBB);
4121 F->insert(It, sinkMBB);
4122 // Update machine-CFG edges by transferring all successors of the current
4123 // block to the new block which will contain the Phi node for the select.
4124 sinkMBB->transferSuccessors(BB);
4125 // Next, add the true and fallthrough blocks as its successors.
4126 BB->addSuccessor(copy0MBB);
4127 BB->addSuccessor(sinkMBB);
4130 // %FalseValue = ...
4131 // # fallthrough to sinkMBB
4134 // Update machine-CFG edges
4135 BB->addSuccessor(sinkMBB);
4138 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
4141 BuildMI(BB, TII->get(PPC::PHI), MI->getOperand(0).getReg())
4142 .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
4143 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
4145 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
4146 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
4147 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
4148 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
4149 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
4150 BB = EmitAtomicBinary(MI, BB, false, PPC::ADD4);
4151 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
4152 BB = EmitAtomicBinary(MI, BB, true, PPC::ADD8);
4154 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
4155 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
4156 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
4157 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
4158 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
4159 BB = EmitAtomicBinary(MI, BB, false, PPC::AND);
4160 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
4161 BB = EmitAtomicBinary(MI, BB, true, PPC::AND8);
4163 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
4164 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
4165 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
4166 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
4167 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
4168 BB = EmitAtomicBinary(MI, BB, false, PPC::OR);
4169 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
4170 BB = EmitAtomicBinary(MI, BB, true, PPC::OR8);
4172 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
4173 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
4174 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
4175 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
4176 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
4177 BB = EmitAtomicBinary(MI, BB, false, PPC::XOR);
4178 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
4179 BB = EmitAtomicBinary(MI, BB, true, PPC::XOR8);
4181 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
4182 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ANDC);
4183 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
4184 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ANDC);
4185 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
4186 BB = EmitAtomicBinary(MI, BB, false, PPC::ANDC);
4187 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
4188 BB = EmitAtomicBinary(MI, BB, true, PPC::ANDC8);
4190 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
4191 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
4192 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
4193 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
4194 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
4195 BB = EmitAtomicBinary(MI, BB, false, PPC::SUBF);
4196 else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
4197 BB = EmitAtomicBinary(MI, BB, true, PPC::SUBF8);
4199 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I8)
4200 BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
4201 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I16)
4202 BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
4203 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I32)
4204 BB = EmitAtomicBinary(MI, BB, false, 0);
4205 else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I64)
4206 BB = EmitAtomicBinary(MI, BB, true, 0);
4208 else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
4209 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64) {
4210 bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
4212 unsigned dest = MI->getOperand(0).getReg();
4213 unsigned ptrA = MI->getOperand(1).getReg();
4214 unsigned ptrB = MI->getOperand(2).getReg();
4215 unsigned oldval = MI->getOperand(3).getReg();
4216 unsigned newval = MI->getOperand(4).getReg();
4218 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
4219 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
4220 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
4221 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
4222 F->insert(It, loop1MBB);
4223 F->insert(It, loop2MBB);
4224 F->insert(It, midMBB);
4225 F->insert(It, exitMBB);
4226 exitMBB->transferSuccessors(BB);
4230 // fallthrough --> loopMBB
4231 BB->addSuccessor(loop1MBB);
4234 // l[wd]arx dest, ptr
4235 // cmp[wd] dest, oldval
4238 // st[wd]cx. newval, ptr
4242 // st[wd]cx. dest, ptr
4245 BuildMI(BB, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest)
4246 .addReg(ptrA).addReg(ptrB);
4247 BuildMI(BB, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
4248 .addReg(oldval).addReg(dest);
4249 BuildMI(BB, TII->get(PPC::BCC))
4250 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
4251 BB->addSuccessor(loop2MBB);
4252 BB->addSuccessor(midMBB);
4255 BuildMI(BB, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
4256 .addReg(newval).addReg(ptrA).addReg(ptrB);
4257 BuildMI(BB, TII->get(PPC::BCC))
4258 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
4259 BuildMI(BB, TII->get(PPC::B)).addMBB(exitMBB);
4260 BB->addSuccessor(loop1MBB);
4261 BB->addSuccessor(exitMBB);
4264 BuildMI(BB, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
4265 .addReg(dest).addReg(ptrA).addReg(ptrB);
4266 BB->addSuccessor(exitMBB);
4271 } else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
4272 MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
4273 // We must use 64-bit registers for addresses when targeting 64-bit,
4274 // since we're actually doing arithmetic on them. Other registers
4276 bool is64bit = PPCSubTarget.isPPC64();
4277 bool is8bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
4279 unsigned dest = MI->getOperand(0).getReg();
4280 unsigned ptrA = MI->getOperand(1).getReg();
4281 unsigned ptrB = MI->getOperand(2).getReg();
4282 unsigned oldval = MI->getOperand(3).getReg();
4283 unsigned newval = MI->getOperand(4).getReg();
4285 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
4286 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
4287 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
4288 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
4289 F->insert(It, loop1MBB);
4290 F->insert(It, loop2MBB);
4291 F->insert(It, midMBB);
4292 F->insert(It, exitMBB);
4293 exitMBB->transferSuccessors(BB);
4295 MachineRegisterInfo &RegInfo = F->getRegInfo();
4296 const TargetRegisterClass *RC =
4297 is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass :
4298 (const TargetRegisterClass *) &PPC::GPRCRegClass;
4299 unsigned PtrReg = RegInfo.createVirtualRegister(RC);
4300 unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
4301 unsigned ShiftReg = RegInfo.createVirtualRegister(RC);
4302 unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC);
4303 unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC);
4304 unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC);
4305 unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC);
4306 unsigned MaskReg = RegInfo.createVirtualRegister(RC);
4307 unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
4308 unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
4309 unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
4310 unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
4311 unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
4313 unsigned TmpReg = RegInfo.createVirtualRegister(RC);
4316 // fallthrough --> loopMBB
4317 BB->addSuccessor(loop1MBB);
4319 // The 4-byte load must be aligned, while a char or short may be
4320 // anywhere in the word. Hence all this nasty bookkeeping code.
4321 // add ptr1, ptrA, ptrB [copy if ptrA==0]
4322 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
4323 // xori shift, shift1, 24 [16]
4324 // rlwinm ptr, ptr1, 0, 0, 29
4325 // slw newval2, newval, shift
4326 // slw oldval2, oldval,shift
4327 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
4328 // slw mask, mask2, shift
4329 // and newval3, newval2, mask
4330 // and oldval3, oldval2, mask
4332 // lwarx tmpDest, ptr
4333 // and tmp, tmpDest, mask
4334 // cmpw tmp, oldval3
4337 // andc tmp2, tmpDest, mask
4338 // or tmp4, tmp2, newval3
4343 // stwcx. tmpDest, ptr
4345 // srw dest, tmpDest, shift
4346 if (ptrA!=PPC::R0) {
4347 Ptr1Reg = RegInfo.createVirtualRegister(RC);
4348 BuildMI(BB, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
4349 .addReg(ptrA).addReg(ptrB);
4353 BuildMI(BB, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
4354 .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
4355 BuildMI(BB, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
4356 .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
4358 BuildMI(BB, TII->get(PPC::RLDICR), PtrReg)
4359 .addReg(Ptr1Reg).addImm(0).addImm(61);
4361 BuildMI(BB, TII->get(PPC::RLWINM), PtrReg)
4362 .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
4363 BuildMI(BB, TII->get(PPC::SLW), NewVal2Reg)
4364 .addReg(newval).addReg(ShiftReg);
4365 BuildMI(BB, TII->get(PPC::SLW), OldVal2Reg)
4366 .addReg(oldval).addReg(ShiftReg);
4368 BuildMI(BB, TII->get(PPC::LI), Mask2Reg).addImm(255);
4370 BuildMI(BB, TII->get(PPC::LI), Mask3Reg).addImm(0);
4371 BuildMI(BB, TII->get(PPC::ORI), Mask2Reg).addReg(Mask3Reg).addImm(65535);
4373 BuildMI(BB, TII->get(PPC::SLW), MaskReg)
4374 .addReg(Mask2Reg).addReg(ShiftReg);
4375 BuildMI(BB, TII->get(PPC::AND), NewVal3Reg)
4376 .addReg(NewVal2Reg).addReg(MaskReg);
4377 BuildMI(BB, TII->get(PPC::AND), OldVal3Reg)
4378 .addReg(OldVal2Reg).addReg(MaskReg);
4381 BuildMI(BB, TII->get(PPC::LWARX), TmpDestReg)
4382 .addReg(PPC::R0).addReg(PtrReg);
4383 BuildMI(BB, TII->get(PPC::AND),TmpReg).addReg(TmpDestReg).addReg(MaskReg);
4384 BuildMI(BB, TII->get(PPC::CMPW), PPC::CR0)
4385 .addReg(TmpReg).addReg(OldVal3Reg);
4386 BuildMI(BB, TII->get(PPC::BCC))
4387 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
4388 BB->addSuccessor(loop2MBB);
4389 BB->addSuccessor(midMBB);
4392 BuildMI(BB, TII->get(PPC::ANDC),Tmp2Reg).addReg(TmpDestReg).addReg(MaskReg);
4393 BuildMI(BB, TII->get(PPC::OR),Tmp4Reg).addReg(Tmp2Reg).addReg(NewVal3Reg);
4394 BuildMI(BB, TII->get(PPC::STWCX)).addReg(Tmp4Reg)
4395 .addReg(PPC::R0).addReg(PtrReg);
4396 BuildMI(BB, TII->get(PPC::BCC))
4397 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
4398 BuildMI(BB, TII->get(PPC::B)).addMBB(exitMBB);
4399 BB->addSuccessor(loop1MBB);
4400 BB->addSuccessor(exitMBB);
4403 BuildMI(BB, TII->get(PPC::STWCX)).addReg(TmpDestReg)
4404 .addReg(PPC::R0).addReg(PtrReg);
4405 BB->addSuccessor(exitMBB);
4410 BuildMI(BB, TII->get(PPC::SRW),dest).addReg(TmpReg).addReg(ShiftReg);
4412 assert(0 && "Unexpected instr type to insert");
4415 F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
4419 //===----------------------------------------------------------------------===//
4420 // Target Optimization Hooks
4421 //===----------------------------------------------------------------------===//
4423 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
4424 DAGCombinerInfo &DCI) const {
4425 TargetMachine &TM = getTargetMachine();
4426 SelectionDAG &DAG = DCI.DAG;
4427 switch (N->getOpcode()) {
4430 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
4431 if (C->getZExtValue() == 0) // 0 << V -> 0.
4432 return N->getOperand(0);
4436 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
4437 if (C->getZExtValue() == 0) // 0 >>u V -> 0.
4438 return N->getOperand(0);
4442 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
4443 if (C->getZExtValue() == 0 || // 0 >>s V -> 0.
4444 C->isAllOnesValue()) // -1 >>s V -> -1.
4445 return N->getOperand(0);
4449 case ISD::SINT_TO_FP:
4450 if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
4451 if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) {
4452 // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores.
4453 // We allow the src/dst to be either f32/f64, but the intermediate
4454 // type must be i64.
4455 if (N->getOperand(0).getValueType() == MVT::i64 &&
4456 N->getOperand(0).getOperand(0).getValueType() != MVT::ppcf128) {
4457 SDValue Val = N->getOperand(0).getOperand(0);
4458 if (Val.getValueType() == MVT::f32) {
4459 Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
4460 DCI.AddToWorklist(Val.getNode());
4463 Val = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Val);
4464 DCI.AddToWorklist(Val.getNode());
4465 Val = DAG.getNode(PPCISD::FCFID, MVT::f64, Val);
4466 DCI.AddToWorklist(Val.getNode());
4467 if (N->getValueType(0) == MVT::f32) {
4468 Val = DAG.getNode(ISD::FP_ROUND, MVT::f32, Val,
4469 DAG.getIntPtrConstant(0));
4470 DCI.AddToWorklist(Val.getNode());
4473 } else if (N->getOperand(0).getValueType() == MVT::i32) {
4474 // If the intermediate type is i32, we can avoid the load/store here
4481 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
4482 if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() &&
4483 !cast<StoreSDNode>(N)->isTruncatingStore() &&
4484 N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
4485 N->getOperand(1).getValueType() == MVT::i32 &&
4486 N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) {
4487 SDValue Val = N->getOperand(1).getOperand(0);
4488 if (Val.getValueType() == MVT::f32) {
4489 Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
4490 DCI.AddToWorklist(Val.getNode());
4492 Val = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Val);
4493 DCI.AddToWorklist(Val.getNode());
4495 Val = DAG.getNode(PPCISD::STFIWX, MVT::Other, N->getOperand(0), Val,
4496 N->getOperand(2), N->getOperand(3));
4497 DCI.AddToWorklist(Val.getNode());
4501 // Turn STORE (BSWAP) -> sthbrx/stwbrx.
4502 if (N->getOperand(1).getOpcode() == ISD::BSWAP &&
4503 N->getOperand(1).getNode()->hasOneUse() &&
4504 (N->getOperand(1).getValueType() == MVT::i32 ||
4505 N->getOperand(1).getValueType() == MVT::i16)) {
4506 SDValue BSwapOp = N->getOperand(1).getOperand(0);
4507 // Do an any-extend to 32-bits if this is a half-word input.
4508 if (BSwapOp.getValueType() == MVT::i16)
4509 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, BSwapOp);
4511 return DAG.getNode(PPCISD::STBRX, MVT::Other, N->getOperand(0), BSwapOp,
4512 N->getOperand(2), N->getOperand(3),
4513 DAG.getValueType(N->getOperand(1).getValueType()));
4517 // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
4518 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
4519 N->getOperand(0).hasOneUse() &&
4520 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16)) {
4521 SDValue Load = N->getOperand(0);
4522 LoadSDNode *LD = cast<LoadSDNode>(Load);
4523 // Create the byte-swapping load.
4524 std::vector<MVT> VTs;
4525 VTs.push_back(MVT::i32);
4526 VTs.push_back(MVT::Other);
4527 SDValue MO = DAG.getMemOperand(LD->getMemOperand());
4529 LD->getChain(), // Chain
4530 LD->getBasePtr(), // Ptr
4532 DAG.getValueType(N->getValueType(0)) // VT
4534 SDValue BSLoad = DAG.getNode(PPCISD::LBRX, VTs, Ops, 4);
4536 // If this is an i16 load, insert the truncate.
4537 SDValue ResVal = BSLoad;
4538 if (N->getValueType(0) == MVT::i16)
4539 ResVal = DAG.getNode(ISD::TRUNCATE, MVT::i16, BSLoad);
4541 // First, combine the bswap away. This makes the value produced by the
4543 DCI.CombineTo(N, ResVal);
4545 // Next, combine the load away, we give it a bogus result value but a real
4546 // chain result. The result value is dead because the bswap is dead.
4547 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
4549 // Return N so it doesn't get rechecked!
4550 return SDValue(N, 0);
4554 case PPCISD::VCMP: {
4555 // If a VCMPo node already exists with exactly the same operands as this
4556 // node, use its result instead of this node (VCMPo computes both a CR6 and
4557 // a normal output).
4559 if (!N->getOperand(0).hasOneUse() &&
4560 !N->getOperand(1).hasOneUse() &&
4561 !N->getOperand(2).hasOneUse()) {
4563 // Scan all of the users of the LHS, looking for VCMPo's that match.
4564 SDNode *VCMPoNode = 0;
4566 SDNode *LHSN = N->getOperand(0).getNode();
4567 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
4569 if (UI->getOpcode() == PPCISD::VCMPo &&
4570 UI->getOperand(1) == N->getOperand(1) &&
4571 UI->getOperand(2) == N->getOperand(2) &&
4572 UI->getOperand(0) == N->getOperand(0)) {
4577 // If there is no VCMPo node, or if the flag value has a single use, don't
4579 if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
4582 // Look at the (necessarily single) use of the flag value. If it has a
4583 // chain, this transformation is more complex. Note that multiple things
4584 // could use the value result, which we should ignore.
4585 SDNode *FlagUser = 0;
4586 for (SDNode::use_iterator UI = VCMPoNode->use_begin();
4587 FlagUser == 0; ++UI) {
4588 assert(UI != VCMPoNode->use_end() && "Didn't find user!");
4590 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
4591 if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
4598 // If the user is a MFCR instruction, we know this is safe. Otherwise we
4599 // give up for right now.
4600 if (FlagUser->getOpcode() == PPCISD::MFCR)
4601 return SDValue(VCMPoNode, 0);
4606 // If this is a branch on an altivec predicate comparison, lower this so
4607 // that we don't have to do a MFCR: instead, branch directly on CR6. This
4608 // lowering is done pre-legalize, because the legalizer lowers the predicate
4609 // compare down to code that is difficult to reassemble.
4610 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
4611 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
4615 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
4616 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
4617 getAltivecCompareInfo(LHS, CompareOpc, isDot)) {
4618 assert(isDot && "Can't compare against a vector result!");
4620 // If this is a comparison against something other than 0/1, then we know
4621 // that the condition is never/always true.
4622 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
4623 if (Val != 0 && Val != 1) {
4624 if (CC == ISD::SETEQ) // Cond never true, remove branch.
4625 return N->getOperand(0);
4626 // Always !=, turn it into an unconditional branch.
4627 return DAG.getNode(ISD::BR, MVT::Other,
4628 N->getOperand(0), N->getOperand(4));
4631 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
4633 // Create the PPCISD altivec 'dot' comparison node.
4634 std::vector<MVT> VTs;
4636 LHS.getOperand(2), // LHS of compare
4637 LHS.getOperand(3), // RHS of compare
4638 DAG.getConstant(CompareOpc, MVT::i32)
4640 VTs.push_back(LHS.getOperand(2).getValueType());
4641 VTs.push_back(MVT::Flag);
4642 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3);
4644 // Unpack the result based on how the target uses it.
4645 PPC::Predicate CompOpc;
4646 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
4647 default: // Can't happen, don't crash on invalid number though.
4648 case 0: // Branch on the value of the EQ bit of CR6.
4649 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
4651 case 1: // Branch on the inverted value of the EQ bit of CR6.
4652 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
4654 case 2: // Branch on the value of the LT bit of CR6.
4655 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
4657 case 3: // Branch on the inverted value of the LT bit of CR6.
4658 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
4662 return DAG.getNode(PPCISD::COND_BRANCH, MVT::Other, N->getOperand(0),
4663 DAG.getConstant(CompOpc, MVT::i32),
4664 DAG.getRegister(PPC::CR6, MVT::i32),
4665 N->getOperand(4), CompNode.getValue(1));
4674 //===----------------------------------------------------------------------===//
4675 // Inline Assembly Support
4676 //===----------------------------------------------------------------------===//
4678 void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
4682 const SelectionDAG &DAG,
4683 unsigned Depth) const {
4684 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
4685 switch (Op.getOpcode()) {
4687 case PPCISD::LBRX: {
4688 // lhbrx is known to have the top bits cleared out.
4689 if (cast<VTSDNode>(Op.getOperand(3))->getVT() == MVT::i16)
4690 KnownZero = 0xFFFF0000;
4693 case ISD::INTRINSIC_WO_CHAIN: {
4694 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
4696 case Intrinsic::ppc_altivec_vcmpbfp_p:
4697 case Intrinsic::ppc_altivec_vcmpeqfp_p:
4698 case Intrinsic::ppc_altivec_vcmpequb_p:
4699 case Intrinsic::ppc_altivec_vcmpequh_p:
4700 case Intrinsic::ppc_altivec_vcmpequw_p:
4701 case Intrinsic::ppc_altivec_vcmpgefp_p:
4702 case Intrinsic::ppc_altivec_vcmpgtfp_p:
4703 case Intrinsic::ppc_altivec_vcmpgtsb_p:
4704 case Intrinsic::ppc_altivec_vcmpgtsh_p:
4705 case Intrinsic::ppc_altivec_vcmpgtsw_p:
4706 case Intrinsic::ppc_altivec_vcmpgtub_p:
4707 case Intrinsic::ppc_altivec_vcmpgtuh_p:
4708 case Intrinsic::ppc_altivec_vcmpgtuw_p:
4709 KnownZero = ~1U; // All bits but the low one are known to be zero.
4717 /// getConstraintType - Given a constraint, return the type of
4718 /// constraint it is for this target.
4719 PPCTargetLowering::ConstraintType
4720 PPCTargetLowering::getConstraintType(const std::string &Constraint) const {
4721 if (Constraint.size() == 1) {
4722 switch (Constraint[0]) {
4729 return C_RegisterClass;
4732 return TargetLowering::getConstraintType(Constraint);
4735 std::pair<unsigned, const TargetRegisterClass*>
4736 PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
4738 if (Constraint.size() == 1) {
4739 // GCC RS6000 Constraint Letters
4740 switch (Constraint[0]) {
4743 if (VT == MVT::i64 && PPCSubTarget.isPPC64())
4744 return std::make_pair(0U, PPC::G8RCRegisterClass);
4745 return std::make_pair(0U, PPC::GPRCRegisterClass);
4748 return std::make_pair(0U, PPC::F4RCRegisterClass);
4749 else if (VT == MVT::f64)
4750 return std::make_pair(0U, PPC::F8RCRegisterClass);
4753 return std::make_pair(0U, PPC::VRRCRegisterClass);
4755 return std::make_pair(0U, PPC::CRRCRegisterClass);
4759 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
4763 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
4764 /// vector. If it is invalid, don't add anything to Ops. If hasMemory is true
4765 /// it means one of the asm constraint of the inline asm instruction being
4766 /// processed is 'm'.
4767 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, char Letter,
4769 std::vector<SDValue>&Ops,
4770 SelectionDAG &DAG) const {
4771 SDValue Result(0,0);
4782 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
4783 if (!CST) return; // Must be an immediate to match.
4784 unsigned Value = CST->getZExtValue();
4786 default: assert(0 && "Unknown constraint letter!");
4787 case 'I': // "I" is a signed 16-bit constant.
4788 if ((short)Value == (int)Value)
4789 Result = DAG.getTargetConstant(Value, Op.getValueType());
4791 case 'J': // "J" is a constant with only the high-order 16 bits nonzero.
4792 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits.
4793 if ((short)Value == 0)
4794 Result = DAG.getTargetConstant(Value, Op.getValueType());
4796 case 'K': // "K" is a constant with only the low-order 16 bits nonzero.
4797 if ((Value >> 16) == 0)
4798 Result = DAG.getTargetConstant(Value, Op.getValueType());
4800 case 'M': // "M" is a constant that is greater than 31.
4802 Result = DAG.getTargetConstant(Value, Op.getValueType());
4804 case 'N': // "N" is a positive constant that is an exact power of two.
4805 if ((int)Value > 0 && isPowerOf2_32(Value))
4806 Result = DAG.getTargetConstant(Value, Op.getValueType());
4808 case 'O': // "O" is the constant zero.
4810 Result = DAG.getTargetConstant(Value, Op.getValueType());
4812 case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
4813 if ((short)-Value == (int)-Value)
4814 Result = DAG.getTargetConstant(Value, Op.getValueType());
4821 if (Result.getNode()) {
4822 Ops.push_back(Result);
4826 // Handle standard constraint letters.
4827 TargetLowering::LowerAsmOperandForConstraint(Op, Letter, hasMemory, Ops, DAG);
4830 // isLegalAddressingMode - Return true if the addressing mode represented
4831 // by AM is legal for this target, for a load/store of the specified type.
4832 bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM,
4833 const Type *Ty) const {
4834 // FIXME: PPC does not allow r+i addressing modes for vectors!
4836 // PPC allows a sign-extended 16-bit immediate field.
4837 if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
4840 // No global is ever allowed as a base.
4844 // PPC only support r+r,
4846 case 0: // "r+i" or just "i", depending on HasBaseReg.
4849 if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
4851 // Otherwise we have r+r or r+i.
4854 if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
4856 // Allow 2*r as r+r.
4859 // No other scales are supported.
4866 /// isLegalAddressImmediate - Return true if the integer value can be used
4867 /// as the offset of the target addressing mode for load / store of the
4869 bool PPCTargetLowering::isLegalAddressImmediate(int64_t V,const Type *Ty) const{
4870 // PPC allows a sign-extended 16-bit immediate field.
4871 return (V > -(1 << 16) && V < (1 << 16)-1);
4874 bool PPCTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
4878 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) {
4879 // Depths > 0 not supported yet!
4880 if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
4883 MachineFunction &MF = DAG.getMachineFunction();
4884 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
4886 // Just load the return address off the stack.
4887 SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
4889 // Make sure the function really does not optimize away the store of the RA
4891 FuncInfo->setLRStoreRequired();
4892 return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0);
4895 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
4896 // Depths > 0 not supported yet!
4897 if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
4900 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
4901 bool isPPC64 = PtrVT == MVT::i64;
4903 MachineFunction &MF = DAG.getMachineFunction();
4904 MachineFrameInfo *MFI = MF.getFrameInfo();
4905 bool is31 = (NoFramePointerElim || MFI->hasVarSizedObjects())
4906 && MFI->getStackSize();
4909 return DAG.getCopyFromReg(DAG.getEntryNode(), is31 ? PPC::X31 : PPC::X1,
4912 return DAG.getCopyFromReg(DAG.getEntryNode(), is31 ? PPC::R31 : PPC::R1,