//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//
#include "PPCISelLowering.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPredicates.h"
#include "PPCTargetMachine.h"
#include "PPCPerfectShuffle.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/ParameterAttributes.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;

static cl::opt<bool> EnablePPCPreinc("enable-ppc-preinc",
cl::desc("enable preincrement load/store generation on PPC (experimental)"),
                                     cl::Hidden);

PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
  : TargetLowering(TM), PPCSubTarget(*TM.getSubtargetImpl()) {

  setPow2DivIsCheap();

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // Set up the register classes.
  addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
  addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
  addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadXAction(ISD::SEXTLOAD, MVT::i8, Expand);
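  // (Roughly: the Promote turns a sign-extending i1 load into an i8 one, and
  // the Expand turns a sign-extending i8 load into a zero-extending byte
  // load followed by a separate sign extension, since only the halfword
  // form (lha) sign-extends natively.)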

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);

  // Shortening conversions involving ppcf128 get expanded (2 regs -> 1 reg)
  setConvertAction(MVT::ppcf128, MVT::f64, Expand);
  setConvertAction(MVT::ppcf128, MVT::f32, Expand);
  // This is used in the ppcf128->int sequence.  Note it has different semantics
  // from FP_ROUND: that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // PowerPC has no intrinsics for these particular operations
  setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);

  // PowerPC has no SREM/UREM instructions
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FPOW , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  setOperationAction(ISD::FPOW , MVT::f32, Expand);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root
  if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) {
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);
  }

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  // PowerPC does not have BSWAP, CTPOP or CTTZ
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ , MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ , MVT::i64, Expand);

  // PowerPC does not have ROTR
  setOperationAction(ISD::ROTR, MVT::i32, Expand);

  // PowerPC does not have Select
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::i64, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit
  setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND which requires SetCC
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::i64, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::f64, Expand);

  // We cannot sextinreg(i1).  Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // Support label based line numbers.
  setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i32, Expand);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i64, Custom);

  // RET must be custom lowered, to meet ABI requirements
  setOperationAction(ISD::RET, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // VAARG is custom lowered with the ELF 32 ABI.
  if (TM.getSubtarget<PPCSubtarget>().isELF32_ABI())
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
  else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);

  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_SWAP    , MVT::i32, Custom);
  if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_SWAP    , MVT::i64, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);

    // FIXME: disable this lowered code.  This generates 64-bit register values,
    // and we don't model the fact that the top part is clobbered by calls.  We
    // need to flag these together so that the value isn't live across a call.
    //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);

    // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  if (TM.getSubtarget<PPCSubtarget>().use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly
    addRegisterClass(MVT::i64, PPC::G8RCRegisterClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD , VT, Legal);
      setOperationAction(ISD::SUB , VT, Legal);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND   , VT, Promote);
      AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
      setOperationAction(ISD::OR    , VT, Promote);
      AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
      setOperationAction(ISD::XOR   , VT, Promote);
      AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
      setOperationAction(ISD::LOAD  , VT, Promote);
      AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::STORE , VT, Promote);
      AddPromotedToType (ISD::STORE , VT, MVT::v4i32);
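      // For example, with the promotions above an AND of two v16i8 values
      // is bitconverted to v4i32, performed as a single v4i32 AND (the
      // AltiVec logical ops work on the whole 128-bit register regardless
      // of element type), and bitconverted back.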

      // No other operations are legal.
      setOperationAction(ISD::MUL , VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::CTPOP, VT, Expand);
      setOperationAction(ISD::CTLZ, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND   , MVT::v4i32, Legal);
    setOperationAction(ISD::OR    , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);

    addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
  }

  setShiftAmountType(MVT::i32);
  setSetCCResultContents(ZeroOrOneSetCCResult);

  if (TM.getSubtarget<PPCSubtarget>().isPPC64()) {
    setStackPointerRegisterToSaveRestore(PPC::X1);
    setExceptionPointerRegister(PPC::X3);
    setExceptionSelectorRegister(PPC::X4);
  } else {
    setStackPointerRegisterToSaveRestore(PPC::R1);
    setExceptionPointerRegister(PPC::R3);
    setExceptionSelectorRegister(PPC::R4);
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  setTargetDAGCombine(ISD::BSWAP);

  // Darwin long double math library functions have $LDBL128 appended.
  if (TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
  }

  computeRegisterProperties();
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(const Type *Ty) const {
  TargetMachine &TM = getTargetMachine();
  // Darwin passes everything on 4 byte boundary.
  if (TM.getSubtarget<PPCSubtarget>().isDarwin())
    return 4;
  // Other platforms are 8-byte aligned.
  return 8;
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case PPCISD::FSEL:            return "PPCISD::FSEL";
  case PPCISD::FCFID:           return "PPCISD::FCFID";
  case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
  case PPCISD::STFIWX:          return "PPCISD::STFIWX";
  case PPCISD::VMADDFP:         return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP:        return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM:           return "PPCISD::VPERM";
  case PPCISD::Hi:              return "PPCISD::Hi";
  case PPCISD::Lo:              return "PPCISD::Lo";
  case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
  case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:             return "PPCISD::SRL";
  case PPCISD::SRA:             return "PPCISD::SRA";
  case PPCISD::SHL:             return "PPCISD::SHL";
  case PPCISD::EXTSW_32:        return "PPCISD::EXTSW_32";
  case PPCISD::STD_32:          return "PPCISD::STD_32";
  case PPCISD::CALL_ELF:        return "PPCISD::CALL_ELF";
  case PPCISD::CALL_Macho:      return "PPCISD::CALL_Macho";
  case PPCISD::MTCTR:           return "PPCISD::MTCTR";
  case PPCISD::BCTRL_Macho:     return "PPCISD::BCTRL_Macho";
  case PPCISD::BCTRL_ELF:       return "PPCISD::BCTRL_ELF";
  case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
  case PPCISD::MFCR:            return "PPCISD::MFCR";
  case PPCISD::VCMP:            return "PPCISD::VCMP";
  case PPCISD::VCMPo:           return "PPCISD::VCMPo";
  case PPCISD::LBRX:            return "PPCISD::LBRX";
  case PPCISD::STBRX:           return "PPCISD::STBRX";
  case PPCISD::ATOMIC_LOAD_ADD: return "PPCISD::ATOMIC_LOAD_ADD";
  case PPCISD::ATOMIC_CMP_SWAP: return "PPCISD::ATOMIC_CMP_SWAP";
  case PPCISD::ATOMIC_SWAP:     return "PPCISD::ATOMIC_SWAP";
  case PPCISD::LARX:            return "PPCISD::LARX";
  case PPCISD::STCX:            return "PPCISD::STCX";
  case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
  case PPCISD::MFFS:            return "PPCISD::MFFS";
  case PPCISD::MTFSB0:          return "PPCISD::MTFSB0";
  case PPCISD::MTFSB1:          return "PPCISD::MTFSB1";
  case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
  case PPCISD::MTFSF:           return "PPCISD::MTFSF";
  case PPCISD::TAILCALL:        return "PPCISD::TAILCALL";
  case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
  }
}

MVT PPCTargetLowering::getSetCCResultType(const SDOperand &) const {
  return MVT::i32;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDOperand Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.Val) || ISD::isNON_EXTLoad(Op.Val)) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(SDOperand Op, unsigned Val) {
  return Op.getOpcode() == ISD::UNDEF ||
         cast<ConstantSDNode>(Op)->getValue() == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
bool PPC::isVPKUHUMShuffleMask(SDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), i*2+1))
        return false;
  } else {
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getOperand(i),   i*2+1) ||
          !isConstantOrUndef(N->getOperand(i+8), i*2+1))
        return false;
  }
  return true;
}
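
// For example, in the binary (!isUnary) case the matching mask is
// <1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31>: vpkuhum keeps the
// low-order byte of each halfword from both input vectors.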

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
bool PPC::isVPKUWUMShuffleMask(SDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getOperand(i  ), i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+1), i*2+3))
        return false;
  } else {
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getOperand(i  ), i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+1), i*2+3) ||
          !isConstantOrUndef(N->getOperand(i+8), i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+9), i*2+3))
        return false;
  }
  return true;
}
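
// The corresponding binary vpkuwum mask is
// <2,3,6,7,10,11,14,15,18,19,22,23,26,27,30,31>: the low halfword of each
// word from both input vectors.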

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(SDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getOperand(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getOperand(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}
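
// As a concrete case, isVMerge(N, 1, 8, 24) matches the vmrglb mask
// <8,24,9,25,10,26,11,27,12,28,13,29,14,30,15,31>, interleaving the low
// eight bytes of the two inputs.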

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
bool PPC::isVMRGLShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 8, 24);
  return isVMerge(N, UnitSize, 8, 8);
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
bool PPC::isVMRGHShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 0, 16);
  return isVMerge(N, UnitSize, 0, 0);
}

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && N->getOperand(i).getOpcode() == ISD::UNDEF; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = cast<ConstantSDNode>(N->getOperand(i))->getValue();
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  if (!isUnary) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), ShiftAmt+i))
        return -1;
  } else {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), (ShiftAmt+i) & 15))
        return -1;
  }
  return ShiftAmt;
}
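
// For example, the mask <3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18> is
// recognized here as a vsldoi by 3: result byte i comes from byte
// ShiftAmt+i of the concatenated source vectors.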

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool PPC::isSplatShuffleMask(SDNode *N, unsigned EltSize) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = 0;
  SDOperand Elt = N->getOperand(0);
  if (ConstantSDNode *EltV = dyn_cast<ConstantSDNode>(Elt))
    ElementBase = EltV->getValue();
  else
    return false;   // FIXME: Handle UNDEF elements too!

  if (cast<ConstantSDNode>(Elt)->getValue() >= 16)
    return false;

  // Check that they are consecutive.
  for (unsigned i = 1; i != EltSize; ++i) {
    if (!isa<ConstantSDNode>(N->getOperand(i)) ||
        cast<ConstantSDNode>(N->getOperand(i))->getValue() != i+ElementBase)
      return false;
  }

  assert(isa<ConstantSDNode>(Elt) && "Invalid VECTOR_SHUFFLE mask!");
  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(N->getOperand(i)) &&
           "Invalid VECTOR_SHUFFLE mask!");
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getOperand(i+j) != N->getOperand(j))
        return false;
  }
  return true;
}

/// isAllNegativeZeroVector - Returns true if all elements of build_vector
/// are -0.0.
bool PPC::isAllNegativeZeroVector(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  if (PPC::isSplatShuffleMask(N, N->getNumOperands()))
    if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N))
      return CFP->getValueAPF().isNegZero();
  return false;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
  assert(isSplatShuffleMask(N, EltSize));
  return cast<ConstantSDNode>(N->getOperand(0))->getValue() / EltSize;
}
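
// For instance, with EltSize == 4 the splat mask <4,5,6,7, 4,5,6,7,
// 4,5,6,7, 4,5,6,7> selects word element 1, and getVSPLTImmediate returns
// 4/4 == 1, the immediate for "vspltw V, 1".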

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDOperand PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDOperand OpVal(0, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDOperand UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDOperand();

      if (UniquedVals[i&(Multiple-1)].Val == 0)
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDOperand();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (UniquedVals[i].Val == 0) continue;  // Must have been undefs.

      LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
      LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (UniquedVals[Multiple-1].Val == 0)
        return DAG.getTargetConstant(0, MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getValue();
      if (Val < 16)
        return DAG.getTargetConstant(Val, MVT::i32);  // 0,0,0,4 -> vspltisw(4)
    }
    if (LeadingOnes) {
      if (UniquedVals[Multiple-1].Val == 0)
        return DAG.getTargetConstant(~0U, MVT::i32);  // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSignExtended();
      if (Val >= -16)  // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, MVT::i32);
    }

    return SDOperand();
  }

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.Val == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDOperand();
  }

  if (OpVal.Val == 0) return SDOperand();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = 0;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getValue();
    ValSizeInBytes = CN->getValueType(0).getSizeInBits()/8;
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
    ValSizeInBytes = 4;
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDOperand();

  // If the element value is larger than the splat value, cut it in half and
  // check to see if the two halves are equal.  Continue doing this until we
  // get to ByteSize.  This allows us to handle 0x01010101 as 0x01.
  while (ValSizeInBytes > ByteSize) {
    ValSizeInBytes >>= 1;

    // If the top half equals the bottom half, we're still ok.
    if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
         (Value                        & ((1 << (8*ValSizeInBytes))-1)))
      return SDOperand();
  }

  // Properly sign extend the value.
  int ShAmt = (4-ByteSize)*8;
  int MaskVal = ((int)Value << ShAmt) >> ShAmt;

  // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDOperand();

  // Finally, if this value fits in a 5 bit sext field, return it.
  if (((MaskVal << (32-5)) >> (32-5)) == MaskVal)
    return DAG.getTargetConstant(MaskVal, MVT::i32);
  return SDOperand();
}
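
// Worked trace for get_VSPLTI_elt: a splat of the i32 constant 0x01010101
// tested with ByteSize == 1 halves to 0x0101 and then 0x01 (both halves
// match at each step), sign extends to MaskVal == 1, and 1 fits the 5-bit
// signed immediate, so this becomes the operand of a "vspltisb 1".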

//===----------------------------------------------------------------------===//
// Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value.  If so, this returns true and the
/// constant.
static bool isIntS16Immediate(SDNode *N, short &Imm) {
  if (N->getOpcode() != ISD::Constant)
    return false;

  Imm = (short)cast<ConstantSDNode>(N)->getValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getValue();
}
static bool isIntS16Immediate(SDOperand Op, short &Imm) {
  return isIntS16Immediate(Op.Val, Imm);
}
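
// For example, the i32 constant -32768 passes (Imm becomes -32768), while
// 40000 fails: truncating it to short yields -25536, which no longer
// compares equal to the original value.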

/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.  Returns false if it
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDOperand N, SDOperand &Base,
                                            SDOperand &Index,
                                            SelectionDAG &DAG) {
  short imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false;    // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i can fold it if we can.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are
    // provably disjoint.
    APInt LHSKnownZero, LHSKnownOne;
    APInt RHSKnownZero, RHSKnownOne;
    DAG.ComputeMaskedBits(N.getOperand(0),
                          APInt::getAllOnesValue(N.getOperand(0)
                                                 .getValueSizeInBits()),
                          LHSKnownZero, LHSKnownOne);

    if (LHSKnownZero.getBoolValue()) {
      DAG.ComputeMaskedBits(N.getOperand(1),
                            APInt::getAllOnesValue(N.getOperand(1)
                                                   .getValueSizeInBits()),
                            RHSKnownZero, RHSKnownOne);
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnownZero | RHSKnownZero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}
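
// For instance, "(X & 0xFFFF0000) | (Y & 0x0000FFFF)" has provably disjoint
// operands, so the OR behaves like an add and can be selected as an indexed
// [r+r] access.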

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg.
bool PPCTargetLowering::SelectAddressRegImm(SDOperand N, SDOperand &Disp,
                                            SDOperand &Base, SelectionDAG &DAG){
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm)) {
      Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true;  // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true;  // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm)) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      APInt LHSKnownZero, LHSKnownOne;
      DAG.ComputeMaskedBits(N.getOperand(0),
                            APInt::getAllOnesValue(N.getOperand(0)
                                                   .getValueSizeInBits()),
                            LHSKnownZero, LHSKnownOne);

      if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0".
    short Imm;
    if (isIntS16Immediate(CN, Imm)) {
      Disp = DAG.getTargetConstant(Imm, CN->getValueType(0));
      Base = DAG.getRegister(PPC::R0, CN->getValueType(0));
      return true;
    }

    // Handle 32-bit sext immediates with LIS + addr mode.
    if (CN->getValueType(0) == MVT::i32 ||
        (int64_t)CN->getValue() == (int)CN->getValue()) {
      int Addr = (int)CN->getValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDOperand(DAG.getTargetNode(Opc, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
  else
    Base = N;
  return true;  // [r+0]
}
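
// Worked example for the constant-address path: an access to absolute
// address 0x12345678 materializes the base with "lis Rb, 0x1234" and uses
// displacement 0x5678; because the low half is positive, no carry
// adjustment of the high half is needed.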

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDOperand N, SDOperand &Base,
                                                SDOperand &Index,
                                                SelectionDAG &DAG) {
  // Check to see if we can easily represent this as an [r+r] address.  This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the operand is an addition, always emit this as [r+r], since this is
  // better (for code size, and execution, as the memop does the add for free)
  // than emitting an explicit add.
  if (N.getOpcode() == ISD::ADD) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(PPC::R0, N.getValueType());
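  // (When R0 appears as the base of an indexed access it reads as the
  // constant zero, so this is effectively a [0+N] address.)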
  Index = N;
  return true;
}

/// SelectAddressRegImmShift - Returns true if the address N can be
/// represented by a base register plus a signed 14-bit displacement
/// [r+imm*4].  Suitable for use by STD and friends.
bool PPCTargetLowering::SelectAddressRegImmShift(SDOperand N, SDOperand &Disp,
                                                 SDOperand &Base,
                                                 SelectionDAG &DAG) {
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
      Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true;  // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true;  // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      APInt LHSKnownZero, LHSKnownOne;
      DAG.ComputeMaskedBits(N.getOperand(0),
                            APInt::getAllOnesValue(N.getOperand(0)
                                                   .getValueSizeInBits()),
                            LHSKnownZero, LHSKnownOne);
      if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.  Verify low two bits are clear.
    if ((CN->getValue() & 3) == 0) {
      // If this address fits entirely in a 14-bit sext immediate field, codegen
      // this as "d, 0".
      short Imm;
      if (isIntS16Immediate(CN, Imm)) {
        Disp = DAG.getTargetConstant((unsigned short)Imm >> 2, getPointerTy());
        Base = DAG.getRegister(PPC::R0, CN->getValueType(0));
        return true;
      }

      // Fold the low-part of 32-bit absolute addresses into addr mode.
      if (CN->getValueType(0) == MVT::i32 ||
          (int64_t)CN->getValue() == (int)CN->getValue()) {
        int Addr = (int)CN->getValue();

        // Otherwise, break this down into an LIS + disp.
        Disp = DAG.getTargetConstant((short)Addr >> 2, MVT::i32);

        Base = DAG.getTargetConstant((Addr-(signed short)Addr) >> 16, MVT::i32);
        unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
        Base = SDOperand(DAG.getTargetNode(Opc, CN->getValueType(0), Base), 0);
        return true;
      }
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
  else
    Base = N;
  return true;  // [r+0]
}
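
// The ">> 2" above reflects the DS-form encoding used by std/ld and
// friends: the instruction stores the displacement divided by 4, which is
// also why the low two bits of any constant address must be clear.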

/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDOperand &Base,
                                                  SDOperand &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) {
  // Disabled by default for now.
  if (!EnablePPCPreinc) return false;

  SDOperand Ptr;
  MVT VT;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
  } else
    return false;

  // PowerPC doesn't have preinc load/store instructions for vectors.
  if (VT.isVector())
    return false;

  // TODO: Check reg+reg first.

  // LDU/STU use reg+imm*4, others use reg+imm.
  if (VT != MVT::i64) {
    // reg + imm
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG))
      return false;
  } else {
    // reg + imm * 4.
    if (!SelectAddressRegImmShift(Ptr, Offset, Base, DAG))
      return false;
  }

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
    // sext i32 to i64 when addr mode is r+i.
    if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
        LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}
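
// When this succeeds, a load like "lwz r4, 4(r3)" whose updated address is
// reused can instead be emitted as "lwzu r4, 4(r3)", which loads and also
// writes the incremented address back into r3.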

//===----------------------------------------------------------------------===//
// LowerOperation implementation
//===----------------------------------------------------------------------===//

SDOperand PPCTargetLowering::LowerConstantPool(SDOperand Op,
                                               SelectionDAG &DAG) {
  MVT PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  Constant *C = CP->getConstVal();
  SDOperand CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
  SDOperand Zero = DAG.getConstant(0, PtrVT);

  const TargetMachine &TM = DAG.getTarget();

  SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, CPI, Zero);
  SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, CPI, Zero);

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to the constant pool.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  return Lo;
}
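
// The PPCISD::Hi/Lo pair built here (and in the jump table and global
// address lowerings below) eventually becomes a load of the high 16 bits
// of the symbol followed by an add of the low 16 bits, e.g. (illustrative
// Darwin syntax):
//   lis r2, ha16(LCPI0_0)
//   la  r2, lo16(LCPI0_0)(r2)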

SDOperand PPCTargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) {
  MVT PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
  SDOperand Zero = DAG.getConstant(0, PtrVT);

  const TargetMachine &TM = DAG.getTarget();

  SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, JTI, Zero);
  SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, JTI, Zero);

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to the constant pool.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  return Lo;
}

SDOperand PPCTargetLowering::LowerGlobalTLSAddress(SDOperand Op,
                                                   SelectionDAG &DAG) {
  assert(0 && "TLS not implemented for PPC.");
  return SDOperand();  // Not reached
}

SDOperand PPCTargetLowering::LowerGlobalAddress(SDOperand Op,
                                                SelectionDAG &DAG) {
  MVT PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  GlobalValue *GV = GSDN->getGlobal();
  SDOperand GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset());
  // If it's a debug information descriptor, don't mess with it.
  if (DAG.isVerifiedDebugInfoDesc(Op))
    return GA;
  SDOperand Zero = DAG.getConstant(0, PtrVT);

  const TargetMachine &TM = DAG.getTarget();

  SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, GA, Zero);
  SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, GA, Zero);

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to globals.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);

  if (!TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV))
    return Lo;

  // If the global is weak or external, we have to go through the lazy
  // resolution stub.
  return DAG.getLoad(PtrVT, DAG.getEntryNode(), Lo, NULL, 0);
}

SDOperand PPCTargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
  // fold the new nodes.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    if (C->isNullValue() && CC == ISD::SETEQ) {
      MVT VT = Op.getOperand(0).getValueType();
      SDOperand Zext = Op.getOperand(0);
      if (VT.bitsLT(MVT::i32)) {
        VT = MVT::i32;
        Zext = DAG.getNode(ISD::ZERO_EXTEND, VT, Op.getOperand(0));
      }
      unsigned Log2b = Log2_32(VT.getSizeInBits());
      SDOperand Clz = DAG.getNode(ISD::CTLZ, VT, Zext);
      SDOperand Scc = DAG.getNode(ISD::SRL, VT, Clz,
                                  DAG.getConstant(Log2b, MVT::i32));
      return DAG.getNode(ISD::TRUNCATE, MVT::i32, Scc);
    }
    // Leave comparisons against 0 and -1 alone for now, since they're usually
    // optimized.  FIXME: revisit this when we can custom lower all setcc
    // optimizations.
    if (C->isAllOnesValue() || C->isNullValue())
      return SDOperand();
  }

  // If we have an integer seteq/setne, turn it into a compare against zero
  // by xor'ing the rhs with the lhs, which is faster than setting a
  // condition register, reading it back out, and masking the correct bit.  The
  // normal approach here uses sub to do this instead of xor.  Using xor exposes
  // the result to other bit-twiddling opportunities.
  MVT LHSVT = Op.getOperand(0).getValueType();
  if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    MVT VT = Op.getValueType();
    SDOperand Sub = DAG.getNode(ISD::XOR, LHSVT, Op.getOperand(0),
                                Op.getOperand(1));
    return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC);
  }
  return SDOperand();
}
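
// For i32, Log2b is 5, so "seteq X, 0" lowers to (ctlz X) >> 5: cntlzw
// yields 32 exactly when X is zero, and 32 >> 5 == 1, producing the 0/1
// result directly; "seteq X, Y" is first rewritten as
// "seteq (xor X, Y), 0".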

SDOperand PPCTargetLowering::LowerVAARG(SDOperand Op, SelectionDAG &DAG,
                                        int VarArgsFrameIndex,
                                        int VarArgsStackOffset,
                                        unsigned VarArgsNumGPR,
                                        unsigned VarArgsNumFPR,
                                        const PPCSubtarget &Subtarget) {

  assert(0 && "VAARG in ELF32 ABI not implemented yet!");
  return SDOperand();  // Not reached
}

SDOperand PPCTargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG,
                                          int VarArgsFrameIndex,
                                          int VarArgsStackOffset,
                                          unsigned VarArgsNumGPR,
                                          unsigned VarArgsNumFPR,
                                          const PPCSubtarget &Subtarget) {

  if (Subtarget.isMachoABI()) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
    SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
    const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
    return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV, 0);
  }

  // For the ELF 32 ABI we follow the layout of the va_list struct.
  // We suppose the given va_list is already allocated.
  //
  // typedef struct {
  //  char gpr;     /* index into the array of 8 GPRs
  //                 * stored in the register save area
  //                 * gpr=0 corresponds to r3,
  //                 * gpr=1 to r4, etc.
  //                 */
  //  char fpr;     /* index into the array of 8 FPRs
  //                 * stored in the register save area
  //                 * fpr=0 corresponds to f1,
  //                 * fpr=1 to f2, etc.
  //                 */
  //  char *overflow_arg_area;
  //                /* location on stack that holds
  //                 * the next overflow argument
  //                 */
  //  char *reg_save_area;
  //                /* where r3:r10 and f1:f8 (if saved)
  //                 * are stored
  //                 */
  // } va_list[1];

  SDOperand ArgGPR = DAG.getConstant(VarArgsNumGPR, MVT::i8);
  SDOperand ArgFPR = DAG.getConstant(VarArgsNumFPR, MVT::i8);

  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();

  SDOperand StackOffsetFI = DAG.getFrameIndex(VarArgsStackOffset, PtrVT);
  SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);

  uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
  SDOperand ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT);

  uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
  SDOperand ConstStackOffset = DAG.getConstant(StackOffset, PtrVT);

  uint64_t FPROffset = 1;
  SDOperand ConstFPROffset = DAG.getConstant(FPROffset, PtrVT);

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  // Store first byte : number of int regs
  SDOperand firstStore = DAG.getStore(Op.getOperand(0), ArgGPR,
                                      Op.getOperand(1), SV, 0);
  uint64_t nextOffset = FPROffset;
  SDOperand nextPtr = DAG.getNode(ISD::ADD, PtrVT, Op.getOperand(1),
                                  ConstFPROffset);

  // Store second byte : number of float regs
  SDOperand secondStore =
    DAG.getStore(firstStore, ArgFPR, nextPtr, SV, nextOffset);
  nextOffset += StackOffset;
  nextPtr = DAG.getNode(ISD::ADD, PtrVT, nextPtr, ConstStackOffset);

  // Store second word : arguments given on stack
  SDOperand thirdStore =
    DAG.getStore(secondStore, StackOffsetFI, nextPtr, SV, nextOffset);
  nextOffset += FrameOffset;
  nextPtr = DAG.getNode(ISD::ADD, PtrVT, nextPtr, ConstFrameOffset);

  // Store third word : arguments given in registers
  return DAG.getStore(thirdStore, FR, nextPtr, SV, nextOffset);
}
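
// With a 32-bit pointer type the stores above land at byte offsets 0 (gpr),
// 1 (fpr), 4 (overflow_arg_area), and 8 (reg_save_area), matching the
// va_list layout described at the top of the function.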

#include "PPCGenCallingConv.inc"

/// GetFPR - Get the set of FP registers that should be allocated for arguments,
/// depending on which subtarget is selected.
static const unsigned *GetFPR(const PPCSubtarget &Subtarget) {
  if (Subtarget.isMachoABI()) {
    static const unsigned FPR[] = {
      PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
      PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
    };
    return FPR;
  }

  static const unsigned FPR[] = {
    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
    PPC::F8
  };
  return FPR;
}

/// CalculateStackSlotSize - Calculates the size reserved for this argument on
/// the stack.
static unsigned CalculateStackSlotSize(SDOperand Arg, SDOperand Flag,
                                       bool isVarArg, unsigned PtrByteSize) {
  MVT ArgVT = Arg.getValueType();
  ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(Flag)->getArgFlags();
  unsigned ArgSize = ArgVT.getSizeInBits()/8;
  if (Flags.isByVal())
    ArgSize = Flags.getByValSize();
  ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;

  return ArgSize;
}
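
// E.g. an i32 argument on a 64-bit target (PtrByteSize == 8) still reserves
// a full 8-byte slot, and a 13-byte byval aggregate rounds up to 16 bytes.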

SDOperand
PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op,
                                         SelectionDAG &DAG,
                                         int &VarArgsFrameIndex,
                                         int &VarArgsStackOffset,
                                         unsigned &VarArgsNumGPR,
                                         unsigned &VarArgsNumFPR,
                                         const PPCSubtarget &Subtarget) {
  // TODO: add description of PPC stack frame format, or at least some docs.

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  SmallVector<SDOperand, 8> ArgValues;
  SDOperand Root = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;

  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;
  bool isMachoABI = Subtarget.isMachoABI();
  bool isELF32_ABI = Subtarget.isELF32_ABI();
  // Potential tail calls could cause overwriting of argument stack slots.
  unsigned CC = MF.getFunction()->getCallingConv();
  bool isImmutable = !(PerformTailCallOpt && (CC==CallingConv::Fast));
  unsigned PtrByteSize = isPPC64 ? 8 : 4;

  unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI);
  // Area that is at least reserved in caller of this function.
  unsigned MinReservedArea = ArgOffset;

  static const unsigned GPR_32[] = {           // 32-bit registers.
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const unsigned GPR_64[] = {           // 64-bit registers.
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };

  static const unsigned *FPR = GetFPR(Subtarget);

  static const unsigned VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
  const unsigned Num_FPR_Regs = isMachoABI ? 13 : 8;
  const unsigned Num_VR_Regs  = array_lengthof(VR);

  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;

  // In 32-bit non-varargs functions, the stack space for vectors is after the
  // stack space for non-vectors.  We do not use this space unless we have
  // too many vectors to fit in registers, something that only occurs in
  // constructed examples:), but we have to walk the arglist to figure
  // that out...for the pathological case, compute VecArgOffset as the
  // start of the vector parameter area.  Computing VecArgOffset is the
  // entire point of the following loop.
  // Altivec is not mentioned in the ppc32 Elf Supplement, so I'm not trying
  // to handle Elf here.
  unsigned VecArgOffset = ArgOffset;
  if (!isVarArg && !isPPC64) {
    for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e;
         ++ArgNo) {
      MVT ObjectVT = Op.getValue(ArgNo).getValueType();
      unsigned ObjSize = ObjectVT.getSizeInBits()/8;
      ISD::ArgFlagsTy Flags =
        cast<ARG_FLAGSSDNode>(Op.getOperand(ArgNo+3))->getArgFlags();

      if (Flags.isByVal()) {
        // ObjSize is the true size, ArgSize rounded up to multiple of regs.
        ObjSize = Flags.getByValSize();
        unsigned ArgSize =
          ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
        VecArgOffset += ArgSize;
        continue;
      }

      switch(ObjectVT.getSimpleVT()) {
      default: assert(0 && "Unhandled argument type!");
      case MVT::i32:
      case MVT::f32:
        VecArgOffset += isPPC64 ? 8 : 4;
        break;
      case MVT::i64:  // PPC64
      case MVT::f64:
        VecArgOffset += 8;
        break;
      case MVT::v4f32:
      case MVT::v4i32:
      case MVT::v8i16:
      case MVT::v16i8:
        // Nothing to do, we're only looking at Nonvector args here.
        break;
      }
    }
  }
  // We've found where the vector parameter area in memory is.  Skip the
  // first 12 parameters; these don't use that memory.
  VecArgOffset = ((VecArgOffset+15)/16)*16;
  VecArgOffset += 12*16;

  // Add DAG nodes to load the arguments or copy them out of registers.  On
  // entry to a function on PPC, the arguments start after the linkage area,
  // although the first ones are often in registers.
  //
  // In the ELF 32 ABI, GPRs and stack are double-word aligned: an argument
  // represented with two words (long long or double) must be copied to an
  // even GPR_idx value or to an even ArgOffset value.

  SmallVector<SDOperand, 8> MemOps;
  unsigned nAltivecParamsAtEnd = 0;
  for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) {
    SDOperand ArgVal;
    bool needsLoad = false;
    MVT ObjectVT = Op.getValue(ArgNo).getValueType();
    unsigned ObjSize = ObjectVT.getSizeInBits()/8;
    unsigned ArgSize = ObjSize;
    ISD::ArgFlagsTy Flags =
      cast<ARG_FLAGSSDNode>(Op.getOperand(ArgNo+3))->getArgFlags();
    // See if next argument requires stack alignment in ELF
    bool Align = Flags.isSplit();

    unsigned CurArgOffset = ArgOffset;

    // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.
    if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
        ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
      if (isVarArg || isPPC64) {
        MinReservedArea = ((MinReservedArea+15)/16)*16;
        MinReservedArea += CalculateStackSlotSize(Op.getValue(ArgNo),
                                                  Op.getOperand(ArgNo+3),
                                                  isVarArg,
                                                  PtrByteSize);
      } else  nAltivecParamsAtEnd++;
    } else
      // Calculate min reserved area.
      MinReservedArea += CalculateStackSlotSize(Op.getValue(ArgNo),
                                                Op.getOperand(ArgNo+3),
                                                isVarArg,
                                                PtrByteSize);

    // FIXME: alignment for ELF may not be right
    // FIXME: the codegen can be much improved in some cases.
    // We do not have to keep everything in memory.
    if (Flags.isByVal()) {
      // ObjSize is the true size, ArgSize rounded up to multiple of registers.
      ObjSize = Flags.getByValSize();
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      // Double word align in ELF
      if (Align && isELF32_ABI) GPR_idx += (GPR_idx % 2);
      // Objects of size 1 and 2 are right justified, everything else is
      // left justified.  This means the memory address is adjusted forwards.
      if (ObjSize==1 || ObjSize==2) {
        CurArgOffset = CurArgOffset + (4 - ObjSize);
      }
      // The value of the object is its address.
      int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset);
      SDOperand FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgValues.push_back(FIN);
      if (ObjSize==1 || ObjSize==2) {
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
          RegInfo.addLiveIn(GPR[GPR_idx], VReg);
          SDOperand Val = DAG.getCopyFromReg(Root, VReg, PtrVT);
          SDOperand Store = DAG.getTruncStore(Val.getValue(1), Val, FIN,
                               NULL, 0, ObjSize==1 ? MVT::i8 : MVT::i16);
          MemOps.push_back(Store);
          ++GPR_idx;
          if (isMachoABI) ArgOffset += PtrByteSize;
        } else {
          ArgOffset += PtrByteSize;
        }
        continue;
      }
      for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
        // Store whatever pieces of the object are in registers
        // to memory.  ArgVal will be address of the beginning of
        // the object.
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
          RegInfo.addLiveIn(GPR[GPR_idx], VReg);
          int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset);
          SDOperand FIN = DAG.getFrameIndex(FI, PtrVT);
          SDOperand Val = DAG.getCopyFromReg(Root, VReg, PtrVT);
          SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
          MemOps.push_back(Store);
          ++GPR_idx;
          if (isMachoABI) ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
          break;
        }
      }
      continue;
    }
1539 switch (ObjectVT.getSimpleVT()) {
1540 default: assert(0 && "Unhandled argument type!");
1543 // Double word align in ELF
1544 if (Align && isELF32_ABI) GPR_idx += (GPR_idx % 2);
1546 if (GPR_idx != Num_GPR_Regs) {
1547 unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
1548 RegInfo.addLiveIn(GPR[GPR_idx], VReg);
1549 ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i32);
1553 ArgSize = PtrByteSize;
1555 // Stack align in ELF
1556 if (needsLoad && Align && isELF32_ABI)
1557 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize;
1558 // All int arguments reserve stack space in Macho ABI.
1559 if (isMachoABI || needsLoad) ArgOffset += PtrByteSize;
1563 case MVT::i64: // PPC64
1564 if (GPR_idx != Num_GPR_Regs) {
1565 unsigned VReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
1566 RegInfo.addLiveIn(GPR[GPR_idx], VReg);
1567 ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i64);
1569 if (ObjectVT == MVT::i32) {
1570 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
1571 // value to MVT::i64 and then truncate to the correct register size.
1572 if (Flags.isSExt())
1573 ArgVal = DAG.getNode(ISD::AssertSext, MVT::i64, ArgVal,
1574 DAG.getValueType(ObjectVT));
1575 else if (Flags.isZExt())
1576 ArgVal = DAG.getNode(ISD::AssertZext, MVT::i64, ArgVal,
1577 DAG.getValueType(ObjectVT));
1579 ArgVal = DAG.getNode(ISD::TRUNCATE, MVT::i32, ArgVal);
1585 ArgSize = PtrByteSize;
1587 // All int arguments reserve stack space in Macho ABI.
1588 if (isMachoABI || needsLoad) ArgOffset += 8;
1593 // Every 4 bytes of argument space consumes one of the GPRs available for
1594 // argument passing.
1595 if (GPR_idx != Num_GPR_Regs && isMachoABI) {
1597 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
1600 if (FPR_idx != Num_FPR_Regs) {
1602 if (ObjectVT == MVT::f32)
1603 VReg = RegInfo.createVirtualRegister(&PPC::F4RCRegClass);
1605 VReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
1606 RegInfo.addLiveIn(FPR[FPR_idx], VReg);
1607 ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT);
1613 // Stack align in ELF
1614 if (needsLoad && Align && isELF32_ABI)
1615 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize;
1616 // All FP arguments reserve stack space in Macho ABI.
1617 if (isMachoABI || needsLoad) ArgOffset += isPPC64 ? 8 : ObjSize;
1623 // Note that vector arguments in registers don't reserve stack space,
1624 // except in varargs functions.
1625 if (VR_idx != Num_VR_Regs) {
1626 unsigned VReg = RegInfo.createVirtualRegister(&PPC::VRRCRegClass);
1627 RegInfo.addLiveIn(VR[VR_idx], VReg);
1628 ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT);
1630 while ((ArgOffset % 16) != 0) {
1631 ArgOffset += PtrByteSize;
1632 if (GPR_idx != Num_GPR_Regs)
1636 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs);
1640 if (!isVarArg && !isPPC64) {
1641 // Vectors go after all the non-vectors.
1642 CurArgOffset = VecArgOffset;
1645 // Vectors are aligned.
1646 ArgOffset = ((ArgOffset+15)/16)*16;
1647 CurArgOffset = ArgOffset;
1655 // We need to load the argument to a virtual register if we determined above
1656 // that we ran out of physical registers of the appropriate type.
1658 int FI = MFI->CreateFixedObject(ObjSize,
1659 CurArgOffset + (ArgSize - ObjSize),
1661 SDOperand FIN = DAG.getFrameIndex(FI, PtrVT);
1662 ArgVal = DAG.getLoad(ObjectVT, Root, FIN, NULL, 0);
1665 ArgValues.push_back(ArgVal);
1668 // Set the size that is at least reserved in the caller of this function. Tail
1669 // call optimized functions' reserved stack space needs to be aligned so that
1670 // taking the difference between two stack areas will result in an aligned
1671 // stack frame.
1672 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
1673 // Add the Altivec parameters at the end, if needed.
1674 if (nAltivecParamsAtEnd) {
1675 MinReservedArea = ((MinReservedArea+15)/16)*16;
1676 MinReservedArea += 16*nAltivecParamsAtEnd;
1678 MinReservedArea =
1679 std::max(MinReservedArea,
1680 PPCFrameInfo::getMinCallFrameSize(isPPC64, isMachoABI));
1681 unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameInfo()->
1682 getStackAlignment();
1683 unsigned AlignMask = TargetAlign-1;
1684 MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask;
1685 FI->setMinReservedArea(MinReservedArea);
1687 // If the function takes a variable number of arguments, make a frame index for
1688 // the start of the first vararg value... for expansion of llvm.va_start.
1693 VarArgsNumGPR = GPR_idx;
1694 VarArgsNumFPR = FPR_idx;
1696 // Make room for Num_GPR_Regs, Num_FPR_Regs, and for a possible frame
1697 // pointer save area.
1698 depth = -(Num_GPR_Regs * PtrVT.getSizeInBits()/8 +
1699 Num_FPR_Regs * MVT(MVT::f64).getSizeInBits()/8 +
1700 PtrVT.getSizeInBits()/8);
1702 VarArgsStackOffset = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
1709 VarArgsFrameIndex = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
1711 SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
1713 // In ELF 32 ABI, the fixed integer arguments of a variadic function are
1714 // stored to the VarArgsFrameIndex on the stack.
1716 for (GPR_idx = 0; GPR_idx != VarArgsNumGPR; ++GPR_idx) {
1717 SDOperand Val = DAG.getRegister(GPR[GPR_idx], PtrVT);
1718 SDOperand Store = DAG.getStore(Root, Val, FIN, NULL, 0);
1719 MemOps.push_back(Store);
1720 // Increment the address by four for the next argument to store
1721 SDOperand PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
1722 FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
1726 // If this function is vararg, store any remaining integer argument regs
1727 // to their spots on the stack so that they may be loaded by dereferencing the
1728 // result of va_next.
1729 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
1732 VReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
1734 VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
1736 RegInfo.addLiveIn(GPR[GPR_idx], VReg);
1737 SDOperand Val = DAG.getCopyFromReg(Root, VReg, PtrVT);
1738 SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
1739 MemOps.push_back(Store);
1740 // Increment the address by four for the next argument to store
1741 SDOperand PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
1742 FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
1745 // In ELF 32 ABI, the double arguments are stored to the VarArgsFrameIndex
1746 // on the stack.
1748 for (FPR_idx = 0; FPR_idx != VarArgsNumFPR; ++FPR_idx) {
1749 SDOperand Val = DAG.getRegister(FPR[FPR_idx], MVT::f64);
1750 SDOperand Store = DAG.getStore(Root, Val, FIN, NULL, 0);
1751 MemOps.push_back(Store);
1752 // Increment the address by eight for the next argument to store
1753 SDOperand PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8,
1755 FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
1758 for (; FPR_idx != Num_FPR_Regs; ++FPR_idx) {
1760 VReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
1762 RegInfo.addLiveIn(FPR[FPR_idx], VReg);
1763 SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::f64);
1764 SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
1765 MemOps.push_back(Store);
1766 // Increment the address by eight for the next argument to store
1767 SDOperand PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8,
1769 FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
1774 if (!MemOps.empty())
1775 Root = DAG.getNode(ISD::TokenFactor, MVT::Other,&MemOps[0],MemOps.size());
1777 ArgValues.push_back(Root);
1779 // Return the new list of results.
1780 return DAG.getMergeValues(Op.Val->getVTList(), &ArgValues[0],
1781 ArgValues.size());
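// --- Illustrative sketch (not part of the original file) --------------------
// The vararg handling above reserves a register save area below the frame:
// room for every integer argument register, every FP argument register, and
// one pointer-sized slot for a possible frame pointer. A minimal standalone
// model of that depth computation, assuming the same register counts and an
// 8-byte f64 as in the code above:
static inline int IllustrativeVarArgSaveAreaDepth(unsigned NumGPRs,
                                                  unsigned NumFPRs,
                                                  unsigned PtrSize) {
  // Negative: the save area sits below the incoming stack pointer.
  return -int(NumGPRs * PtrSize + NumFPRs * 8 /*f64*/ + PtrSize);
}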
1784 /// CalculateParameterAndLinkageAreaSize - Get the size of the parameter plus
1785 /// the linkage area.
1787 CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG,
1793 unsigned &nAltivecParamsAtEnd) {
1794 // Count how many bytes are to be pushed on the stack, including the linkage
1795 // area, and parameter passing area. We start with 24/48 bytes, which is
1796 // prereserved space for [SP][CR][LR][3 x unused].
1797 unsigned NumBytes = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI);
1798 unsigned NumOps = (Call.getNumOperands() - 5) / 2;
1799 unsigned PtrByteSize = isPPC64 ? 8 : 4;
1801 // Add up all the space actually used.
1802 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
1803 // they all go in registers, but we must reserve stack space for them for
1804 // possible use by the caller. In varargs or 64-bit calls, parameters are
1805 // assigned stack space in order, with padding so Altivec parameters are
1806 // 16-byte aligned.
1807 nAltivecParamsAtEnd = 0;
1808 for (unsigned i = 0; i != NumOps; ++i) {
1809 SDOperand Arg = Call.getOperand(5+2*i);
1810 SDOperand Flag = Call.getOperand(5+2*i+1);
1811 MVT ArgVT = Arg.getValueType();
1812 // Varargs Altivec parameters are padded to a 16-byte boundary.
1813 if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 ||
1814 ArgVT==MVT::v8i16 || ArgVT==MVT::v16i8) {
1815 if (!isVarArg && !isPPC64) {
1816 // Non-varargs Altivec parameters go after all the non-Altivec
1817 // parameters; handle those later so we know how much padding we need.
1818 nAltivecParamsAtEnd++;
1821 // Varargs and 64-bit Altivec parameters are padded to a 16-byte boundary.
1822 NumBytes = ((NumBytes+15)/16)*16;
1824 NumBytes += CalculateStackSlotSize(Arg, Flag, isVarArg, PtrByteSize);
1827 // Allow for Altivec parameters at the end, if needed.
1828 if (nAltivecParamsAtEnd) {
1829 NumBytes = ((NumBytes+15)/16)*16;
1830 NumBytes += 16*nAltivecParamsAtEnd;
1833 // The prolog code of the callee may store up to 8 GPR argument registers to
1834 // the stack, allowing va_start to index over them in memory if it is varargs.
1835 // Because we cannot tell if this is needed on the caller side, we have to
1836 // conservatively assume that it is needed. As such, make sure we have at
1837 // least enough stack space for the caller to store the 8 GPRs.
1838 NumBytes = std::max(NumBytes,
1839 PPCFrameInfo::getMinCallFrameSize(isPPC64, isMachoABI));
1841 // Tail call needs the stack to be aligned.
1842 if (CC==CallingConv::Fast && PerformTailCallOpt) {
1843 unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameInfo()->
1844 getStackAlignment();
1845 unsigned AlignMask = TargetAlign-1;
1846 NumBytes = (NumBytes + AlignMask) & ~AlignMask;
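// --- Illustrative sketch (not part of the original file) --------------------
// Both MinReservedArea earlier and NumBytes here are rounded up with the same
// mask idiom. A standalone model, assuming the alignment is a power of two:
static inline unsigned IllustrativeRoundUp(unsigned Bytes, unsigned Align) {
  unsigned Mask = Align - 1;
  return (Bytes + Mask) & ~Mask;  // e.g. IllustrativeRoundUp(52, 16) == 64
}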
1852 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
1853 /// adjusted to accommodate the arguments for the tailcall.
1854 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool IsTailCall,
1855 unsigned ParamSize) {
1857 if (!IsTailCall) return 0;
1859 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
1860 unsigned CallerMinReservedArea = FI->getMinReservedArea();
1861 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
1862 // Remember only if the new adjustment is bigger.
1863 if (SPDiff < FI->getTailCallSPDelta())
1864 FI->setTailCallSPDelta(SPDiff);
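// --- Illustrative sketch (not part of the original file) --------------------
// The delta above compares the caller's reserved area with the callee's
// parameter area; a negative result means the stack must grow before the tail
// call. Standalone model of the computation:
static inline int IllustrativeTailCallSPDiff(unsigned CallerReservedArea,
                                             unsigned CalleeParamSize) {
  return (int)CallerReservedArea - (int)CalleeParamSize;
}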
1869 /// IsEligibleForTailCallOptimization - Check to see whether the next instruction
1870 /// following the call is a return. A function is eligible if caller/callee
1871 /// calling conventions match (currently only fastcc supports tail calls) and
1872 /// the function CALL is immediately followed by a RET.
1874 PPCTargetLowering::IsEligibleForTailCallOptimization(SDOperand Call,
1876 SelectionDAG& DAG) const {
1877 // Variable argument functions are not supported.
1878 if (!PerformTailCallOpt ||
1879 cast<ConstantSDNode>(Call.getOperand(2))->getValue() != 0) return false;
1881 if (CheckTailCallReturnConstraints(Call, Ret)) {
1882 MachineFunction &MF = DAG.getMachineFunction();
1883 unsigned CallerCC = MF.getFunction()->getCallingConv();
1884 unsigned CalleeCC = cast<ConstantSDNode>(Call.getOperand(1))->getValue();
1885 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
1886 // Functions containing by val parameters are not supported.
1887 for (unsigned i = 0; i != ((Call.getNumOperands()-5)/2); i++) {
1888 ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(Call.getOperand(5+2*i+1))
1890 if (Flags.isByVal()) return false;
1893 SDOperand Callee = Call.getOperand(4);
1894 // Non-PIC/GOT tail calls are supported.
1895 if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
1898 // At the moment we can only do local tail calls (in same module, hidden
1899 // or protected) if we are generating PIC.
1900 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1901 return G->getGlobal()->hasHiddenVisibility()
1902 || G->getGlobal()->hasProtectedVisibility();
1909 /// isBLACompatibleAddress - Return the immediate to use if the specified
1910 /// 32-bit value is representable in the immediate field of a BxA instruction.
1911 static SDNode *isBLACompatibleAddress(SDOperand Op, SelectionDAG &DAG) {
1912 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
1915 int Addr = C->getValue();
1916 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
1917 (Addr << 6 >> 6) != Addr)
1918 return 0; // Top 6 bits have to be sext of immediate.
1920 return DAG.getConstant((int)C->getValue() >> 2,
1921 DAG.getTargetLoweringInfo().getPointerTy()).Val;
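// --- Illustrative sketch (not part of the original file) --------------------
// The test above checks that an absolute address fits the BLA immediate:
// word-aligned, and unchanged by a 26-bit sign-extension round trip. As a
// standalone predicate (mirroring the same shift idiom the code above uses):
static inline bool IllustrativeFitsBLAImmediate(int Addr) {
  if (Addr & 3) return false;         // low two bits are implicitly zero
  return ((Addr << 6) >> 6) == Addr;  // top 6 bits must be copies of the sign
}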
1926 struct TailCallArgumentInfo {
1928 SDOperand FrameIdxOp;
1931 TailCallArgumentInfo() : FrameIdx(0) {}
1936 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
1938 StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG,
1940 const SmallVector<TailCallArgumentInfo, 8> &TailCallArgs,
1941 SmallVector<SDOperand, 8> &MemOpChains) {
1942 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
1943 SDOperand Arg = TailCallArgs[i].Arg;
1944 SDOperand FIN = TailCallArgs[i].FrameIdxOp;
1945 int FI = TailCallArgs[i].FrameIdx;
1946 // Store relative to frame pointer.
1947 MemOpChains.push_back(DAG.getStore(Chain, Arg, FIN,
1948 PseudoSourceValue::getFixedStack(FI),
1953 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
1954 /// the appropriate stack slot for the tail call optimized function call.
1955 static SDOperand EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG,
1956 MachineFunction &MF,
1958 SDOperand OldRetAddr,
1964 // Calculate the new stack slot for the return address.
1965 int SlotSize = isPPC64 ? 8 : 4;
1966 int NewRetAddrLoc = SPDiff + PPCFrameInfo::getReturnSaveOffset(isPPC64,
1968 int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize,
1970 int NewFPLoc = SPDiff + PPCFrameInfo::getFramePointerSaveOffset(isPPC64,
1972 int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc);
1974 MVT VT = isPPC64 ? MVT::i64 : MVT::i32;
1975 SDOperand NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
1976 Chain = DAG.getStore(Chain, OldRetAddr, NewRetAddrFrIdx,
1977 PseudoSourceValue::getFixedStack(NewRetAddr), 0);
1978 SDOperand NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
1979 Chain = DAG.getStore(Chain, OldFP, NewFramePtrIdx,
1980 PseudoSourceValue::getFixedStack(NewFPIdx), 0);
1985 /// CalculateTailCallArgDest - Remember the argument for later processing, and
1986 /// calculate the position of the argument.
1988 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
1989 SDOperand Arg, int SPDiff, unsigned ArgOffset,
1990 SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) {
1991 int Offset = ArgOffset + SPDiff;
1992 uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8;
1993 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset);
1994 MVT VT = isPPC64 ? MVT::i64 : MVT::i32;
1995 SDOperand FIN = DAG.getFrameIndex(FI, VT);
1996 TailCallArgumentInfo Info;
1998 Info.FrameIdxOp = FIN;
2000 TailCallArguments.push_back(Info);
2003 /// EmitTailCallLoadFPAndRetAddr - Emit loads of the return address and frame
2004 /// pointer from their stack slots. Returns the chain as result and the loaded
2005 /// values in LROpOut/FPOpOut. Used when tail calling.
2006 SDOperand PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
2010 SDOperand &FPOpOut) {
2012 // Load the LR and FP stack slots for later adjusting.
2013 MVT VT = PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32;
2014 LROpOut = getReturnAddrFrameIndex(DAG);
2015 LROpOut = DAG.getLoad(VT, Chain, LROpOut, NULL, 0);
2016 Chain = SDOperand(LROpOut.Val, 1);
2017 FPOpOut = getFramePointerFrameIndex(DAG);
2018 FPOpOut = DAG.getLoad(VT, Chain, FPOpOut, NULL, 0);
2019 Chain = SDOperand(FPOpOut.Val, 1);
2024 /// CreateCopyOfByValArgument - Make a copy of an aggregate of size "Size" from
2025 /// address "Src" to address "Dst". Alignment information is
2026 /// specified by the specific parameter attribute. The copy will be passed as
2027 /// a byval function parameter.
2028 /// Sometimes what we are copying is the end of a larger object, the part that
2029 /// does not fit in registers.
2031 CreateCopyOfByValArgument(SDOperand Src, SDOperand Dst, SDOperand Chain,
2032 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
2034 SDOperand SizeNode = DAG.getConstant(Size, MVT::i32);
2035 return DAG.getMemcpy(Chain, Dst, Src, SizeNode, Flags.getByValAlign(), false,
2039 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
2040 /// a tail call.
2042 LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDOperand Chain,
2043 SDOperand Arg, SDOperand PtrOff, int SPDiff,
2044 unsigned ArgOffset, bool isPPC64, bool isTailCall,
2045 bool isVector, SmallVector<SDOperand, 8> &MemOpChains,
2046 SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) {
2047 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2052 StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
2054 StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
2055 PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr,
2056 DAG.getConstant(ArgOffset, PtrVT));
2058 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
2059 // Calculate and remember argument location.
2060 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
2064 SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG,
2065 const PPCSubtarget &Subtarget,
2066 TargetMachine &TM) {
2067 SDOperand Chain = Op.getOperand(0);
2068 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
2069 unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
2070 bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0 &&
2071 CC == CallingConv::Fast && PerformTailCallOpt;
2072 SDOperand Callee = Op.getOperand(4);
2073 unsigned NumOps = (Op.getNumOperands() - 5) / 2;
2075 bool isMachoABI = Subtarget.isMachoABI();
2076 bool isELF32_ABI = Subtarget.isELF32_ABI();
2078 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2079 bool isPPC64 = PtrVT == MVT::i64;
2080 unsigned PtrByteSize = isPPC64 ? 8 : 4;
2082 MachineFunction &MF = DAG.getMachineFunction();
2084 // args_to_use will accumulate outgoing args for the PPCISD::CALL case in
2085 // SelectExpr, which uses them to put the arguments in the appropriate registers.
2086 std::vector<SDOperand> args_to_use;
2088 // Mark this function as potentially containing a tail call. As a consequence,
2089 // the frame pointer will be used for dynamic allocas and for restoring the
2090 // caller's stack pointer in this function's epilogue. This is done because by
2091 // tail calling, the called function might overwrite the value in this
2092 // function's (MF) stack pointer stack slot 0(SP).
2093 if (PerformTailCallOpt && CC==CallingConv::Fast)
2094 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
2096 unsigned nAltivecParamsAtEnd = 0;
2098 // Count how many bytes are to be pushed on the stack, including the linkage
2099 // area, and parameter passing area. We start with 24/48 bytes, which is
2100 // prereserved space for [SP][CR][LR][3 x unused].
2102 CalculateParameterAndLinkageAreaSize(DAG, isPPC64, isMachoABI, isVarArg, CC,
2103 Op, nAltivecParamsAtEnd);
2105 // Calculate by how many bytes the stack has to be adjusted in case of tail
2106 // call optimization.
2107 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
2109 // Adjust the stack pointer for the new arguments...
2110 // These operations are automatically eliminated by the prolog/epilog pass
2111 Chain = DAG.getCALLSEQ_START(Chain,
2112 DAG.getConstant(NumBytes, PtrVT));
2113 SDOperand CallSeqStart = Chain;
2115 // Load the return address and frame pointer so they can be moved somewhere
2116 // else later.
2117 SDOperand LROp, FPOp;
2118 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp);
2120 // Set up a copy of the stack pointer for use loading and storing any
2121 // arguments that may not fit in the registers available for argument
2122 // passing.
2125 StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
2127 StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
2129 // Figure out which arguments are going to go in registers, and which in
2130 // memory. Also, if this is a vararg function, floating point operations
2131 // must be stored to our stack, and loaded into integer regs as well, if
2132 // any integer regs are available for argument passing.
2133 unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI);
2134 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
2136 static const unsigned GPR_32[] = { // 32-bit registers.
2137 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
2138 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
2140 static const unsigned GPR_64[] = { // 64-bit registers.
2141 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
2142 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
2144 static const unsigned *FPR = GetFPR(Subtarget);
2146 static const unsigned VR[] = {
2147 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
2148 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
2150 const unsigned NumGPRs = array_lengthof(GPR_32);
2151 const unsigned NumFPRs = isMachoABI ? 13 : 8;
2152 const unsigned NumVRs = array_lengthof( VR);
2154 const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;
2156 std::vector<std::pair<unsigned, SDOperand> > RegsToPass;
2157 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
2159 SmallVector<SDOperand, 8> MemOpChains;
2160 for (unsigned i = 0; i != NumOps; ++i) {
2162 SDOperand Arg = Op.getOperand(5+2*i);
2163 ISD::ArgFlagsTy Flags =
2164 cast<ARG_FLAGSSDNode>(Op.getOperand(5+2*i+1))->getArgFlags();
2165 // See if the next argument requires stack alignment in ELF
2166 bool Align = Flags.isSplit();
2168 // PtrOff will be used to store the current argument to the stack if a
2169 // register cannot be found for it.
2172 // Stack align in ELF 32
2173 if (isELF32_ABI && Align)
2174 PtrOff = DAG.getConstant(ArgOffset + ((ArgOffset/4) % 2) * PtrByteSize,
2175 StackPtr.getValueType());
2177 PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
2179 PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, PtrOff);
2181 // On PPC64, promote integers to 64-bit values.
2182 if (isPPC64 && Arg.getValueType() == MVT::i32) {
2183 // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
2184 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
2185 Arg = DAG.getNode(ExtOp, MVT::i64, Arg);
2188 // FIXME Elf untested, what are alignment rules?
2189 // FIXME memcpy is used way more than necessary. Correctness first.
2190 if (Flags.isByVal()) {
2191 unsigned Size = Flags.getByValSize();
2192 if (isELF32_ABI && Align) GPR_idx += (GPR_idx % 2);
2193 if (Size==1 || Size==2) {
2194 // Very small objects are passed right-justified.
2195 // Everything else is passed left-justified.
2196 MVT VT = (Size==1) ? MVT::i8 : MVT::i16;
2197 if (GPR_idx != NumGPRs) {
2198 SDOperand Load = DAG.getExtLoad(ISD::EXTLOAD, PtrVT, Chain, Arg,
2200 MemOpChains.push_back(Load.getValue(1));
2201 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
2203 ArgOffset += PtrByteSize;
2205 SDOperand Const = DAG.getConstant(4 - Size, PtrOff.getValueType());
2206 SDOperand AddPtr = DAG.getNode(ISD::ADD, PtrVT, PtrOff, Const);
2207 SDOperand MemcpyCall = CreateCopyOfByValArgument(Arg, AddPtr,
2208 CallSeqStart.Val->getOperand(0),
2210 // This must go outside the CALLSEQ_START..END.
2211 SDOperand NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
2212 CallSeqStart.Val->getOperand(1));
2213 DAG.ReplaceAllUsesWith(CallSeqStart.Val, NewCallSeqStart.Val);
2214 Chain = CallSeqStart = NewCallSeqStart;
2215 ArgOffset += PtrByteSize;
2219 // Copy entire object into memory. There are cases where gcc-generated
2220 // code assumes it is there, even if it could be put entirely into
2221 // registers. (This is not what the doc says.)
2222 SDOperand MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
2223 CallSeqStart.Val->getOperand(0),
2225 // This must go outside the CALLSEQ_START..END.
2226 SDOperand NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
2227 CallSeqStart.Val->getOperand(1));
2228 DAG.ReplaceAllUsesWith(CallSeqStart.Val, NewCallSeqStart.Val);
2229 Chain = CallSeqStart = NewCallSeqStart;
2230 // And copy the pieces of it that fit into registers.
2231 for (unsigned j=0; j<Size; j+=PtrByteSize) {
2232 SDOperand Const = DAG.getConstant(j, PtrOff.getValueType());
2233 SDOperand AddArg = DAG.getNode(ISD::ADD, PtrVT, Arg, Const);
2234 if (GPR_idx != NumGPRs) {
2235 SDOperand Load = DAG.getLoad(PtrVT, Chain, AddArg, NULL, 0);
2236 MemOpChains.push_back(Load.getValue(1));
2237 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
2239 ArgOffset += PtrByteSize;
2241 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
2248 switch (Arg.getValueType().getSimpleVT()) {
2249 default: assert(0 && "Unexpected ValueType for argument!");
2252 // Double word align in ELF
2253 if (isELF32_ABI && Align) GPR_idx += (GPR_idx % 2);
2254 if (GPR_idx != NumGPRs) {
2255 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
2257 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
2258 isPPC64, isTailCall, false, MemOpChains,
2262 if (inMem || isMachoABI) {
2263 // Stack align in ELF
2264 if (isELF32_ABI && Align)
2265 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize;
2267 ArgOffset += PtrByteSize;
2272 if (FPR_idx != NumFPRs) {
2273 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
2276 SDOperand Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0);
2277 MemOpChains.push_back(Store);
2279 // Float varargs are always shadowed in available integer registers
2280 if (GPR_idx != NumGPRs) {
2281 SDOperand Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0);
2282 MemOpChains.push_back(Load.getValue(1));
2283 if (isMachoABI) RegsToPass.push_back(std::make_pair(GPR[GPR_idx++],
2286 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
2287 SDOperand ConstFour = DAG.getConstant(4, PtrOff.getValueType());
2288 PtrOff = DAG.getNode(ISD::ADD, PtrVT, PtrOff, ConstFour);
2289 SDOperand Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0);
2290 MemOpChains.push_back(Load.getValue(1));
2291 if (isMachoABI) RegsToPass.push_back(std::make_pair(GPR[GPR_idx++],
2295 // If we have any FPRs remaining, we may also have GPRs remaining.
2296 // Args passed in FPRs consume either 1 (f32) or 2 (f64) of the available
2297 // GPRs.
2299 if (GPR_idx != NumGPRs)
2301 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
2302 !isPPC64) // PPC64 has 64-bit GPRs obviously :)
2307 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
2308 isPPC64, isTailCall, false, MemOpChains,
2312 if (inMem || isMachoABI) {
2313 // Stack align in ELF
2314 if (isELF32_ABI && Align)
2315 ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize;
2319 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
2327 // These go aligned on the stack, or in the corresponding R registers
2328 // when within range. The Darwin PPC ABI doc claims they also go in
2329 // V registers; in fact gcc does this only for arguments that are
2330 // prototyped, not for those that match the ... We do it for all
2331 // arguments; it seems to work.
2332 while (ArgOffset % 16 !=0) {
2333 ArgOffset += PtrByteSize;
2334 if (GPR_idx != NumGPRs)
2337 // We could elide this store in the case where the object fits
2338 // entirely in R registers. Maybe later.
2339 PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr,
2340 DAG.getConstant(ArgOffset, PtrVT));
2341 SDOperand Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0);
2342 MemOpChains.push_back(Store);
2343 if (VR_idx != NumVRs) {
2344 SDOperand Load = DAG.getLoad(MVT::v4f32, Store, PtrOff, NULL, 0);
2345 MemOpChains.push_back(Load.getValue(1));
2346 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
2349 for (unsigned i=0; i<16; i+=PtrByteSize) {
2350 if (GPR_idx == NumGPRs)
2352 SDOperand Ix = DAG.getNode(ISD::ADD, PtrVT, PtrOff,
2353 DAG.getConstant(i, PtrVT));
2354 SDOperand Load = DAG.getLoad(PtrVT, Store, Ix, NULL, 0);
2355 MemOpChains.push_back(Load.getValue(1));
2356 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
2361 // Non-varargs Altivec params generally go in registers, but have
2362 // stack space allocated at the end.
2363 if (VR_idx != NumVRs) {
2364 // Doesn't have GPR space allocated.
2365 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
2366 } else if (nAltivecParamsAtEnd==0) {
2367 // We are emitting Altivec params in order.
2368 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
2369 isPPC64, isTailCall, true, MemOpChains,
2376 // If all Altivec parameters fit in registers, as they usually do,
2377 // they get stack space following the non-Altivec parameters. We
2378 // don't track this here because nobody below needs it.
2379 // If there are more Altivec parameters than fit in registers emit
2381 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) {
2383 // Offset is aligned; skip the first 12 params, which go in V registers.
2384 ArgOffset = ((ArgOffset+15)/16)*16;
2386 for (unsigned i = 0; i != NumOps; ++i) {
2387 SDOperand Arg = Op.getOperand(5+2*i);
2388 MVT ArgType = Arg.getValueType();
2389 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
2390 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
2393 // We are emitting Altivec params in order.
2394 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
2395 isPPC64, isTailCall, true, MemOpChains,
2403 if (!MemOpChains.empty())
2404 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
2405 &MemOpChains[0], MemOpChains.size());
2407 // Build a sequence of copy-to-reg nodes chained together with token chain
2408 // and flag operands which copy the outgoing args into the appropriate regs.
2410 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
2411 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
2413 InFlag = Chain.getValue(1);
2416 // With the ELF 32 ABI, set CR6 to true if this is a vararg call.
2417 if (isVarArg && isELF32_ABI) {
2418 SDOperand SetCR(DAG.getTargetNode(PPC::CRSET, MVT::i32), 0);
2419 Chain = DAG.getCopyToReg(Chain, PPC::CR1EQ, SetCR, InFlag);
2420 InFlag = Chain.getValue(1);
2423 // Emit a sequence of copyto/copyfrom virtual registers for arguments that
2424 // might overwrite each other in case of tail call optimization.
2426 SmallVector<SDOperand, 8> MemOpChains2;
2427 // Do not flag preceding copytoreg stuff together with the following stuff.
2428 InFlag = SDOperand();
2429 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
2431 if (!MemOpChains2.empty())
2432 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
2433 &MemOpChains2[0], MemOpChains2.size());
2435 // Store the return address to the appropriate stack slot.
2436 Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff,
2437 isPPC64, isMachoABI);
2440 // Emit callseq_end just before the tail call node.
2442 SmallVector<SDOperand, 8> CallSeqOps;
2443 SDVTList CallSeqNodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
2444 CallSeqOps.push_back(Chain);
2445 CallSeqOps.push_back(DAG.getIntPtrConstant(NumBytes));
2446 CallSeqOps.push_back(DAG.getIntPtrConstant(0));
2448 CallSeqOps.push_back(InFlag);
2449 Chain = DAG.getNode(ISD::CALLSEQ_END, CallSeqNodeTys, &CallSeqOps[0],
2451 InFlag = Chain.getValue(1);
2454 std::vector<MVT> NodeTys;
2455 NodeTys.push_back(MVT::Other); // Returns a chain
2456 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
2458 SmallVector<SDOperand, 8> Ops;
2459 unsigned CallOpc = isMachoABI? PPCISD::CALL_Macho : PPCISD::CALL_ELF;
2461 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
2462 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
2463 // node so that legalize doesn't hack it.
2464 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
2465 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), Callee.getValueType());
2466 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
2467 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType());
2468 else if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
2469 // If this is an absolute destination address, use the munged value.
2470 Callee = SDOperand(Dest, 0);
2472 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair
2473 // to do the call; we can't use PPCISD::CALL.
2474 SDOperand MTCTROps[] = {Chain, Callee, InFlag};
2475 Chain = DAG.getNode(PPCISD::MTCTR, NodeTys, MTCTROps, 2+(InFlag.Val!=0));
2476 InFlag = Chain.getValue(1);
2478 // Copy the callee address into R12/X12 on Darwin.
2480 unsigned Reg = Callee.getValueType() == MVT::i32 ? PPC::R12 : PPC::X12;
2481 Chain = DAG.getCopyToReg(Chain, Reg, Callee, InFlag);
2482 InFlag = Chain.getValue(1);
2486 NodeTys.push_back(MVT::Other);
2487 NodeTys.push_back(MVT::Flag);
2488 Ops.push_back(Chain);
2489 CallOpc = isMachoABI ? PPCISD::BCTRL_Macho : PPCISD::BCTRL_ELF;
2491 // Add the CTR register as the callee so a bctr can be emitted later.
2493 Ops.push_back(DAG.getRegister(PPC::CTR, getPointerTy()));
2496 // If this is a direct call, pass the chain and the callee.
2498 Ops.push_back(Chain);
2499 Ops.push_back(Callee);
2501 // If this is a tail call add stack pointer delta.
2503 Ops.push_back(DAG.getConstant(SPDiff, MVT::i32));
2505 // Add argument registers to the end of the list so that they are known live
2506 // into the call.
2507 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
2508 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
2509 RegsToPass[i].second.getValueType()));
2511 // When performing tail call optimization the callee pops its arguments off
2512 // the stack. Account for this here so these bytes can be pushed back on in
2513 // PPCRegisterInfo::eliminateCallFramePseudoInstr.
2514 int BytesCalleePops =
2515 (CC==CallingConv::Fast && PerformTailCallOpt) ? NumBytes : 0;
2518 Ops.push_back(InFlag);
2522 assert(InFlag.Val &&
2523 "Flag must be set. Depend on flag being set in LowerRET");
2524 Chain = DAG.getNode(PPCISD::TAILCALL,
2525 Op.Val->getVTList(), &Ops[0], Ops.size());
2526 return SDOperand(Chain.Val, Op.ResNo);
2529 Chain = DAG.getNode(CallOpc, NodeTys, &Ops[0], Ops.size());
2530 InFlag = Chain.getValue(1);
2532 Chain = DAG.getCALLSEQ_END(Chain,
2533 DAG.getConstant(NumBytes, PtrVT),
2534 DAG.getConstant(BytesCalleePops, PtrVT),
2536 if (Op.Val->getValueType(0) != MVT::Other)
2537 InFlag = Chain.getValue(1);
2539 SmallVector<SDOperand, 16> ResultVals;
2540 SmallVector<CCValAssign, 16> RVLocs;
2541 unsigned CallerCC = DAG.getMachineFunction().getFunction()->getCallingConv();
2542 CCState CCInfo(CallerCC, isVarArg, TM, RVLocs);
2543 CCInfo.AnalyzeCallResult(Op.Val, RetCC_PPC);
2545 // Copy all of the result registers out of their specified physreg.
2546 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
2547 CCValAssign &VA = RVLocs[i];
2548 MVT VT = VA.getValVT();
2549 assert(VA.isRegLoc() && "Can only return in registers!");
2550 Chain = DAG.getCopyFromReg(Chain, VA.getLocReg(), VT, InFlag).getValue(1);
2551 ResultVals.push_back(Chain.getValue(0));
2552 InFlag = Chain.getValue(2);
2555 // If the function returns void, just return the chain.
2559 // Otherwise, merge everything together with a MERGE_VALUES node.
2560 ResultVals.push_back(Chain);
2561 SDOperand Res = DAG.getMergeValues(Op.Val->getVTList(), &ResultVals[0],
2562 ResultVals.size());
2563 return Res.getValue(Op.ResNo);
2566 SDOperand PPCTargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG,
2567 TargetMachine &TM) {
2568 SmallVector<CCValAssign, 16> RVLocs;
2569 unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
2570 bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
2571 CCState CCInfo(CC, isVarArg, TM, RVLocs);
2572 CCInfo.AnalyzeReturn(Op.Val, RetCC_PPC);
2574 // If this is the first return lowered for this function, add the regs to the
2575 // liveout set for the function.
2576 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
2577 for (unsigned i = 0; i != RVLocs.size(); ++i)
2578 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
2581 SDOperand Chain = Op.getOperand(0);
2583 Chain = GetPossiblePreceedingTailCall(Chain, PPCISD::TAILCALL);
2584 if (Chain.getOpcode() == PPCISD::TAILCALL) {
2585 SDOperand TailCall = Chain;
2586 SDOperand TargetAddress = TailCall.getOperand(1);
2587 SDOperand StackAdjustment = TailCall.getOperand(2);
2589 assert(((TargetAddress.getOpcode() == ISD::Register &&
2590 cast<RegisterSDNode>(TargetAddress)->getReg() == PPC::CTR) ||
2591 TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
2592 TargetAddress.getOpcode() == ISD::TargetGlobalAddress ||
2593 isa<ConstantSDNode>(TargetAddress)) &&
2594 "Expecting an global address, external symbol, absolute value or register");
2596 assert(StackAdjustment.getOpcode() == ISD::Constant &&
2597 "Expecting a const value");
2599 SmallVector<SDOperand,8> Operands;
2600 Operands.push_back(Chain.getOperand(0));
2601 Operands.push_back(TargetAddress);
2602 Operands.push_back(StackAdjustment);
2603 // Copy registers used by the call. Last operand is a flag so it is not
2604 // copied.
2605 for (unsigned i=3; i < TailCall.getNumOperands()-1; i++) {
2606 Operands.push_back(Chain.getOperand(i));
2608 return DAG.getNode(PPCISD::TC_RETURN, MVT::Other, &Operands[0],
2614 // Copy the result values into the output registers.
2615 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2616 CCValAssign &VA = RVLocs[i];
2617 assert(VA.isRegLoc() && "Can only return in registers!");
2618 Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), Op.getOperand(i*2+1), Flag);
2619 Flag = Chain.getValue(1);
2623 return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Chain, Flag);
2625 return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Chain);
2628 SDOperand PPCTargetLowering::LowerSTACKRESTORE(SDOperand Op, SelectionDAG &DAG,
2629 const PPCSubtarget &Subtarget) {
2630 // When we pop the dynamic allocation we need to restore the SP link.
2632 // Get the correct type for pointers.
2633 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2635 // Construct the stack pointer operand.
2636 bool IsPPC64 = Subtarget.isPPC64();
2637 unsigned SP = IsPPC64 ? PPC::X1 : PPC::R1;
2638 SDOperand StackPtr = DAG.getRegister(SP, PtrVT);
2640 // Get the operands for the STACKRESTORE.
2641 SDOperand Chain = Op.getOperand(0);
2642 SDOperand SaveSP = Op.getOperand(1);
2644 // Load the old link SP.
2645 SDOperand LoadLinkSP = DAG.getLoad(PtrVT, Chain, StackPtr, NULL, 0);
2647 // Restore the stack pointer.
2648 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), SP, SaveSP);
2650 // Store the old link SP.
2651 return DAG.getStore(Chain, LoadLinkSP, StackPtr, NULL, 0);
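// --- Illustrative sketch (not part of the original file) --------------------
// In plain pointer terms, the STACKRESTORE lowering above does the following,
// assuming the back chain lives at 0(SP) as in both ABIs modeled here:
static inline void IllustrativeStackRestore(char *&SP, char *SaveSP) {
  char *Link = *reinterpret_cast<char **>(SP); // load the old SP link
  SP = SaveSP;                                 // restore the stack pointer
  *reinterpret_cast<char **>(SP) = Link;       // store the link at the new SP
}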
2657 PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
2658 MachineFunction &MF = DAG.getMachineFunction();
2659 bool IsPPC64 = PPCSubTarget.isPPC64();
2660 bool isMachoABI = PPCSubTarget.isMachoABI();
2661 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2663 // Get the current return address save index.
2665 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
2666 int RASI = FI->getReturnAddrSaveIndex();
2668 // If the return address save index hasn't been defined yet.
2670 // Find out the fixed offset of the return address save area.
2671 int LROffset = PPCFrameInfo::getReturnSaveOffset(IsPPC64, isMachoABI);
2672 // Allocate the frame index for the return address save area.
2673 RASI = MF.getFrameInfo()->CreateFixedObject(IsPPC64? 8 : 4, LROffset);
2675 FI->setReturnAddrSaveIndex(RASI);
2677 return DAG.getFrameIndex(RASI, PtrVT);
2681 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
2682 MachineFunction &MF = DAG.getMachineFunction();
2683 bool IsPPC64 = PPCSubTarget.isPPC64();
2684 bool isMachoABI = PPCSubTarget.isMachoABI();
2685 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2687 // Get current frame pointer save index. The users of this index will be
2688 // primarily DYNALLOC instructions.
2689 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
2690 int FPSI = FI->getFramePointerSaveIndex();
2692 // If the frame pointer save index hasn't been defined yet.
2694 // Find out the fixed offset of the frame pointer save area.
2695 int FPOffset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64, isMachoABI);
2697 // Allocate the frame index for the frame pointer save area.
2698 FPSI = MF.getFrameInfo()->CreateFixedObject(IsPPC64? 8 : 4, FPOffset);
2700 FI->setFramePointerSaveIndex(FPSI);
2702 return DAG.getFrameIndex(FPSI, PtrVT);
2705 SDOperand PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op,
2707 const PPCSubtarget &Subtarget) {
2709 SDOperand Chain = Op.getOperand(0);
2710 SDOperand Size = Op.getOperand(1);
2712 // Get the correct type for pointers.
2713 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2715 SDOperand NegSize = DAG.getNode(ISD::SUB, PtrVT,
2716 DAG.getConstant(0, PtrVT), Size);
2717 // Construct a node for the frame pointer save index.
2718 SDOperand FPSIdx = getFramePointerFrameIndex(DAG);
2719 // Build a DYNALLOC node.
2720 SDOperand Ops[3] = { Chain, NegSize, FPSIdx };
2721 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
2722 return DAG.getNode(PPCISD::DYNALLOC, VTs, Ops, 3);
2725 SDOperand PPCTargetLowering::LowerAtomicLOAD_ADD(SDOperand Op, SelectionDAG &DAG) {
2726 MVT VT = Op.Val->getValueType(0);
2727 SDOperand Chain = Op.getOperand(0);
2728 SDOperand Ptr = Op.getOperand(1);
2729 SDOperand Incr = Op.getOperand(2);
2731 SDVTList VTs = DAG.getVTList(VT, MVT::Other);
2737 return DAG.getNode(PPCISD::ATOMIC_LOAD_ADD, VTs, Ops, 3);
2740 SDOperand PPCTargetLowering::LowerAtomicCMP_SWAP(SDOperand Op, SelectionDAG &DAG) {
2741 MVT VT = Op.Val->getValueType(0);
2742 SDOperand Chain = Op.getOperand(0);
2743 SDOperand Ptr = Op.getOperand(1);
2744 SDOperand NewVal = Op.getOperand(2);
2745 SDOperand OldVal = Op.getOperand(3);
2747 SDVTList VTs = DAG.getVTList(VT, MVT::Other);
2754 return DAG.getNode(PPCISD::ATOMIC_CMP_SWAP, VTs, Ops, 4);
2757 SDOperand PPCTargetLowering::LowerAtomicSWAP(SDOperand Op, SelectionDAG &DAG) {
2758 MVT VT = Op.Val->getValueType(0);
2759 SDOperand Chain = Op.getOperand(0);
2760 SDOperand Ptr = Op.getOperand(1);
2761 SDOperand NewVal = Op.getOperand(2);
2763 SDVTList VTs = DAG.getVTList(VT, MVT::Other);
2769 return DAG.getNode(PPCISD::ATOMIC_SWAP, VTs, Ops, 3);
2772 /// LowerSELECT_CC - Lower floating-point select_cc's into the fsel instruction
2773 /// when it can.
2774 SDOperand PPCTargetLowering::LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) {
2775 // Not FP? Not a fsel.
2776 if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
2777 !Op.getOperand(2).getValueType().isFloatingPoint())
2780 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2782 // Cannot handle SETEQ/SETNE.
2783 if (CC == ISD::SETEQ || CC == ISD::SETNE) return SDOperand();
2785 MVT ResVT = Op.getValueType();
2786 MVT CmpVT = Op.getOperand(0).getValueType();
2787 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
2788 SDOperand TV = Op.getOperand(2), FV = Op.getOperand(3);
2790 // If the RHS of the comparison is a 0.0, we don't need to do the
2791 // subtraction at all.
2792 if (isFloatingPointZero(RHS))
2794 default: break; // SETUO etc aren't handled by fsel.
2798 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
2802 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
2803 LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
2804 return DAG.getNode(PPCISD::FSEL, ResVT, LHS, TV, FV);
2808 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
2812 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
2813 LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
2814 return DAG.getNode(PPCISD::FSEL, ResVT,
2815 DAG.getNode(ISD::FNEG, MVT::f64, LHS), TV, FV);
2820 default: break; // SETUO etc aren't handled by fsel.
2824 Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
2825 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
2826 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
2827 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
2831 Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
2832 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
2833 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
2834 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
2838 Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
2839 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
2840 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
2841 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
2845 Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
2846 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
2847 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
2848 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
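// --- Illustrative sketch (not part of the original file) --------------------
// fsel is a non-trapping "Cmp >= 0.0 ? TV : FV" select; each ordered
// comparison above is rewritten into that form (e.g. "a < b ? t : f" becomes
// fsel(a - b, f, t)). A scalar model of the semantics, ignoring the NaN cases
// that make SETEQ/SETNE unhandleable here:
static inline double IllustrativeFSel(double Cmp, double TV, double FV) {
  return Cmp >= 0.0 ? TV : FV;
}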
2853 // FIXME: Split this code up when LegalizeDAGTypes lands.
2854 SDOperand PPCTargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
2855 assert(Op.getOperand(0).getValueType().isFloatingPoint());
2856 SDOperand Src = Op.getOperand(0);
2857 if (Src.getValueType() == MVT::f32)
2858 Src = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Src);
2861 switch (Op.getValueType().getSimpleVT()) {
2862 default: assert(0 && "Unhandled FP_TO_SINT type in custom expander!");
2864 Tmp = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Src);
2867 Tmp = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Src);
2871 // Convert the FP value to an int value through memory.
2872 SDOperand FIPtr = DAG.CreateStackTemporary(MVT::f64);
2874 // Emit a store to the stack slot.
2875 SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Tmp, FIPtr, NULL, 0);
2877 // Result is a load from the stack slot. If loading 4 bytes, make sure to
2878 // load from offset 4, where the big-endian store put the low word.
2879 if (Op.getValueType() == MVT::i32)
2880 FIPtr = DAG.getNode(ISD::ADD, FIPtr.getValueType(), FIPtr,
2881 DAG.getConstant(4, FIPtr.getValueType()));
2882 return DAG.getLoad(Op.getValueType(), Chain, FIPtr, NULL, 0);
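// --- Illustrative sketch (not part of the original file) --------------------
// Why the +4 above: fctiwz leaves its 32-bit result in the low word of the
// stored f64, and PPC is big-endian, so that word sits at byte offset 4.
// A standalone model of picking the word out of the 8-byte slot (assumes the
// <stdint.h> fixed-width types):
static inline int32_t IllustrativeLoadLowWordBE(const unsigned char Buf[8]) {
  return (int32_t)(((uint32_t)Buf[4] << 24) | ((uint32_t)Buf[5] << 16) |
                   ((uint32_t)Buf[6] << 8)  |  (uint32_t)Buf[7]);
}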
2885 SDOperand PPCTargetLowering::LowerFP_ROUND_INREG(SDOperand Op,
2886 SelectionDAG &DAG) {
2887 assert(Op.getValueType() == MVT::ppcf128);
2888 SDNode *Node = Op.Val;
2889 assert(Node->getOperand(0).getValueType() == MVT::ppcf128);
2890 assert(Node->getOperand(0).Val->getOpcode() == ISD::BUILD_PAIR);
2891 SDOperand Lo = Node->getOperand(0).Val->getOperand(0);
2892 SDOperand Hi = Node->getOperand(0).Val->getOperand(1);
2894 // This sequence changes FPSCR to do round-to-zero, adds the two halves
2895 // of the long double, and puts FPSCR back the way it was. We do not
2896 // actually model FPSCR.
2897 std::vector<MVT> NodeTys;
2898 SDOperand Ops[4], Result, MFFSreg, InFlag, FPreg;
2900 NodeTys.push_back(MVT::f64); // Return register
2901 NodeTys.push_back(MVT::Flag); // Returns a flag for later insns
2902 Result = DAG.getNode(PPCISD::MFFS, NodeTys, &InFlag, 0);
2903 MFFSreg = Result.getValue(0);
2904 InFlag = Result.getValue(1);
2907 NodeTys.push_back(MVT::Flag); // Returns a flag
2908 Ops[0] = DAG.getConstant(31, MVT::i32);
2910 Result = DAG.getNode(PPCISD::MTFSB1, NodeTys, Ops, 2);
2911 InFlag = Result.getValue(0);
2914 NodeTys.push_back(MVT::Flag); // Returns a flag
2915 Ops[0] = DAG.getConstant(30, MVT::i32);
2917 Result = DAG.getNode(PPCISD::MTFSB0, NodeTys, Ops, 2);
2918 InFlag = Result.getValue(0);
2921 NodeTys.push_back(MVT::f64); // result of add
2922 NodeTys.push_back(MVT::Flag); // Returns a flag
2926 Result = DAG.getNode(PPCISD::FADDRTZ, NodeTys, Ops, 3);
2927 FPreg = Result.getValue(0);
2928 InFlag = Result.getValue(1);
2931 NodeTys.push_back(MVT::f64);
2932 Ops[0] = DAG.getConstant(1, MVT::i32);
2936 Result = DAG.getNode(PPCISD::MTFSF, NodeTys, Ops, 4);
2937 FPreg = Result.getValue(0);
2939 // We know the low half is about to be thrown away, so just use something
2940 // convenient.
2941 return DAG.getNode(ISD::BUILD_PAIR, Lo.getValueType(), FPreg, FPreg);
2944 SDOperand PPCTargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
2945 // Don't handle ppc_fp128 here; let it be lowered to a libcall.
2946 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
2949 if (Op.getOperand(0).getValueType() == MVT::i64) {
2950 SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0));
2951 SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Bits);
2952 if (Op.getValueType() == MVT::f32)
2953 FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP, DAG.getIntPtrConstant(0));
2957 assert(Op.getOperand(0).getValueType() == MVT::i32 &&
2958 "Unhandled SINT_TO_FP type in custom expander!");
2959 // Since we only generate this in 64-bit mode, we can take advantage of
2960 // 64-bit registers. In particular, sign extend the input value into the
2961 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack,
2962 // then lfd it and fcfid it.
2963 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
2964 int FrameIdx = FrameInfo->CreateStackObject(8, 8);
2965 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2966 SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
2968 SDOperand Ext64 = DAG.getNode(PPCISD::EXTSW_32, MVT::i32,
2971 // STD the extended value into the stack slot.
2972 MachineMemOperand MO(PseudoSourceValue::getFixedStack(FrameIdx),
2973 MachineMemOperand::MOStore, 0, 8, 8);
2974 SDOperand Store = DAG.getNode(PPCISD::STD_32, MVT::Other,
2975 DAG.getEntryNode(), Ext64, FIdx,
2976 DAG.getMemOperand(MO));
2977 // Load the value as a double.
2978 SDOperand Ld = DAG.getLoad(MVT::f64, Store, FIdx, NULL, 0);
2980 // FCFID it and return it.
2981 SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Ld);
2982 if (Op.getValueType() == MVT::f32)
2983 FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP, DAG.getIntPtrConstant(0));
2987 SDOperand PPCTargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) {
2989 The rounding mode is in bits 30:31 of FPSCR, and has the following
2990 settings:
2991   00 Round to nearest
2992   01 Round to 0
2993   10 Round to +inf
2994   11 Round to -inf
2996 FLT_ROUNDS, on the other hand, expects the following:
2997   -1 Undefined
2998    0 Round to 0
2999    1 Round to nearest
3000    2 Round to +inf
3001    3 Round to -inf
3003 To perform the conversion, we do:
3004   ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
3007 MachineFunction &MF = DAG.getMachineFunction();
3008 MVT VT = Op.getValueType();
3009 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
3010 std::vector<MVT> NodeTys;
3011 SDOperand MFFSreg, InFlag;
3013 // Save FP Control Word to register
3014 NodeTys.push_back(MVT::f64); // return register
3015 NodeTys.push_back(MVT::Flag); // unused in this context
3016 SDOperand Chain = DAG.getNode(PPCISD::MFFS, NodeTys, &InFlag, 0);
3018 // Save FP register to stack slot
3019 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
3020 SDOperand StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
3021 SDOperand Store = DAG.getStore(DAG.getEntryNode(), Chain,
3022 StackSlot, NULL, 0);
3024 // Load FP Control Word from low 32 bits of stack slot.
3025 SDOperand Four = DAG.getConstant(4, PtrVT);
3026 SDOperand Addr = DAG.getNode(ISD::ADD, PtrVT, StackSlot, Four);
3027 SDOperand CWD = DAG.getLoad(MVT::i32, Store, Addr, NULL, 0);
3029 // Transform as necessary
3031 DAG.getNode(ISD::AND, MVT::i32,
3032 CWD, DAG.getConstant(3, MVT::i32));
3034 DAG.getNode(ISD::SRL, MVT::i32,
3035 DAG.getNode(ISD::AND, MVT::i32,
3036 DAG.getNode(ISD::XOR, MVT::i32,
3037 CWD, DAG.getConstant(3, MVT::i32)),
3038 DAG.getConstant(3, MVT::i32)),
3039 DAG.getConstant(1, MVT::i8));
3042 DAG.getNode(ISD::XOR, MVT::i32, CWD1, CWD2);
3044 return DAG.getNode((VT.getSizeInBits() < 16 ?
3045 ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal);
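// --- Illustrative sketch (not part of the original file) --------------------
// The bit trick above, as a standalone function, with its mapping spelled out:
static inline unsigned IllustrativeFLTRounds(unsigned FPSCR) {
  unsigned RN = FPSCR & 0x3;
  // Maps 00 (nearest) -> 1, 01 (to zero) -> 0, 10 (+inf) -> 2, 11 (-inf) -> 3.
  return RN ^ ((~RN & 0x3) >> 1);
}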
3048 SDOperand PPCTargetLowering::LowerSHL_PARTS(SDOperand Op, SelectionDAG &DAG) {
3049 MVT VT = Op.getValueType();
3050 unsigned BitWidth = VT.getSizeInBits();
3051 assert(Op.getNumOperands() == 3 &&
3052 VT == Op.getOperand(1).getValueType() &&
3055 // Expand into a bunch of logical ops. Note that these ops
3056 // depend on the PPC behavior for oversized shift amounts.
3057 SDOperand Lo = Op.getOperand(0);
3058 SDOperand Hi = Op.getOperand(1);
3059 SDOperand Amt = Op.getOperand(2);
3060 MVT AmtVT = Amt.getValueType();
3062 SDOperand Tmp1 = DAG.getNode(ISD::SUB, AmtVT,
3063 DAG.getConstant(BitWidth, AmtVT), Amt);
3064 SDOperand Tmp2 = DAG.getNode(PPCISD::SHL, VT, Hi, Amt);
3065 SDOperand Tmp3 = DAG.getNode(PPCISD::SRL, VT, Lo, Tmp1);
3066 SDOperand Tmp4 = DAG.getNode(ISD::OR , VT, Tmp2, Tmp3);
3067 SDOperand Tmp5 = DAG.getNode(ISD::ADD, AmtVT, Amt,
3068 DAG.getConstant(-BitWidth, AmtVT));
3069 SDOperand Tmp6 = DAG.getNode(PPCISD::SHL, VT, Lo, Tmp5);
3070 SDOperand OutHi = DAG.getNode(ISD::OR, VT, Tmp4, Tmp6);
3071 SDOperand OutLo = DAG.getNode(PPCISD::SHL, VT, Lo, Amt);
3072 SDOperand OutOps[] = { OutLo, OutHi };
3073 return DAG.getMergeValues(OutOps, 2);
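// --- Illustrative sketch (not part of the original file) --------------------
// Scalar model of the SHL_PARTS expansion above. It assumes the PPC shift
// semantics the comments rely on: a 32-bit shift whose 6-bit amount is in the
// range 32..63 produces zero rather than wrapping. The SRL/SRA variants below
// follow the same pattern.
static inline uint32_t IllustrativePPCShl(uint32_t V, unsigned Amt6) {
  Amt6 &= 63;
  return Amt6 >= 32 ? 0 : V << Amt6;
}
static inline uint32_t IllustrativePPCSrl(uint32_t V, unsigned Amt6) {
  Amt6 &= 63;
  return Amt6 >= 32 ? 0 : V >> Amt6;
}
static inline void IllustrativeShlParts(uint32_t Lo, uint32_t Hi,
                                        unsigned Amt, // 0..63
                                        uint32_t &OutLo, uint32_t &OutHi) {
  uint32_t Tmp4 = IllustrativePPCShl(Hi, Amt) |      // Tmp2 in the code above
                  IllustrativePPCSrl(Lo, 32 - Amt);  // Tmp3; zero when Amt == 0
  uint32_t Tmp6 = IllustrativePPCShl(Lo, Amt - 32);  // zero while Amt < 32
  OutHi = Tmp4 | Tmp6;
  OutLo = IllustrativePPCShl(Lo, Amt);
}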
3076 SDOperand PPCTargetLowering::LowerSRL_PARTS(SDOperand Op, SelectionDAG &DAG) {
3077 MVT VT = Op.getValueType();
3078 unsigned BitWidth = VT.getSizeInBits();
3079 assert(Op.getNumOperands() == 3 &&
3080 VT == Op.getOperand(1).getValueType() &&
3083 // Expand into a bunch of logical ops. Note that these ops
3084 // depend on the PPC behavior for oversized shift amounts.
3085 SDOperand Lo = Op.getOperand(0);
3086 SDOperand Hi = Op.getOperand(1);
3087 SDOperand Amt = Op.getOperand(2);
3088 MVT AmtVT = Amt.getValueType();
3090 SDOperand Tmp1 = DAG.getNode(ISD::SUB, AmtVT,
3091 DAG.getConstant(BitWidth, AmtVT), Amt);
3092 SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, VT, Lo, Amt);
3093 SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, VT, Hi, Tmp1);
3094 SDOperand Tmp4 = DAG.getNode(ISD::OR , VT, Tmp2, Tmp3);
3095 SDOperand Tmp5 = DAG.getNode(ISD::ADD, AmtVT, Amt,
3096 DAG.getConstant(-BitWidth, AmtVT));
3097 SDOperand Tmp6 = DAG.getNode(PPCISD::SRL, VT, Hi, Tmp5);
3098 SDOperand OutLo = DAG.getNode(ISD::OR, VT, Tmp4, Tmp6);
3099 SDOperand OutHi = DAG.getNode(PPCISD::SRL, VT, Hi, Amt);
3100 SDOperand OutOps[] = { OutLo, OutHi };
3101 return DAG.getMergeValues(OutOps, 2);
3104 SDOperand PPCTargetLowering::LowerSRA_PARTS(SDOperand Op, SelectionDAG &DAG) {
3105 MVT VT = Op.getValueType();
3106 unsigned BitWidth = VT.getSizeInBits();
3107 assert(Op.getNumOperands() == 3 &&
3108 VT == Op.getOperand(1).getValueType() &&
3111 // Expand into a bunch of logical ops, followed by a select_cc.
3112 SDOperand Lo = Op.getOperand(0);
3113 SDOperand Hi = Op.getOperand(1);
3114 SDOperand Amt = Op.getOperand(2);
3115 MVT AmtVT = Amt.getValueType();
3117 SDOperand Tmp1 = DAG.getNode(ISD::SUB, AmtVT,
3118 DAG.getConstant(BitWidth, AmtVT), Amt);
3119 SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, VT, Lo, Amt);
3120 SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, VT, Hi, Tmp1);
3121 SDOperand Tmp4 = DAG.getNode(ISD::OR , VT, Tmp2, Tmp3);
3122 SDOperand Tmp5 = DAG.getNode(ISD::ADD, AmtVT, Amt,
3123 DAG.getConstant(-BitWidth, AmtVT));
3124 SDOperand Tmp6 = DAG.getNode(PPCISD::SRA, VT, Hi, Tmp5);
3125 SDOperand OutHi = DAG.getNode(PPCISD::SRA, VT, Hi, Amt);
3126 SDOperand OutLo = DAG.getSelectCC(Tmp5, DAG.getConstant(0, AmtVT),
3127 Tmp4, Tmp6, ISD::SETLE);
3128 SDOperand OutOps[] = { OutLo, OutHi };
3129 return DAG.getMergeValues(OutOps, 2);
3132 //===----------------------------------------------------------------------===//
3133 // Vector related lowering.
3136 // If this is a vector of constants or undefs, get the bits. A bit in
3137 // UndefBits is set if the corresponding element of the vector is an
3138 // ISD::UNDEF value. For undefs, the corresponding VectorBits values are
3139 // zero. Return true if this is not an array of constants, false if it is.
3141 static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t VectorBits[2],
3142 uint64_t UndefBits[2]) {
3143 // Start with zero'd results.
3144 VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0;
3146 unsigned EltBitSize = BV->getOperand(0).getValueType().getSizeInBits();
3147 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
3148 SDOperand OpVal = BV->getOperand(i);
3150 unsigned PartNo = i >= e/2; // In the upper 64 bits?
3151 unsigned SlotNo = e/2 - (i & (e/2-1))-1; // Which subpiece of the uint64_t.
3153 uint64_t EltBits = 0;
3154 if (OpVal.getOpcode() == ISD::UNDEF) {
3155 uint64_t EltUndefBits = ~0U >> (32-EltBitSize);
3156 UndefBits[PartNo] |= EltUndefBits << (SlotNo*EltBitSize);
3158 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
3159 EltBits = CN->getValue() & (~0U >> (32-EltBitSize));
3160 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
3161 assert(CN->getValueType(0) == MVT::f32 &&
3162 "Only one legal FP vector type!");
3163 EltBits = FloatToBits(CN->getValueAPF().convertToFloat());
3165 // Nonconstant element.
3169 VectorBits[PartNo] |= EltBits << (SlotNo*EltBitSize);
3172 //printf("%llx %llx %llx %llx\n",
3173 // VectorBits[0], VectorBits[1], UndefBits[0], UndefBits[1]);
// If this is a splat (repetition) of a value across the whole vector, return
// the smallest size that splats it.  For example, "0x01010101010101..." is a
// splat of 0x01, 0x0101, and 0x01010101.  We return SplatBits = 0x01 and
// SplatSize = 1 byte.
static bool isConstantSplat(const uint64_t Bits128[2],
                            const uint64_t Undef128[2],
                            unsigned &SplatBits, unsigned &SplatUndef,
                            unsigned &SplatSize) {
  // Don't let undefs prevent splats from matching.  See if the top 64-bits are
  // the same as the lower 64-bits, ignoring undefs.
  if ((Bits128[0] & ~Undef128[1]) != (Bits128[1] & ~Undef128[0]))
    return false;  // Can't be a splat if two pieces don't match.

  uint64_t Bits64 = Bits128[0] | Bits128[1];
  uint64_t Undef64 = Undef128[0] & Undef128[1];

  // Check that the top 32-bits are the same as the lower 32-bits, ignoring
  // undefs.
  if ((Bits64 & (~Undef64 >> 32)) != ((Bits64 >> 32) & ~Undef64))
    return false;  // Can't be a splat if two pieces don't match.

  uint32_t Bits32 = uint32_t(Bits64) | uint32_t(Bits64 >> 32);
  uint32_t Undef32 = uint32_t(Undef64) & uint32_t(Undef64 >> 32);

  // If the top 16-bits are different than the lower 16-bits, ignoring
  // undefs, we have an i32 splat.
  if ((Bits32 & (~Undef32 >> 16)) != ((Bits32 >> 16) & ~Undef32)) {
    SplatBits = Bits32;
    SplatUndef = Undef32;
    SplatSize = 4;
    return true;
  }

  uint16_t Bits16 = uint16_t(Bits32) | uint16_t(Bits32 >> 16);
  uint16_t Undef16 = uint16_t(Undef32) & uint16_t(Undef32 >> 16);

  // If the top 8-bits are different than the lower 8-bits, ignoring
  // undefs, we have an i16 splat.
  if ((Bits16 & (uint16_t(~Undef16) >> 8)) != ((Bits16 >> 8) & ~Undef16)) {
    SplatBits = Bits16;
    SplatUndef = Undef16;
    SplatSize = 2;
    return true;
  }

  // Otherwise, we have an 8-bit splat.
  SplatBits  = uint8_t(Bits16)  | uint8_t(Bits16 >> 8);
  SplatUndef = uint8_t(Undef16) & uint8_t(Undef16 >> 8);
  SplatSize = 1;
  return true;
}
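// Illustrative trace: with Bits128 = { 0x0101010101010101, 0x0101010101010101 }
// and no undefs, the two 64-bit halves match, the 32-bit halves of Bits64
// match, the 16-bit halves of Bits32 = 0x01010101 match, and the 8-bit halves
// of Bits16 = 0x0101 match, so we report SplatBits = 0x01, SplatSize = 1.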
/// BuildSplatI - Build a canonical splati of Val with an element size of
/// SplatSize.  Cast the result to VT.
static SDOperand BuildSplatI(int Val, unsigned SplatSize, MVT VT,
                             SelectionDAG &DAG) {
  assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");

  static const MVT VTys[] = { // canonical VT to use for each size.
    MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
  };

  MVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];

  // Force vspltis[hw] -1 to vspltisb -1 to canonicalize.
  if (Val == -1)
    SplatSize = 1;

  MVT CanonicalVT = VTys[SplatSize-1];

  // Build a canonical splat for this value.
  SDOperand Elt = DAG.getConstant(Val, CanonicalVT.getVectorElementType());
  SmallVector<SDOperand, 8> Ops;
  Ops.assign(CanonicalVT.getVectorNumElements(), Elt);
  SDOperand Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT,
                              &Ops[0], Ops.size());
  return DAG.getNode(ISD::BIT_CONVERT, ReqVT, Res);
}
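// Example use (illustrative values): BuildSplatI(-5, 2, MVT::v8i16, DAG)
// builds a v8i16 BUILD_VECTOR of eight -5 constants, which the selector
// matches as 'vspltish -5'.  A Val of -1 is always canonicalized to the byte
// form so all -1 splats select to 'vspltisb -1'.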
/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
/// specified intrinsic ID.
static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand LHS, SDOperand RHS,
                                  SelectionDAG &DAG,
                                  MVT DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = LHS.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT,
                     DAG.getConstant(IID, MVT::i32), LHS, RHS);
}

/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
/// specified intrinsic ID.
static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand Op0, SDOperand Op1,
                                  SDOperand Op2, SelectionDAG &DAG,
                                  MVT DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = Op0.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT,
                     DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2);
}
/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
/// amount.  The result has the specified value type.
static SDOperand BuildVSLDOI(SDOperand LHS, SDOperand RHS, unsigned Amt,
                             MVT VT, SelectionDAG &DAG) {
  // Force LHS/RHS to be the right type.
  LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, LHS);
  RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, RHS);

  SDOperand Ops[16];
  for (unsigned i = 0; i != 16; ++i)
    Ops[i] = DAG.getConstant(i+Amt, MVT::i8);
  SDOperand T = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, LHS, RHS,
                            DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops,16));
  return DAG.getNode(ISD::BIT_CONVERT, VT, T);
}
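// Example (illustrative): BuildVSLDOI(A, B, 4, MVT::v4i32, DAG) produces a
// shuffle selecting bytes 4..19 of the concatenation A:B, i.e. elements
// <1,2,3,4> of the combined inputs, which selects to 'vsldoi A, B, 4'.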
// If this is a case we can't handle, return null and let the default
// expansion code take care of it.  If we CAN select this case, and if it
// selects to a single instruction, return Op.  Otherwise, if we can codegen
// this case more efficiently than a constant pool load, lower it to the
// sequence of ops that should be used.
SDOperand PPCTargetLowering::LowerBUILD_VECTOR(SDOperand Op,
                                               SelectionDAG &DAG) {
  // If this is a vector of constants or undefs, get the bits.  A bit in
  // UndefBits is set if the corresponding element of the vector is an
  // ISD::UNDEF value.  For undefs, the corresponding VectorBits values are
  // zero.
  uint64_t VectorBits[2];
  uint64_t UndefBits[2];
  if (GetConstantBuildVectorBits(Op.Val, VectorBits, UndefBits))
    return SDOperand();   // Not a constant vector.

  // If this is a splat (repetition) of a value across the whole vector, return
  // the smallest size that splats it.  For example, "0x01010101010101..." is a
  // splat of 0x01, 0x0101, and 0x01010101.  We return SplatBits = 0x01 and
  // SplatSize = 1 byte.
  unsigned SplatBits, SplatUndef, SplatSize;
  if (isConstantSplat(VectorBits, UndefBits, SplatBits, SplatUndef, SplatSize)){
    bool HasAnyUndefs = (UndefBits[0] | UndefBits[1]) != 0;

    // First, handle single instruction cases.

    // All zeros?
    if (SplatBits == 0) {
      // Canonicalize all zero vectors to be v4i32.
      if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
        SDOperand Z = DAG.getConstant(0, MVT::i32);
        Z = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Z, Z, Z, Z);
        Op = DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Z);
      }
      return Op;
    }

    // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
    int32_t SextVal= int32_t(SplatBits << (32-8*SplatSize)) >> (32-8*SplatSize);
    if (SextVal >= -16 && SextVal <= 15)
      return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG);

    // Two instruction sequences.

    // If this value is in the range [-32,30] and is even, use:
    //    tmp = VSPLTI[bhw], result = add tmp, tmp
    if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) {
      SDOperand Res = BuildSplatI(SextVal >> 1, SplatSize, MVT::Other, DAG);
      Res = DAG.getNode(ISD::ADD, Res.getValueType(), Res, Res);
      return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
    }
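    // For example (illustrative): a v16i8 splat of 20 is even and in [-32,30],
    // so it becomes 'vspltisb 10' followed by 'vaddubm'.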
    // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
    // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
    // for fneg/fabs.
    if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
      // Make -1 and vspltisw -1:
      SDOperand OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG);

      // Make the VSLW intrinsic, computing 0x8000_0000.
      SDOperand Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
                                       OnesV, DAG);

      // xor by OnesV to invert it.
      Res = DAG.getNode(ISD::XOR, MVT::v4i32, Res, OnesV);
      return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
    }
    // Check to see if this is a wide variety of vsplti*, binop self cases.
    unsigned SplatBitSize = SplatSize*8;
    static const signed char SplatCsts[] = {
      -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
      -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
    };

    for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
      // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
      // cases which are ambiguous (e.g. formation of 0x8000_0000).
      int i = SplatCsts[idx];

      // Figure out what shift amount will be used by altivec if shifted by i in
      // this splat size.
      unsigned TypeShiftAmt = i & (SplatBitSize-1);

      // vsplti + shl self.
      if (SextVal == (i << (int)TypeShiftAmt)) {
        SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
          Intrinsic::ppc_altivec_vslw
        };
        Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
        return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
      }

      // vsplti + srl self.
      if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
        SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
          Intrinsic::ppc_altivec_vsrw
        };
        Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
        return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
      }
      // vsplti + sra self.
      if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
        SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
          Intrinsic::ppc_altivec_vsraw
        };
        Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
        return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
      }
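      // Note: the guard above is textually identical to the 'srl self' guard,
      // so any constant it accepts was already taken by the srl case and this
      // sra sequence appears unreachable as written.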
      // vsplti + rol self.
      if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                           ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
        SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
          Intrinsic::ppc_altivec_vrlw
        };
        Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG);
        return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
      }

      // t = vsplti c, result = vsldoi t, t, 1
      if (SextVal == ((i << 8) | (i >> (TypeShiftAmt-8)))) {
        SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
        return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG);
      }
      // t = vsplti c, result = vsldoi t, t, 2
      if (SextVal == ((i << 16) | (i >> (TypeShiftAmt-16)))) {
        SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
        return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG);
      }
      // t = vsplti c, result = vsldoi t, t, 3
      if (SextVal == ((i << 24) | (i >> (TypeShiftAmt-24)))) {
        SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
        return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG);
      }
    }
    // Three instruction sequences.

    // Odd, in range [17,31]:  (vsplti C)-(vsplti -16).
    if (SextVal >= 0 && SextVal <= 31) {
      SDOperand LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG);
      SDOperand RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG);
      LHS = DAG.getNode(ISD::SUB, LHS.getValueType(), LHS, RHS);
      return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS);
    }
    // Odd, in range [-31,-17]:  (vsplti C)+(vsplti -16).
    if (SextVal >= -31 && SextVal <= 0) {
      SDOperand LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG);
      SDOperand RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG);
      LHS = DAG.getNode(ISD::ADD, LHS.getValueType(), LHS, RHS);
      return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS);
    }
  }

  return SDOperand();
}
/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
static SDOperand GeneratePerfectShuffle(unsigned PFEntry, SDOperand LHS,
                                        SDOperand RHS, SelectionDAG &DAG) {
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);

  enum {
    OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
    OP_VMRGHW,
    OP_VMRGLW,
    OP_VSPLTISW0,
    OP_VSPLTISW1,
    OP_VSPLTISW2,
    OP_VSPLTISW3,
    OP_VSLDOI4,
    OP_VSLDOI8,
    OP_VSLDOI12
  };

  if (OpNum == OP_COPY) {
    if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }
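  // The IDs encode a four-element shuffle as base-9 digits, one per element,
  // where digit 8 means undef.  For example (illustrative), (1*9+2)*9+3 = 102
  // has digits <0,1,2,3> (the identity on the LHS), and ((4*9+5)*9+6)*9+7
  // has digits <4,5,6,7> (a straight copy of the RHS), which is why OP_COPY
  // returns LHS or RHS directly above.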
  SDOperand OpLHS, OpRHS;
  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG);
  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG);

  unsigned ShufIdxs[16];
  switch (OpNum) {
  default: assert(0 && "Unknown i32 permute!");
  case OP_VMRGHW:
    ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
    ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
    ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
    ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
    break;
  case OP_VMRGLW:
    ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
    ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
    ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
    ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
    break;
  case OP_VSPLTISW0:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+0;
    break;
  case OP_VSPLTISW1:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+4;
    break;
  case OP_VSPLTISW2:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+8;
    break;
  case OP_VSPLTISW3:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+12;
    break;
  case OP_VSLDOI4:
    return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG);
  case OP_VSLDOI8:
    return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG);
  case OP_VSLDOI12:
    return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG);
  }

  SDOperand Ops[16];
  for (unsigned i = 0; i != 16; ++i)
    Ops[i] = DAG.getConstant(ShufIdxs[i], MVT::i8);

  return DAG.getNode(ISD::VECTOR_SHUFFLE, OpLHS.getValueType(), OpLHS, OpRHS,
                     DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16));
}
/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
/// is a shuffle we can handle in a single instruction, return it.  Otherwise,
/// return the code it can be lowered into.  Worst case, it can always be
/// lowered into a vperm.
SDOperand PPCTargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op,
                                                 SelectionDAG &DAG) {
  SDOperand V1 = Op.getOperand(0);
  SDOperand V2 = Op.getOperand(1);
  SDOperand PermMask = Op.getOperand(2);

  // Cases that are handled by instructions that take permute immediates
  // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
  // selected by the instruction selector.
  if (V2.getOpcode() == ISD::UNDEF) {
    if (PPC::isSplatShuffleMask(PermMask.Val, 1) ||
        PPC::isSplatShuffleMask(PermMask.Val, 2) ||
        PPC::isSplatShuffleMask(PermMask.Val, 4) ||
        PPC::isVPKUWUMShuffleMask(PermMask.Val, true) ||
        PPC::isVPKUHUMShuffleMask(PermMask.Val, true) ||
        PPC::isVSLDOIShuffleMask(PermMask.Val, true) != -1 ||
        PPC::isVMRGLShuffleMask(PermMask.Val, 1, true) ||
        PPC::isVMRGLShuffleMask(PermMask.Val, 2, true) ||
        PPC::isVMRGLShuffleMask(PermMask.Val, 4, true) ||
        PPC::isVMRGHShuffleMask(PermMask.Val, 1, true) ||
        PPC::isVMRGHShuffleMask(PermMask.Val, 2, true) ||
        PPC::isVMRGHShuffleMask(PermMask.Val, 4, true)) {
      return Op;
    }
  }
  // Altivec has a variety of "shuffle immediates" that take two vector inputs
  // and produce a fixed permutation.  If any of these match, do not lower to
  // VPERM.
  if (PPC::isVPKUWUMShuffleMask(PermMask.Val, false) ||
      PPC::isVPKUHUMShuffleMask(PermMask.Val, false) ||
      PPC::isVSLDOIShuffleMask(PermMask.Val, false) != -1 ||
      PPC::isVMRGLShuffleMask(PermMask.Val, 1, false) ||
      PPC::isVMRGLShuffleMask(PermMask.Val, 2, false) ||
      PPC::isVMRGLShuffleMask(PermMask.Val, 4, false) ||
      PPC::isVMRGHShuffleMask(PermMask.Val, 1, false) ||
      PPC::isVMRGHShuffleMask(PermMask.Val, 2, false) ||
      PPC::isVMRGHShuffleMask(PermMask.Val, 4, false))
    return Op;
  // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
  // perfect shuffle table to emit an optimal matching sequence.
  unsigned PFIndexes[4];
  bool isFourElementShuffle = true;
  for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
    unsigned EltNo = 8;   // Start out undef.
    for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
      if (PermMask.getOperand(i*4+j).getOpcode() == ISD::UNDEF)
        continue;   // Undef, ignore it.

      unsigned ByteSource =
        cast<ConstantSDNode>(PermMask.getOperand(i*4+j))->getValue();
      if ((ByteSource & 3) != j) {
        isFourElementShuffle = false;
        break;
      }

      if (EltNo == 8) {
        EltNo = ByteSource/4;
      } else if (EltNo != ByteSource/4) {
        isFourElementShuffle = false;
        break;
      }
    }
    PFIndexes[i] = EltNo;
  }
  // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
  // perfect shuffle vector to determine if it is cost effective to do this as
  // discrete instructions, or whether we should use a vperm.
  if (isFourElementShuffle) {
    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];

    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    // Determining when to avoid vperm is tricky.  Many things affect the cost
    // of vperm, particularly how many times the perm mask needs to be computed.
    // For example, if the perm mask can be hoisted out of a loop or is already
    // used (perhaps because there are multiple permutes with the same shuffle
    // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
    // the loop requires an extra register.
    //
    // As a compromise, we only emit discrete instructions if the shuffle can be
    // generated in 3 or fewer operations.  When we have loop information
    // available, if this block is within a loop, we should avoid using vperm
    // for 3-operation perms and use a constant pool load instead.
    if (Cost < 3)
      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG);
  }
  // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
  // vector that will get spilled to the constant pool.
  if (V2.getOpcode() == ISD::UNDEF) V2 = V1;

  // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
  // that it is in input element units, not in bytes.  Convert now.
  MVT EltVT = V1.getValueType().getVectorElementType();
  unsigned BytesPerElement = EltVT.getSizeInBits()/8;

  SmallVector<SDOperand, 16> ResultMask;
  for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) {
    unsigned SrcElt;
    if (PermMask.getOperand(i).getOpcode() == ISD::UNDEF)
      SrcElt = 0;
    else
      SrcElt = cast<ConstantSDNode>(PermMask.getOperand(i))->getValue();

    for (unsigned j = 0; j != BytesPerElement; ++j)
      ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
                                           MVT::i8));
  }

  SDOperand VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8,
                                    &ResultMask[0], ResultMask.size());
  return DAG.getNode(PPCISD::VPERM, V1.getValueType(), V1, V2, VPermMask);
}
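// For example (illustrative), a v4i32 shuffle mask element selecting input
// element 5 (element 1 of V2) expands to the byte indices 20,21,22,23 in the
// vperm control vector built above.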
/// getAltivecCompareInfo - Given an intrinsic, return false if it is not an
/// altivec comparison.  If it is, return true and fill in Opc/isDot with
/// information about the intrinsic.
static bool getAltivecCompareInfo(SDOperand Intrin, int &CompareOpc,
                                  bool &isDot) {
  unsigned IntrinsicID = cast<ConstantSDNode>(Intrin.getOperand(0))->getValue();

  switch (IntrinsicID) {
  default: return false;
  // Comparison predicates.
  case Intrinsic::ppc_altivec_vcmpbfp_p:  CompareOpc = 966; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc =   6; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc =  70; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break;

  // Normal Comparisons.
  case Intrinsic::ppc_altivec_vcmpbfp:    CompareOpc = 966; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpeqfp:   CompareOpc = 198; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpequb:   CompareOpc =   6; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpequh:   CompareOpc =  70; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpequw:   CompareOpc = 134; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgefp:   CompareOpc = 454; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtfp:   CompareOpc = 710; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtsb:   CompareOpc = 774; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtsh:   CompareOpc = 838; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtsw:   CompareOpc = 902; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtub:   CompareOpc = 518; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtuh:   CompareOpc = 582; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtuw:   CompareOpc = 646; isDot = 0; break;
  }
  return true;
}
/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
/// lower, do it, otherwise return null.
SDOperand PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op,
                                                     SelectionDAG &DAG) {
  // If this is a lowered altivec predicate compare, CompareOpc is set to the
  // opcode number of the comparison.
  int CompareOpc;
  bool isDot;
  if (!getAltivecCompareInfo(Op, CompareOpc, isDot))
    return SDOperand();    // Don't custom lower most intrinsics.

  // If this is a non-dot comparison, make the VCMP node and we are done.
  if (!isDot) {
    SDOperand Tmp = DAG.getNode(PPCISD::VCMP, Op.getOperand(2).getValueType(),
                                Op.getOperand(1), Op.getOperand(2),
                                DAG.getConstant(CompareOpc, MVT::i32));
    return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Tmp);
  }

  // Create the PPCISD altivec 'dot' comparison node.
  SDOperand Ops[] = {
    Op.getOperand(2),  // LHS
    Op.getOperand(3),  // RHS
    DAG.getConstant(CompareOpc, MVT::i32)
  };
  std::vector<MVT> VTs;
  VTs.push_back(Op.getOperand(2).getValueType());
  VTs.push_back(MVT::Flag);
  SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3);

  // Now that we have the comparison, emit a copy from the CR to a GPR.
  // This is flagged to the above dot comparison.
  SDOperand Flags = DAG.getNode(PPCISD::MFCR, MVT::i32,
                                DAG.getRegister(PPC::CR6, MVT::i32),
                                CompNode.getValue(1));

  // Unpack the result based on how the target uses it.
  unsigned BitNo;   // Bit # of CR6.
  bool InvertBit;   // Invert result?
  switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) {
  default:  // Can't happen, don't crash on invalid number though.
  case 0:   // Return the value of the EQ bit of CR6.
    BitNo = 0; InvertBit = false;
    break;
  case 1:   // Return the inverted value of the EQ bit of CR6.
    BitNo = 0; InvertBit = true;
    break;
  case 2:   // Return the value of the LT bit of CR6.
    BitNo = 2; InvertBit = false;
    break;
  case 3:   // Return the inverted value of the LT bit of CR6.
    BitNo = 2; InvertBit = true;
    break;
  }

  // Shift the bit into the low position.
  Flags = DAG.getNode(ISD::SRL, MVT::i32, Flags,
                      DAG.getConstant(8-(3-BitNo), MVT::i32));
  // Isolate the bit.
  Flags = DAG.getNode(ISD::AND, MVT::i32, Flags,
                      DAG.getConstant(1, MVT::i32));

  // If we are supposed to, toggle the bit.
  if (InvertBit)
    Flags = DAG.getNode(ISD::XOR, MVT::i32, Flags,
                        DAG.getConstant(1, MVT::i32));
  return Flags;
}
SDOperand PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op,
                                                   SelectionDAG &DAG) {
  // Create a stack slot that is 16-byte aligned.
  MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(16, 16);
  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  // Store the input value into Value#0 of the stack slot.
  SDOperand Store = DAG.getStore(DAG.getEntryNode(),
                                 Op.getOperand(0), FIdx, NULL, 0);
  // Load it out.
  return DAG.getLoad(Op.getValueType(), Store, FIdx, NULL, 0);
}
SDOperand PPCTargetLowering::LowerMUL(SDOperand Op, SelectionDAG &DAG) {
  if (Op.getValueType() == MVT::v4i32) {
    SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDOperand Zero  = BuildSplatI(  0, 1, MVT::v4i32, DAG);
    SDOperand Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG);  // +16 as shift amt.

    SDOperand RHSSwap =   // = vrlw RHS, 16
      BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG);

    // Shrinkify inputs to v8i16.
    LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, LHS);
    RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHS);
    RHSSwap = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHSSwap);

    // Low parts multiplied together, generating 32-bit results (we ignore the
    // top parts).
    SDOperand LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
                                        LHS, RHS, DAG, MVT::v4i32);

    SDOperand HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
                                        LHS, RHSSwap, Zero, DAG, MVT::v4i32);
    // Shift the high parts up 16 bits.
    HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, Neg16, DAG);
    return DAG.getNode(ISD::ADD, MVT::v4i32, LoProd, HiProd);
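    // Why this works (sketch): write each 32-bit lane as L = Lh*2^16 + Ll and
    // R = Rh*2^16 + Rl.  Then L*R mod 2^32
    //   = Ll*Rl + (Lh*Rl + Ll*Rh)*2^16  (mod 2^32).
    // vmulouh supplies the Ll*Rl terms, and vmsumuhm of LHS against the
    // halfword-swapped RHS sums Lh*Rl + Ll*Rh per lane, which vslw shifts
    // into the high half before the final add.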
  } else if (Op.getValueType() == MVT::v8i16) {
    SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDOperand Zero = BuildSplatI(0, 1, MVT::v8i16, DAG);

    return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
                            LHS, RHS, Zero, DAG);
  } else if (Op.getValueType() == MVT::v16i8) {
    SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    // Multiply the even 8-bit parts, producing 16-bit sums.
    SDOperand EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
                                           LHS, RHS, DAG, MVT::v8i16);
    EvenParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, EvenParts);

    // Multiply the odd 8-bit parts, producing 16-bit sums.
    SDOperand OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
                                          LHS, RHS, DAG, MVT::v8i16);
    OddParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, OddParts);

    // Merge the results together.
    SDOperand Ops[16];
    for (unsigned i = 0; i != 8; ++i) {
      Ops[i*2  ] = DAG.getConstant(2*i+1, MVT::i8);
      Ops[i*2+1] = DAG.getConstant(2*i+1+16, MVT::i8);
    }
    return DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, EvenParts, OddParts,
                       DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16));
  } else {
    assert(0 && "Unknown mul to lower!");
  }
}
/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDOperand PPCTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
  switch (Op.getOpcode()) {
  default: assert(0 && "Wasn't expecting to be able to lower this!");
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
  case ISD::SETCC:              return LowerSETCC(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG, VarArgsFrameIndex, VarArgsStackOffset,
                        VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget);

  case ISD::VAARG:
    return LowerVAARG(Op, DAG, VarArgsFrameIndex, VarArgsStackOffset,
                      VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget);

  case ISD::FORMAL_ARGUMENTS:
    return LowerFORMAL_ARGUMENTS(Op, DAG, VarArgsFrameIndex,
                                 VarArgsStackOffset, VarArgsNumGPR,
                                 VarArgsNumFPR, PPCSubTarget);

  case ISD::CALL:               return LowerCALL(Op, DAG, PPCSubTarget,
                                                 getTargetMachine());
  case ISD::RET:                return LowerRET(Op, DAG, getTargetMachine());
  case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG, PPCSubTarget);
  case ISD::DYNAMIC_STACKALLOC:
    return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget);

  case ISD::ATOMIC_LOAD_ADD:    return LowerAtomicLOAD_ADD(Op, DAG);
  case ISD::ATOMIC_CMP_SWAP:    return LowerAtomicCMP_SWAP(Op, DAG);
  case ISD::ATOMIC_SWAP:        return LowerAtomicSWAP(Op, DAG);

  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
  case ISD::FP_TO_SINT:         return LowerFP_TO_SINT(Op, DAG);
  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG);
  case ISD::FP_ROUND_INREG:     return LowerFP_ROUND_INREG(Op, DAG);
  case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);

  // Lower 64-bit shifts.
  case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
  case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
  case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);

  // Vector-related lowering.
  case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::MUL:                return LowerMUL(Op, DAG);

  // Frame & Return address.
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
  }
  return SDOperand();
}
SDNode *PPCTargetLowering::ReplaceNodeResults(SDNode *N, SelectionDAG &DAG) {
  switch (N->getOpcode()) {
  default: assert(0 && "Wasn't expecting to be able to lower this!");
  case ISD::FP_TO_SINT: {
    SDOperand Res = LowerFP_TO_SINT(SDOperand(N, 0), DAG);
    // Use MERGE_VALUES to drop the chain result value and get a node with one
    // result.  This requires turning off getMergeValues simplification, since
    // otherwise it will give us Res back.
    return DAG.getMergeValues(&Res, 1, false).Val;
  }
  }
}
//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//
MachineBasicBlock *
PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                               MachineBasicBlock *BB) {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  // To "insert" these instructions we actually have to insert their
  // control-flow patterns.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;

  MachineFunction *F = BB->getParent();

  if (MI->getOpcode() == PPC::SELECT_CC_I4 ||
      MI->getOpcode() == PPC::SELECT_CC_I8 ||
      MI->getOpcode() == PPC::SELECT_CC_F4 ||
      MI->getOpcode() == PPC::SELECT_CC_F8 ||
      MI->getOpcode() == PPC::SELECT_CC_VRRC) {

    // The incoming instruction knows the destination vreg to set, the
    // condition code register to branch on, the true/false values to
    // select between, and a branch opcode to use.

    //  thisMBB:
    //  ...
    //   TrueVal = ...
    //   cmpTY ccX, r1, r2
    //   bCC sinkMBB
    //   fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB = BB;
    MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
    unsigned SelectPred = MI->getOperand(4).getImm();
    BuildMI(BB, TII->get(PPC::BCC))
      .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
    F->insert(It, copy0MBB);
    F->insert(It, sinkMBB);
    // Update machine-CFG edges by transferring all successors of the current
    // block to the new block which will contain the Phi node for the select.
    sinkMBB->transferSuccessors(BB);
    // Next, add the true and fallthrough blocks as its successors.
    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    //  copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    //  sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //  ...
    BB = sinkMBB;
    BuildMI(BB, TII->get(PPC::PHI), MI->getOperand(0).getReg())
      .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
      .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
  }
  else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32 ||
           MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) {
    bool is64bit = MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64;

    unsigned dest = MI->getOperand(0).getReg();
    unsigned ptrA = MI->getOperand(1).getReg();
    unsigned ptrB = MI->getOperand(2).getReg();
    unsigned incr = MI->getOperand(3).getReg();

    MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, loopMBB);
    F->insert(It, exitMBB);
    exitMBB->transferSuccessors(BB);

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    unsigned TmpReg = RegInfo.createVirtualRegister(
      is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass :
                (const TargetRegisterClass *) &PPC::GPRCRegClass);

    //  thisMBB:
    //   ...
    //   fallthrough --> loopMBB
    BB->addSuccessor(loopMBB);

    //  loopMBB:
    //   l[wd]arx dest, ptr
    //   add r0, dest, incr
    //   st[wd]cx. r0, ptr
    //   bne- loopMBB
    //   fallthrough --> exitMBB
    BB = loopMBB;
    BuildMI(BB, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest)
      .addReg(ptrA).addReg(ptrB);
    BuildMI(BB, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), TmpReg)
      .addReg(incr).addReg(dest);
    BuildMI(BB, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
      .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
    BuildMI(BB, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
    BB->addSuccessor(loopMBB);
    BB->addSuccessor(exitMBB);
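    // Note: st[wd]cx. records success in CR0 (EQ set when the store is
    // performed), so the PRED_NE branch above retries the load-reserve/
    // store-conditional sequence until the store succeeds.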

    //  exitMBB:
    //   ...
    BB = exitMBB;
  }
  else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
           MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64) {
    bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;

    unsigned dest   = MI->getOperand(0).getReg();
    unsigned ptrA   = MI->getOperand(1).getReg();
    unsigned ptrB   = MI->getOperand(2).getReg();
    unsigned oldval = MI->getOperand(3).getReg();
    unsigned newval = MI->getOperand(4).getReg();

    MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, loopMBB);
    F->insert(It, exitMBB);
    exitMBB->transferSuccessors(BB);

    //  thisMBB:
    //   ...
    //   fallthrough --> loopMBB
    BB->addSuccessor(loopMBB);

    //  loopMBB:
    //   l[wd]arx dest, ptr
    //   cmp[wd] dest, oldval
    //   bne- exitMBB
    //   st[wd]cx. newval, ptr
    //   bne- loopMBB
    //   fallthrough --> exitMBB
    BB = loopMBB;
    BuildMI(BB, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest)
      .addReg(ptrA).addReg(ptrB);
    BuildMI(BB, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
      .addReg(oldval).addReg(dest);
    BuildMI(BB, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(exitMBB);
    BuildMI(BB, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
      .addReg(newval).addReg(ptrA).addReg(ptrB);
    BuildMI(BB, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
    BB->addSuccessor(loopMBB);
    BB->addSuccessor(exitMBB);

    //  exitMBB:
    //   ...
    BB = exitMBB;
  }
  else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I32 ||
           MI->getOpcode() == PPC::ATOMIC_SWAP_I64) {
    bool is64bit = MI->getOpcode() == PPC::ATOMIC_SWAP_I64;

    unsigned dest   = MI->getOperand(0).getReg();
    unsigned ptrA   = MI->getOperand(1).getReg();
    unsigned ptrB   = MI->getOperand(2).getReg();
    unsigned newval = MI->getOperand(3).getReg();

    MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, loopMBB);
    F->insert(It, exitMBB);
    exitMBB->transferSuccessors(BB);

    //  thisMBB:
    //   ...
    //   fallthrough --> loopMBB
    BB->addSuccessor(loopMBB);

    //  loopMBB:
    //   l[wd]arx dest, ptr
    //   st[wd]cx. newval, ptr
    //   bne- loopMBB
    //   fallthrough --> exitMBB
    BB = loopMBB;
    BuildMI(BB, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest)
      .addReg(ptrA).addReg(ptrB);
    BuildMI(BB, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
      .addReg(newval).addReg(ptrA).addReg(ptrB);
    BuildMI(BB, TII->get(PPC::BCC))
      .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
    BB->addSuccessor(loopMBB);
    BB->addSuccessor(exitMBB);

    //  exitMBB:
    //   ...
    BB = exitMBB;
  }
  else {
    assert(0 && "Unexpected instr type to insert");
  }

  F->DeleteMachineInstr(MI);   // The pseudo instruction is gone now.
  return BB;
}
//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//
SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  TargetMachine &TM = getTargetMachine();
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case PPCISD::SHL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->getValue() == 0)   // 0 << V -> 0.
        return N->getOperand(0);
    }
    break;
  case PPCISD::SRL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->getValue() == 0)   // 0 >>u V -> 0.
        return N->getOperand(0);
    }
    break;
  case PPCISD::SRA:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->getValue() == 0 ||   //  0 >>s V -> 0.
          C->isAllOnesValue())    // -1 >>s V -> -1.
        return N->getOperand(0);
    }
    break;
  case ISD::SINT_TO_FP:
    if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
      if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) {
        // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores.
        // We allow the src/dst to be either f32/f64, but the intermediate
        // type must be i64.
        if (N->getOperand(0).getValueType() == MVT::i64 &&
            N->getOperand(0).getOperand(0).getValueType() != MVT::ppcf128) {
          SDOperand Val = N->getOperand(0).getOperand(0);
          if (Val.getValueType() == MVT::f32) {
            Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
            DCI.AddToWorklist(Val.Val);
          }

          Val = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Val);
          DCI.AddToWorklist(Val.Val);
          Val = DAG.getNode(PPCISD::FCFID, MVT::f64, Val);
          DCI.AddToWorklist(Val.Val);
          if (N->getValueType(0) == MVT::f32) {
            Val = DAG.getNode(ISD::FP_ROUND, MVT::f32, Val,
                              DAG.getIntPtrConstant(0));
            DCI.AddToWorklist(Val.Val);
          }
          return Val;
        } else if (N->getOperand(0).getValueType() == MVT::i32) {
          // If the intermediate type is i32, we can avoid the load/store here
          // too.
        }
      }
    }
    break;
  case ISD::STORE:
    // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
    if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() &&
        !cast<StoreSDNode>(N)->isTruncatingStore() &&
        N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
        N->getOperand(1).getValueType() == MVT::i32 &&
        N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) {
      SDOperand Val = N->getOperand(1).getOperand(0);
      if (Val.getValueType() == MVT::f32) {
        Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
        DCI.AddToWorklist(Val.Val);
      }
      Val = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Val);
      DCI.AddToWorklist(Val.Val);

      Val = DAG.getNode(PPCISD::STFIWX, MVT::Other, N->getOperand(0), Val,
                        N->getOperand(2), N->getOperand(3));
      DCI.AddToWorklist(Val.Val);
      return Val;
    }

    // Turn STORE (BSWAP) -> sthbrx/stwbrx.
    if (N->getOperand(1).getOpcode() == ISD::BSWAP &&
        N->getOperand(1).Val->hasOneUse() &&
        (N->getOperand(1).getValueType() == MVT::i32 ||
         N->getOperand(1).getValueType() == MVT::i16)) {
      SDOperand BSwapOp = N->getOperand(1).getOperand(0);
      // Do an any-extend to 32-bits if this is a half-word input.
      if (BSwapOp.getValueType() == MVT::i16)
        BSwapOp = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, BSwapOp);

      return DAG.getNode(PPCISD::STBRX, MVT::Other, N->getOperand(0), BSwapOp,
                         N->getOperand(2), N->getOperand(3),
                         DAG.getValueType(N->getOperand(1).getValueType()));
    }
    break;
  case ISD::BSWAP:
    // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
    if (ISD::isNON_EXTLoad(N->getOperand(0).Val) &&
        N->getOperand(0).hasOneUse() &&
        (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16)) {
      SDOperand Load = N->getOperand(0);
      LoadSDNode *LD = cast<LoadSDNode>(Load);
      // Create the byte-swapping load.
      std::vector<MVT> VTs;
      VTs.push_back(MVT::i32);
      VTs.push_back(MVT::Other);
      SDOperand MO = DAG.getMemOperand(LD->getMemOperand());
      SDOperand Ops[] = {
        LD->getChain(),    // Chain
        LD->getBasePtr(),  // Ptr
        MO,                // MemOperand
        DAG.getValueType(N->getValueType(0)) // VT
      };
      SDOperand BSLoad = DAG.getNode(PPCISD::LBRX, VTs, Ops, 4);

      // If this is an i16 load, insert the truncate.
      SDOperand ResVal = BSLoad;
      if (N->getValueType(0) == MVT::i16)
        ResVal = DAG.getNode(ISD::TRUNCATE, MVT::i16, BSLoad);

      // First, combine the bswap away.  This makes the value produced by the
      // load dead.
      DCI.CombineTo(N, ResVal);

      // Next, combine the load away, we give it a bogus result value but a real
      // chain result.  The result value is dead because the bswap is dead.
      DCI.CombineTo(Load.Val, ResVal, BSLoad.getValue(1));

      // Return N so it doesn't get rechecked!
      return SDOperand(N, 0);
    }

    break;
  case PPCISD::VCMP: {
    // If a VCMPo node already exists with exactly the same operands as this
    // node, use its result instead of this node (VCMPo computes both a CR6 and
    // a normal output).
    if (!N->getOperand(0).hasOneUse() &&
        !N->getOperand(1).hasOneUse() &&
        !N->getOperand(2).hasOneUse()) {

      // Scan all of the users of the LHS, looking for VCMPo's that match.
      SDNode *VCMPoNode = 0;

      SDNode *LHSN = N->getOperand(0).Val;
      for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
           UI != E; ++UI)
        if (UI->getOpcode() == PPCISD::VCMPo &&
            UI->getOperand(1) == N->getOperand(1) &&
            UI->getOperand(2) == N->getOperand(2) &&
            UI->getOperand(0) == N->getOperand(0)) {
          VCMPoNode = *UI;
          break;
        }

      // If there is no VCMPo node, or if the flag value has a single use, don't
      // transform this.
      if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
        break;

      // Look at the (necessarily single) use of the flag value.  If it has a
      // chain, this transformation is more complex.  Note that multiple things
      // could use the value result, which we should ignore.
      SDNode *FlagUser = 0;
      for (SDNode::use_iterator UI = VCMPoNode->use_begin();
           FlagUser == 0; ++UI) {
        assert(UI != VCMPoNode->use_end() && "Didn't find user!");
        SDNode *User = *UI;
        for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
          if (User->getOperand(i) == SDOperand(VCMPoNode, 1)) {
            FlagUser = User;
            break;
          }
        }
      }

      // If the user is a MFCR instruction, we know this is safe.  Otherwise we
      // give up for right now.
      if (FlagUser->getOpcode() == PPCISD::MFCR)
        return SDOperand(VCMPoNode, 0);
    }
    break;
  }
  case ISD::BR_CC: {
    // If this is a branch on an altivec predicate comparison, lower this so
    // that we don't have to do a MFCR: instead, branch directly on CR6.  This
    // lowering is done pre-legalize, because the legalizer lowers the predicate
    // compare down to code that is difficult to reassemble.
    ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
    SDOperand LHS = N->getOperand(2), RHS = N->getOperand(3);
    int CompareOpc;
    bool isDot;

    if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
        getAltivecCompareInfo(LHS, CompareOpc, isDot)) {
      assert(isDot && "Can't compare against a vector result!");

      // If this is a comparison against something other than 0/1, then we know
      // that the condition is never/always true.
      unsigned Val = cast<ConstantSDNode>(RHS)->getValue();
      if (Val != 0 && Val != 1) {
        if (CC == ISD::SETEQ)      // Cond never true, remove branch.
          return N->getOperand(0);
        // Always !=, turn it into an unconditional branch.
        return DAG.getNode(ISD::BR, MVT::Other,
                           N->getOperand(0), N->getOperand(4));
      }

      bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);

      // Create the PPCISD altivec 'dot' comparison node.
      std::vector<MVT> VTs;
      SDOperand Ops[] = {
        LHS.getOperand(2),  // LHS of compare
        LHS.getOperand(3),  // RHS of compare
        DAG.getConstant(CompareOpc, MVT::i32)
      };
      VTs.push_back(LHS.getOperand(2).getValueType());
      VTs.push_back(MVT::Flag);
      SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3);

      // Unpack the result based on how the target uses it.
      PPC::Predicate CompOpc;
      switch (cast<ConstantSDNode>(LHS.getOperand(1))->getValue()) {
      default:  // Can't happen, don't crash on invalid number though.
      case 0:   // Branch on the value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
        break;
      case 1:   // Branch on the inverted value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
        break;
      case 2:   // Branch on the value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
        break;
      case 3:   // Branch on the inverted value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
        break;
      }

      return DAG.getNode(PPCISD::COND_BRANCH, MVT::Other, N->getOperand(0),
                         DAG.getConstant(CompOpc, MVT::i32),
                         DAG.getRegister(PPC::CR6, MVT::i32),
                         N->getOperand(4), CompNode.getValue(1));
    }
    break;
  }
  }

  return SDOperand();
}
//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//
void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                       const APInt &Mask,
                                                       APInt &KnownZero,
                                                       APInt &KnownOne,
                                                       const SelectionDAG &DAG,
                                                       unsigned Depth) const {
  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
  switch (Op.getOpcode()) {
  default: break;
  case PPCISD::LBRX: {
    // lhbrx is known to have the top bits cleared out.
    if (cast<VTSDNode>(Op.getOperand(3))->getVT() == MVT::i16)
      KnownZero = 0xFFFF0000;
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
      KnownZero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}
/// getConstraintType - Given a constraint, return the type of
/// constraint it is for this target.
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'b':
    case 'r':
    case 'f':
    case 'v':
    case 'y':
      return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}
std::pair<unsigned, const TargetRegisterClass*>
PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
    case 'r':   // R0-R31
      if (VT == MVT::i64 && PPCSubTarget.isPPC64())
        return std::make_pair(0U, PPC::G8RCRegisterClass);
      return std::make_pair(0U, PPC::GPRCRegisterClass);
    case 'f':
      if (VT == MVT::f32)
        return std::make_pair(0U, PPC::F4RCRegisterClass);
      else if (VT == MVT::f64)
        return std::make_pair(0U, PPC::F8RCRegisterClass);
      break;
    case 'v':
      return std::make_pair(0U, PPC::VRRCRegisterClass);
    case 'y':   // crrc
      return std::make_pair(0U, PPC::CRRCRegisterClass);
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void PPCTargetLowering::LowerAsmOperandForConstraint(SDOperand Op, char Letter,
                                                     std::vector<SDOperand>&Ops,
                                                     SelectionDAG &DAG) const {
  SDOperand Result(0,0);
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
    if (!CST) return; // Must be an immediate to match.
    unsigned Value = CST->getValue();
    switch (Letter) {
    default: assert(0 && "Unknown constraint letter!");
    case 'I':  // "I" is a signed 16-bit constant.
      if ((short)Value == (int)Value)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
    case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
      if ((short)Value == 0)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
      if ((Value >> 16) == 0)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'M':  // "M" is a constant that is greater than 31.
      if (Value > 31)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'N':  // "N" is a positive constant that is an exact power of two.
      if ((int)Value > 0 && isPowerOf2_32(Value))
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'O':  // "O" is the constant zero.
      if (Value == 0)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
      if ((short)-Value == (int)-Value)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    }
    break;
  }
  }

  if (Result.Val) {
    Ops.push_back(Result);
    return;
  }

  // Handle standard constraint letters.
  TargetLowering::LowerAsmOperandForConstraint(Op, Letter, Ops, DAG);
}
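// Illustrative use of these letters in inline asm (hypothetical snippet):
//   asm("addi %0, %1, %2" : "=r"(dst) : "r"(src), "I"(16));
// The 'I' operand matches only because 16 fits in a signed 16-bit immediate;
// a value like 0x12345 would fall through to the generic handling above.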
// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              const Type *Ty) const {
  // FIXME: PPC does not allow r+i addressing modes for vectors!

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r addressing.
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}
/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for load / store of the
/// given type.
bool PPCTargetLowering::isLegalAddressImmediate(int64_t V,
                                                const Type *Ty) const {
  // PPC allows a sign-extended 16-bit immediate field.
  return (V > -(1 << 16) && V < (1 << 16)-1);
}

bool PPCTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
  return false;
}
SDOperand PPCTargetLowering::LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG) {
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0)
    return SDOperand();

  MachineFunction &MF = DAG.getMachineFunction();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  // Just load the return address off the stack.
  SDOperand RetAddrFI = getReturnAddrFrameIndex(DAG);

  // Make sure the function really does not optimize away the store of the RA
  // to the stack.
  FuncInfo->setLRStoreRequired();
  return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0);
}
SDOperand PPCTargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) {
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0)
    return SDOperand();

  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool is31 = (NoFramePointerElim || MFI->hasVarSizedObjects())
              && MFI->getStackSize();
  if (isPPC64)
    return DAG.getCopyFromReg(DAG.getEntryNode(), is31 ? PPC::X31 : PPC::X1,
                              MVT::i64);
  else
    return DAG.getCopyFromReg(DAG.getEntryNode(), is31 ? PPC::R31 : PPC::R1,
                              MVT::i32);
}