//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//
#include "PPCISelLowering.h"
#include "PPCTargetMachine.h"
#include "PPCPerfectShuffle.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
PPCTargetLowering::PPCTargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {
  
  // Fold away setcc operations if possible.
  setSetCCIsExpensive();
  
  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmpLongJmp(true);
  
  // Set up the register classes.
  addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
  addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
  addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);
  
  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
  
  // PowerPC has no intrinsics for these particular operations
  setOperationAction(ISD::MEMMOVE, MVT::Other, Expand);
  setOperationAction(ISD::MEMSET, MVT::Other, Expand);
  setOperationAction(ISD::MEMCPY, MVT::Other, Expand);
  
  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD
  setOperationAction(ISD::SEXTLOAD, MVT::i1, Expand);
  setOperationAction(ISD::SEXTLOAD, MVT::i8, Expand);
  
  // PowerPC has no SREM/UREM instructions
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  
  // We don't support sin/cos/sqrt/fmod
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  
  // If we're enabling GP optimizations, use hardware square root
  if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) {
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);
  }
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  
  // PowerPC does not have BSWAP, CTPOP or CTTZ
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ , MVT::i32, Expand);
  
  // PowerPC does not have ROTR
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  
  // PowerPC does not have Select
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);
  
  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
  
  // PowerPC wants to optimize integer setcc a bit
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  
  // PowerPC does not have BRCOND which requires SetCC
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  
  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  
  // PowerPC does not have [U|S]INT_TO_FP
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  
  setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
  
  // PowerPC does not have truncstore for i1.
  setOperationAction(ISD::TRUNCSTORE, MVT::i1, Promote);
  
  // Support label based line numbers.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!TM.getSubtarget<PPCSubtarget>().isDarwin())
    setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);
  
  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  
  // RET must be custom lowered, to meet ABI requirements
  setOperationAction(ISD::RET, MVT::Other, Custom);
  
  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  
  // Use the default implementation.
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  
  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (TM.getSubtarget<PPCSubtarget>().is64Bit()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    
    // FIXME: disable this lowered code.  This generates 64-bit register values,
    // and we don't model the fact that the top part is clobbered by calls.  We
    // need to flag these together so that the value isn't live across a call.
    //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    
    // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }
  if (TM.getSubtarget<PPCSubtarget>().has64BitRegs()) {
    // 64 bit PowerPC implementations can support i64 types directly
    addRegisterClass(MVT::i64, PPC::G8RCRegisterClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
  } else {
    // 32 bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL, MVT::i64, Custom);
    setOperationAction(ISD::SRL, MVT::i64, Custom);
    setOperationAction(ISD::SRA, MVT::i64, Custom);
  }
  if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         VT != (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD , (MVT::ValueType)VT, Legal);
      setOperationAction(ISD::SUB , (MVT::ValueType)VT, Legal);
      
      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, MVT::v16i8);
      
      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND   , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::AND   , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::OR    , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::OR    , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::XOR   , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::XOR   , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::LOAD  , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD  , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::STORE , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::STORE , (MVT::ValueType)VT, MVT::v4i32);
      
      // No other operations are legal.
      setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, (MVT::ValueType)VT, Expand);
    }
    
    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
    
    setOperationAction(ISD::AND   , MVT::v4i32, Legal);
    setOperationAction(ISD::OR    , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);
    
    addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass);
    
    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);
    
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);
    
    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
  }
  
  setSetCCResultContents(ZeroOrOneSetCCResult);
  setStackPointerRegisterToSaveRestore(PPC::R1);
  
  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  
  computeRegisterProperties();
}
const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case PPCISD::FSEL:          return "PPCISD::FSEL";
  case PPCISD::FCFID:         return "PPCISD::FCFID";
  case PPCISD::FCTIDZ:        return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:        return "PPCISD::FCTIWZ";
  case PPCISD::STFIWX:        return "PPCISD::STFIWX";
  case PPCISD::VMADDFP:       return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP:      return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM:         return "PPCISD::VPERM";
  case PPCISD::Hi:            return "PPCISD::Hi";
  case PPCISD::Lo:            return "PPCISD::Lo";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:           return "PPCISD::SRL";
  case PPCISD::SRA:           return "PPCISD::SRA";
  case PPCISD::SHL:           return "PPCISD::SHL";
  case PPCISD::EXTSW_32:      return "PPCISD::EXTSW_32";
  case PPCISD::STD_32:        return "PPCISD::STD_32";
  case PPCISD::CALL:          return "PPCISD::CALL";
  case PPCISD::RET_FLAG:      return "PPCISD::RET_FLAG";
  case PPCISD::MFCR:          return "PPCISD::MFCR";
  case PPCISD::VCMP:          return "PPCISD::VCMP";
  case PPCISD::VCMPo:         return "PPCISD::VCMPo";
  case PPCISD::COND_BRANCH:   return "PPCISD::COND_BRANCH";
  }
}
//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDOperand Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
  else if (Op.getOpcode() == ISD::EXTLOAD || Op.getOpcode() == ISD::LOAD) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->get()))
        return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
  }
  return false;
}
/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(SDOperand Op, unsigned Val) {
  return Op.getOpcode() == ISD::UNDEF ||
         cast<ConstantSDNode>(Op)->getValue() == Val;
}
/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
bool PPC::isVPKUHUMShuffleMask(SDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), i*2+1))
        return false;
  } else {
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getOperand(i),   i*2+1) ||
          !isConstantOrUndef(N->getOperand(i+8), i*2+1))
        return false;
  }
  return true;
}
/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
bool PPC::isVPKUWUMShuffleMask(SDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getOperand(i  ), i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+1), i*2+3))
        return false;
  } else {
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getOperand(i  ), i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+1), i*2+3) ||
          !isConstantOrUndef(N->getOperand(i+8), i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+9), i*2+3))
        return false;
  }
  return true;
}
/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(SDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");
350 for (unsigned i = 0; i != 8/UnitSize; ++i) // Step over units
351 for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit
352 if (!isConstantOrUndef(N->getOperand(i*UnitSize*2+j),
353 LHSStart+j+i*UnitSize) ||
354 !isConstantOrUndef(N->getOperand(i*UnitSize*2+UnitSize+j),
355 RHSStart+j+i*UnitSize))
/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
bool PPC::isVMRGLShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 8, 24);
  return isVMerge(N, UnitSize, 8, 8);
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
bool PPC::isVMRGHShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 0, 16);
  return isVMerge(N, UnitSize, 0, 0);
}
/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && N->getOperand(i).getOpcode() == ISD::UNDEF; ++i)
    /*search*/;
  
  if (i == 16) return -1;  // all undef.
  
  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = cast<ConstantSDNode>(N->getOperand(i))->getValue();
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;
  
  if (!isUnary) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), ShiftAmt+i))
        return -1;
  } else {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), (ShiftAmt+i) & 15))
        return -1;
  }
  
  return ShiftAmt;
}
/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool PPC::isSplatShuffleMask(SDNode *N, unsigned EltSize) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));
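  // Illustrative note (added comment, not from the original source): with
  // EltSize == 4, a splat of element 1 of the first input corresponds to the
  // byte mask <4,5,6,7, 4,5,6,7, 4,5,6,7, 4,5,6,7>; the checks below verify
  // that the first EltSize bytes are consecutive and that every later
  // EltSize-sized group repeats them.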
  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = 0;
  SDOperand Elt = N->getOperand(0);
  if (ConstantSDNode *EltV = dyn_cast<ConstantSDNode>(Elt))
    ElementBase = EltV->getValue();
  else
    return false;   // FIXME: Handle UNDEF elements too!
  
  if (cast<ConstantSDNode>(Elt)->getValue() >= 16)
    return false;
  
  // Check that they are consecutive.
  for (unsigned i = 1; i != EltSize; ++i) {
    if (!isa<ConstantSDNode>(N->getOperand(i)) ||
        cast<ConstantSDNode>(N->getOperand(i))->getValue() != i+ElementBase)
      return false;
  }
  
  assert(isa<ConstantSDNode>(Elt) && "Invalid VECTOR_SHUFFLE mask!");
  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(N->getOperand(i)) &&
           "Invalid VECTOR_SHUFFLE mask!");
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getOperand(i+j) != N->getOperand(j))
        return false;
  }
  
  return true;
}
/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
  assert(isSplatShuffleMask(N, EltSize));
  return cast<ConstantSDNode>(N->getOperand(0))->getValue() / EltSize;
}
/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDOperand PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDOperand OpVal(0, 0);
  
  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDOperand UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
    
    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDOperand();
      
      if (UniquedVals[i&(Multiple-1)].Val == 0)
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDOperand();  // no match.
    }
    
    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.
    
    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (UniquedVals[i].Val == 0) continue;  // Must have been undefs.
      
      LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
      LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (UniquedVals[Multiple-1].Val == 0)
        return DAG.getTargetConstant(0, MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getValue();
      if (Val < 16)
        return DAG.getTargetConstant(Val, MVT::i32);  // 0,0,0,4 -> vspltisw(4)
    }
    if (LeadingOnes) {
      if (UniquedVals[Multiple-1].Val == 0)
        return DAG.getTargetConstant(~0U, MVT::i32);  // -1,-1,-1,undef
      int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSignExtended();
      if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, MVT::i32);
    }
    
    return SDOperand();
  }
  
  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.Val == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDOperand();
  }
  
  if (OpVal.Val == 0) return SDOperand();  // All UNDEF: use implicit def.
  
  unsigned ValSizeInBytes = 0;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getValue();
    ValSizeInBytes = MVT::getSizeInBits(CN->getValueType(0))/8;
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValue());
    ValSizeInBytes = 4;
  }
  
  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDOperand();
  
  // If the element value is larger than the splat value, cut it in half and
  // check to see if the two halves are equal.  Continue doing this until we
  // get to ByteSize.  This allows us to handle 0x01010101 as 0x01.
  while (ValSizeInBytes > ByteSize) {
    ValSizeInBytes >>= 1;
    
    // If the top half equals the bottom half, we're still ok.
    if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
         (Value                        & ((1 << (8*ValSizeInBytes))-1)))
      return SDOperand();
  }
  
  // Properly sign extend the value.
  int ShAmt = (4-ByteSize)*8;
  int MaskVal = ((int)Value << ShAmt) >> ShAmt;
  
  // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDOperand();
  
  // Finally, if this value fits in a 5 bit sext field, return it
  if (((MaskVal << (32-5)) >> (32-5)) == MaskVal)
    return DAG.getTargetConstant(MaskVal, MVT::i32);
  return SDOperand();
}
//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

static SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG) {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  Constant *C = CP->get();
  SDOperand CPI = DAG.getTargetConstantPool(C, MVT::i32, CP->getAlignment());
  SDOperand Zero = DAG.getConstant(0, MVT::i32);
  
  const TargetMachine &TM = DAG.getTarget();
  
  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to the constant pool.
    // The address of the global is just (hi(&g)+lo(&g)).
    SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, CPI, Zero);
    SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, CPI, Zero);
    return DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);
  }
  
  SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, CPI, Zero);
  if (TM.getRelocationModel() == Reloc::PIC) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, MVT::i32,
                     DAG.getNode(PPCISD::GlobalBaseReg, MVT::i32), Hi);
  }
  
  SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, CPI, Zero);
  Lo = DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);
  return Lo;
}
static SDOperand LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) {
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  GlobalValue *GV = GSDN->getGlobal();
  SDOperand GA = DAG.getTargetGlobalAddress(GV, MVT::i32, GSDN->getOffset());
  SDOperand Zero = DAG.getConstant(0, MVT::i32);
  
  const TargetMachine &TM = DAG.getTarget();
  
  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to globals.
    // The address of the global is just (hi(&g)+lo(&g)).
    SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, GA, Zero);
    SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, GA, Zero);
    return DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);
  }
  
  SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, GA, Zero);
  if (TM.getRelocationModel() == Reloc::PIC) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, MVT::i32,
                     DAG.getNode(PPCISD::GlobalBaseReg, MVT::i32), Hi);
  }
  
  SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, GA, Zero);
  Lo = DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);
  
  if (!GV->hasWeakLinkage() && !GV->hasLinkOnceLinkage() &&
      (!GV->isExternal() || GV->hasNotBeenReadFromBytecode()))
    return Lo;
  
  // If the global is weak or external, we have to go through the lazy
  // resolution stub.
  return DAG.getLoad(MVT::i32, DAG.getEntryNode(), Lo, DAG.getSrcValue(0));
}
static SDOperand LowerSETCC(SDOperand Op, SelectionDAG &DAG) {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  
  // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
  // fold the new nodes.
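  // Illustrative note (added comment, not from the original source): for an
  // i32 value X, ctlz(X) is 32 only when X == 0 and is at most 31 otherwise,
  // so (ctlz(X) >> 5) is 1 exactly when X == 0.  Log2_32 of the bit width
  // below computes that shift amount (5 for i32).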
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    if (C->isNullValue() && CC == ISD::SETEQ) {
      MVT::ValueType VT = Op.getOperand(0).getValueType();
      SDOperand Zext = Op.getOperand(0);
      if (VT < MVT::i32) {
        VT = MVT::i32;
        Zext = DAG.getNode(ISD::ZERO_EXTEND, VT, Op.getOperand(0));
      }
      unsigned Log2b = Log2_32(MVT::getSizeInBits(VT));
      SDOperand Clz = DAG.getNode(ISD::CTLZ, VT, Zext);
      SDOperand Scc = DAG.getNode(ISD::SRL, VT, Clz,
                                  DAG.getConstant(Log2b, MVT::i32));
      return DAG.getNode(ISD::TRUNCATE, MVT::i32, Scc);
    }
    // Leave comparisons against 0 and -1 alone for now, since they're usually
    // optimized.  FIXME: revisit this when we can custom lower all setcc
    // optimizations.
    if (C->isAllOnesValue() || C->isNullValue())
      return SDOperand();
  }
  
  // If we have an integer seteq/setne, turn it into a compare against zero
  // by subtracting the rhs from the lhs, which is faster than setting a
  // condition register, reading it back out, and masking the correct bit.
  MVT::ValueType LHSVT = Op.getOperand(0).getValueType();
  if (MVT::isInteger(LHSVT) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    MVT::ValueType VT = Op.getValueType();
    SDOperand Sub = DAG.getNode(ISD::SUB, LHSVT, Op.getOperand(0),
                                Op.getOperand(1));
    return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC);
  }
  return SDOperand();
}
static SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG,
                              unsigned VarArgsFrameIndex) {
  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32);
  return DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0), FR,
                     Op.getOperand(1), Op.getOperand(2));
}
static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Copy;
  switch(Op.getNumOperands()) {
  default:
    assert(0 && "Do not know how to return this many arguments!");
    abort();
  case 1:
    return SDOperand(); // ret void is legal
  case 2: {
    MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
    unsigned ArgReg;
    if (MVT::isVector(ArgVT))
      ArgReg = PPC::V2;
    else if (MVT::isInteger(ArgVT))
      ArgReg = PPC::R3;
    else {
      assert(MVT::isFloatingPoint(ArgVT));
      ArgReg = PPC::F1;
    }
    
    Copy = DAG.getCopyToReg(Op.getOperand(0), ArgReg, Op.getOperand(1),
                            SDOperand());
    
    // If we haven't noted the R3/F1 are live out, do so now.
    if (DAG.getMachineFunction().liveout_empty())
      DAG.getMachineFunction().addLiveOut(ArgReg);
    break;
  }
  case 3:
    Copy = DAG.getCopyToReg(Op.getOperand(0), PPC::R3, Op.getOperand(2),
                            SDOperand());
    Copy = DAG.getCopyToReg(Copy, PPC::R4, Op.getOperand(1),Copy.getValue(1));
    // If we haven't noted the R3+R4 are live out, do so now.
    if (DAG.getMachineFunction().liveout_empty()) {
      DAG.getMachineFunction().addLiveOut(PPC::R3);
      DAG.getMachineFunction().addLiveOut(PPC::R4);
    }
    break;
  }
  
  return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Copy, Copy.getValue(1));
}
/// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
/// possible.
static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) {
  // Not FP? Not a fsel.
  if (!MVT::isFloatingPoint(Op.getOperand(0).getValueType()) ||
      !MVT::isFloatingPoint(Op.getOperand(2).getValueType()))
    return SDOperand();
  
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  
  // Cannot handle SETEQ/SETNE.
  if (CC == ISD::SETEQ || CC == ISD::SETNE) return SDOperand();
  
  MVT::ValueType ResVT = Op.getValueType();
  MVT::ValueType CmpVT = Op.getOperand(0).getValueType();
  SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
  SDOperand TV  = Op.getOperand(2), FV  = Op.getOperand(3);
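  // Illustrative note (added comment, not from the original source): the PPC
  // fsel instruction computes "A >= 0.0 ? C : B", so a select_cc on "LHS cc
  // RHS" is mapped onto fsel by feeding it LHS-RHS (or RHS-LHS, or just LHS
  // when RHS is 0.0) and, for the strict less-than/greater-than forms,
  // swapping the true/false operands.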
  // If the RHS of the comparison is a 0.0, we don't need to do the
  // subtraction at all.
  if (isFloatingPointZero(RHS))
    switch (CC) {
    default: break;       // SETUO etc aren't handled by fsel.
    case ISD::SETULT:
    case ISD::SETLT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
    case ISD::SETUGE:
    case ISD::SETGE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, ResVT, LHS, TV, FV);
    case ISD::SETUGT:
    case ISD::SETGT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
    case ISD::SETULE:
    case ISD::SETLE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, ResVT,
                         DAG.getNode(ISD::FNEG, MVT::f64, LHS), TV, FV);
    }
  
  SDOperand Cmp;
  switch (CC) {
  default: break;       // SETUO etc aren't handled by fsel.
  case ISD::SETULT:
  case ISD::SETLT:
    Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
  case ISD::SETUGE:
  case ISD::SETGE:
    Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
  case ISD::SETUGT:
  case ISD::SETGT:
    Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
  case ISD::SETULE:
  case ISD::SETLE:
    Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
  }
  return SDOperand();
}
static SDOperand LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
  assert(MVT::isFloatingPoint(Op.getOperand(0).getValueType()));
  SDOperand Src = Op.getOperand(0);
  if (Src.getValueType() == MVT::f32)
    Src = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Src);
  
  SDOperand Tmp;
  switch (Op.getValueType()) {
  default: assert(0 && "Unhandled FP_TO_SINT type in custom expander!");
  case MVT::i32:
    Tmp = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Src);
    break;
  case MVT::i64:
    Tmp = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Src);
    break;
  }
  
  // Convert the FP value to an int value through memory.
  SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Tmp);
  if (Op.getValueType() == MVT::i32)
    Bits = DAG.getNode(ISD::TRUNCATE, MVT::i32, Bits);
  return Bits;
}
static SDOperand LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
  if (Op.getOperand(0).getValueType() == MVT::i64) {
    SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0));
    SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Bits);
    if (Op.getValueType() == MVT::f32)
      FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP);
    return FP;
  }
  
  assert(Op.getOperand(0).getValueType() == MVT::i32 &&
         "Unhandled SINT_TO_FP type in custom expander!");
  // Since we only generate this in 64-bit mode, we can take advantage of
  // 64-bit registers.  In particular, sign extend the input value into the
  // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
  // slot, then lfd it and fcfid it.
  MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(8, 8);
  SDOperand FIdx = DAG.getFrameIndex(FrameIdx, MVT::i32);
  
  SDOperand Ext64 = DAG.getNode(PPCISD::EXTSW_32, MVT::i32,
                                Op.getOperand(0));
  
  // STD the extended value into the stack slot.
  SDOperand Store = DAG.getNode(PPCISD::STD_32, MVT::Other,
                                DAG.getEntryNode(), Ext64, FIdx,
                                DAG.getSrcValue(NULL));
  // Load the value as a double.
  SDOperand Ld = DAG.getLoad(MVT::f64, Store, FIdx, DAG.getSrcValue(NULL));
  
  // FCFID it and return it.
  SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Ld);
  if (Op.getValueType() == MVT::f32)
    FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP);
  return FP;
}
static SDOperand LowerSHL(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getValueType() == MVT::i64 &&
         Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SHL!");
  // The generic code does a fine job expanding shift by a constant.
  if (isa<ConstantSDNode>(Op.getOperand(1))) return SDOperand();
  
  // Otherwise, expand into a bunch of logical ops.  Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
  SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(0, MVT::i32));
  SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(1, MVT::i32));
  SDOperand Amt = Op.getOperand(1);
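  // Illustrative note (added comment, not from the original source): PPC's
  // 32-bit shift instructions produce 0 for shift amounts in the 32..63
  // range, which is what makes this decomposition work.  For Amt < 32 the
  // "Amt-32" shift contributes nothing; for Amt in 32..63 the "Amt" and
  // "32-Amt" shifts of the wrong word contribute nothing and only the
  // "Amt-32" shift survives.  LowerSRL and LowerSRA below use the same trick.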
  SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
                               DAG.getConstant(32, MVT::i32), Amt);
  SDOperand Tmp2 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Amt);
  SDOperand Tmp3 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Tmp1);
  SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3);
  SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
                               DAG.getConstant(-32U, MVT::i32));
  SDOperand Tmp6 = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Tmp5);
  SDOperand OutHi = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6);
  SDOperand OutLo = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Amt);
  return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi);
}
static SDOperand LowerSRL(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getValueType() == MVT::i64 &&
         Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRL!");
  // The generic code does a fine job expanding shift by a constant.
  if (isa<ConstantSDNode>(Op.getOperand(1))) return SDOperand();
  
  // Otherwise, expand into a bunch of logical ops.  Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
  SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(0, MVT::i32));
  SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(1, MVT::i32));
  SDOperand Amt = Op.getOperand(1);
  
  SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
                               DAG.getConstant(32, MVT::i32), Amt);
  SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt);
  SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1);
  SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3);
  SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
                               DAG.getConstant(-32U, MVT::i32));
  SDOperand Tmp6 = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Tmp5);
  SDOperand OutLo = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6);
  SDOperand OutHi = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Amt);
  return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi);
}
static SDOperand LowerSRA(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getValueType() == MVT::i64 &&
         Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRA!");
  // The generic code does a fine job expanding shift by a constant.
  if (isa<ConstantSDNode>(Op.getOperand(1))) return SDOperand();
  
  // Otherwise, expand into a bunch of logical ops, followed by a select_cc.
  SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(0, MVT::i32));
  SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(1, MVT::i32));
  SDOperand Amt = Op.getOperand(1);
  
  SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
                               DAG.getConstant(32, MVT::i32), Amt);
  SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt);
  SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1);
  SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3);
  SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
                               DAG.getConstant(-32U, MVT::i32));
  SDOperand Tmp6 = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Tmp5);
  SDOperand OutHi = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Amt);
  SDOperand OutLo = DAG.getSelectCC(Tmp5, DAG.getConstant(0, MVT::i32),
                                    Tmp4, Tmp6, ISD::SETLE);
  return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi);
}
//===----------------------------------------------------------------------===//
// Vector related lowering.
//
// If this is a vector of constants or undefs, get the bits.  A bit in
// UndefBits is set if the corresponding element of the vector is an
// ISD::UNDEF value.  For undefs, the corresponding VectorBits values are
// zero.   Return true if this is not an array of constants, false if it is.
//
static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t VectorBits[2],
                                       uint64_t UndefBits[2]) {
  // Start with zero'd results.
  VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0;
  
  unsigned EltBitSize = MVT::getSizeInBits(BV->getOperand(0).getValueType());
  for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
    SDOperand OpVal = BV->getOperand(i);
    
    unsigned PartNo = i >= e/2;     // In the upper 128 bits?
    unsigned SlotNo = e/2 - (i & (e/2-1))-1;  // Which subpiece of the uint64_t.
    
    uint64_t EltBits = 0;
    if (OpVal.getOpcode() == ISD::UNDEF) {
      uint64_t EltUndefBits = ~0U >> (32-EltBitSize);
      UndefBits[PartNo] |= EltUndefBits << (SlotNo*EltBitSize);
      continue;
    } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
      EltBits = CN->getValue() & (~0U >> (32-EltBitSize));
    } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
      assert(CN->getValueType(0) == MVT::f32 &&
             "Only one legal FP vector type!");
      EltBits = FloatToBits(CN->getValue());
    } else {
      // Nonconstant element.
      return true;
    }
    
    VectorBits[PartNo] |= EltBits << (SlotNo*EltBitSize);
  }
  
  //printf("%llx %llx %llx %llx\n",
  //       VectorBits[0], VectorBits[1], UndefBits[0], UndefBits[1]);
  return false;
}
// If this is a splat (repetition) of a value across the whole vector, return
// the smallest size that splats it.  For example, "0x01010101010101..." is a
// splat of 0x01, 0x0101, and 0x01010101.  We return SplatBits = 0x01 and
// SplatSize = 1 byte.
static bool isConstantSplat(const uint64_t Bits128[2],
                            const uint64_t Undef128[2],
                            unsigned &SplatBits, unsigned &SplatUndef,
                            unsigned &SplatSize) {
  
  // Don't let undefs prevent splats from matching.  See if the top 64-bits are
  // the same as the lower 64-bits, ignoring undefs.
  if ((Bits128[0] & ~Undef128[1]) != (Bits128[1] & ~Undef128[0]))
    return false;  // Can't be a splat if two pieces don't match.
  
  uint64_t Bits64  = Bits128[0] | Bits128[1];
  uint64_t Undef64 = Undef128[0] & Undef128[1];
  
  // Check that the top 32-bits are the same as the lower 32-bits, ignoring
  // undefs.
  if ((Bits64 & (~Undef64 >> 32)) != ((Bits64 >> 32) & ~Undef64))
    return false;  // Can't be a splat if two pieces don't match.
  
  uint32_t Bits32  = uint32_t(Bits64) | uint32_t(Bits64 >> 32);
  uint32_t Undef32 = uint32_t(Undef64) & uint32_t(Undef64 >> 32);
  
  // If the top 16-bits are different than the lower 16-bits, ignoring
  // undefs, we have an i32 splat.
  if ((Bits32 & (~Undef32 >> 16)) != ((Bits32 >> 16) & ~Undef32)) {
    SplatBits = Bits32;
    SplatUndef = Undef32;
    SplatSize = 4;
    return true;
  }
  
  uint16_t Bits16  = uint16_t(Bits32)  | uint16_t(Bits32 >> 16);
  uint16_t Undef16 = uint16_t(Undef32) & uint16_t(Undef32 >> 16);
  
  // If the top 8-bits are different than the lower 8-bits, ignoring
  // undefs, we have an i16 splat.
  if ((Bits16 & (uint16_t(~Undef16) >> 8)) != ((Bits16 >> 8) & ~Undef16)) {
    SplatBits = Bits16;
    SplatUndef = Undef16;
    SplatSize = 2;
    return true;
  }
  
  // Otherwise, we have an 8-bit splat.
  SplatBits  = uint8_t(Bits16)  | uint8_t(Bits16 >> 8);
  SplatUndef = uint8_t(Undef16) & uint8_t(Undef16 >> 8);
  SplatSize = 1;
  return true;
}
/// BuildSplatI - Build a canonical splati of Val with an element size of
/// SplatSize.  Cast the result to VT.
static SDOperand BuildSplatI(int Val, unsigned SplatSize, MVT::ValueType VT,
                             SelectionDAG &DAG) {
  assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");
  
  // Force vspltis[hw] -1 to vspltisb -1.
  if (Val == -1) SplatSize = 1;
  
  static const MVT::ValueType VTys[] = { // canonical VT to use for each size.
    MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
  };
  MVT::ValueType CanonicalVT = VTys[SplatSize-1];
  
  // Build a canonical splat for this value.
  SDOperand Elt = DAG.getConstant(Val, MVT::getVectorBaseType(CanonicalVT));
  std::vector<SDOperand> Ops(MVT::getVectorNumElements(CanonicalVT), Elt);
  SDOperand Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT, Ops);
  return DAG.getNode(ISD::BIT_CONVERT, VT, Res);
}
/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
/// specified intrinsic ID.
static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand LHS, SDOperand RHS,
                                  SelectionDAG &DAG,
                                  MVT::ValueType DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = LHS.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT,
                     DAG.getConstant(IID, MVT::i32), LHS, RHS);
}

/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
/// specified intrinsic ID.
static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand Op0, SDOperand Op1,
                                  SDOperand Op2, SelectionDAG &DAG,
                                  MVT::ValueType DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = Op0.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT,
                     DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2);
}
/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
/// amount.  The result has the specified value type.
static SDOperand BuildVSLDOI(SDOperand LHS, SDOperand RHS, unsigned Amt,
                             MVT::ValueType VT, SelectionDAG &DAG) {
  // Force LHS/RHS to be the right type.
  LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, LHS);
  RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, RHS);
  
  std::vector<SDOperand> Ops;
  for (unsigned i = 0; i != 16; ++i)
    Ops.push_back(DAG.getConstant(i+Amt, MVT::i32));
  SDOperand T = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, LHS, RHS,
                            DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops));
  return DAG.getNode(ISD::BIT_CONVERT, VT, T);
}
// If this is a case we can't handle, return null and let the default
// expansion code take care of it.  If we CAN select this case, and if it
// selects to a single instruction, return Op.  Otherwise, if we can codegen
// this case more efficiently than a constant pool load, lower it to the
// sequence of ops that should be used.
static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
  // If this is a vector of constants or undefs, get the bits.  A bit in
  // UndefBits is set if the corresponding element of the vector is an
  // ISD::UNDEF value.  For undefs, the corresponding VectorBits values are
  // zero.
  uint64_t VectorBits[2];
  uint64_t UndefBits[2];
  if (GetConstantBuildVectorBits(Op.Val, VectorBits, UndefBits))
    return SDOperand();   // Not a constant vector.
  
  // If this is a splat (repetition) of a value across the whole vector, return
  // the smallest size that splats it.  For example, "0x01010101010101..." is a
  // splat of 0x01, 0x0101, and 0x01010101.  We return SplatBits = 0x01 and
  // SplatSize = 1 byte.
  unsigned SplatBits, SplatUndef, SplatSize;
  if (isConstantSplat(VectorBits, UndefBits, SplatBits, SplatUndef, SplatSize)){
    bool HasAnyUndefs = (UndefBits[0] | UndefBits[1]) != 0;
    
    // First, handle single instruction cases.
    
    // All zeros?
    if (SplatBits == 0) {
      // Canonicalize all zero vectors to be v4i32.
      if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
        SDOperand Z = DAG.getConstant(0, MVT::i32);
        Z = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Z, Z, Z, Z);
        Op = DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Z);
      }
      return Op;
    }
    
    // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
    int32_t SextVal= int32_t(SplatBits << (32-8*SplatSize)) >> (32-8*SplatSize);
    if (SextVal >= -16 && SextVal <= 15)
      return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG);
    
    
    // Two instruction sequences.
    
    // If this value is in the range [-32,30] and is even, use:
    //    tmp = VSPLTI[bhw], result = add tmp, tmp
    if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) {
      Op = BuildSplatI(SextVal >> 1, SplatSize, Op.getValueType(), DAG);
      return DAG.getNode(ISD::ADD, Op.getValueType(), Op, Op);
    }
    
    // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
    // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
    // for fneg/fabs.
    if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
      // Make -1 and vspltisw -1:
      SDOperand OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG);
      
      // Make the VSLW intrinsic, computing 0x8000_0000.
      SDOperand Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
                                       OnesV, DAG);
      
      // xor by OnesV to invert it.
      Res = DAG.getNode(ISD::XOR, MVT::v4i32, Res, OnesV);
      return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
    }
    
    // Check to see if this is a wide variety of vsplti*, binop self cases.
    unsigned SplatBitSize = SplatSize*8;
    static const char SplatCsts[] = {
      -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
      -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
    };
    for (unsigned idx = 0; idx < sizeof(SplatCsts)/sizeof(SplatCsts[0]); ++idx){
      // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
      // cases which are ambiguous (e.g. formation of 0x8000_0000).  'vsplti -1'
      int i = SplatCsts[idx];
      
      // Figure out what shift amount will be used by altivec if shifted by i in
      // this splat size.
      unsigned TypeShiftAmt = i & (SplatBitSize-1);
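      // Illustrative note (added comment, not from the original source): the
      // AltiVec vsl/vsr/vrl instructions shift each element by the low
      // log2(element bits) bits of the corresponding element of the shift
      // operand, so splatting i and then shifting the splat by itself shifts
      // by TypeShiftAmt = i & (SplatBitSize-1).  The checks below test
      // whether the desired value is reachable from vsplti(i) by one such
      // shift or rotate.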
      // vsplti + shl self.
      if (SextVal == (i << (int)TypeShiftAmt)) {
        Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
          Intrinsic::ppc_altivec_vslw
        };
        return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG);
      }
      
      // vsplti + srl self.
      if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
        Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
          Intrinsic::ppc_altivec_vsrw
        };
        return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG);
      }
      
      // vsplti + sra self.
      if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
        Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
          Intrinsic::ppc_altivec_vsraw
        };
        return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG);
      }
      
      // vsplti + rol self.
      if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                           ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
        Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
          Intrinsic::ppc_altivec_vrlw
        };
        return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG);
      }
      
      // t = vsplti c, result = vsldoi t, t, 1
      if (SextVal == ((i << 8) | (i >> (TypeShiftAmt-8)))) {
        SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
        return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG);
      }
      // t = vsplti c, result = vsldoi t, t, 2
      if (SextVal == ((i << 16) | (i >> (TypeShiftAmt-16)))) {
        SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
        return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG);
      }
      // t = vsplti c, result = vsldoi t, t, 3
      if (SextVal == ((i << 24) | (i >> (TypeShiftAmt-24)))) {
        SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
        return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG);
      }
    }
    
    // Three instruction sequences.
    
    // Odd, in range [17,31]:  (vsplti C)-(vsplti -16).
    if (SextVal >= 0 && SextVal <= 31) {
      SDOperand LHS = BuildSplatI(SextVal-16, SplatSize, Op.getValueType(),DAG);
      SDOperand RHS = BuildSplatI(-16, SplatSize, Op.getValueType(), DAG);
      return DAG.getNode(ISD::SUB, Op.getValueType(), LHS, RHS);
    }
    // Odd, in range [-31,-17]:  (vsplti C)+(vsplti -16).
    if (SextVal >= -31 && SextVal <= 0) {
      SDOperand LHS = BuildSplatI(SextVal+16, SplatSize, Op.getValueType(),DAG);
      SDOperand RHS = BuildSplatI(-16, SplatSize, Op.getValueType(), DAG);
      return DAG.getNode(ISD::ADD, Op.getValueType(), LHS, RHS);
    }
  }
  
  return SDOperand();
}
/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
static SDOperand GeneratePerfectShuffle(unsigned PFEntry, SDOperand LHS,
                                        SDOperand RHS, SelectionDAG &DAG) {
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
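  // Illustrative note (added comment, not from the original source): the top
  // two bits of a PerfectShuffleTable entry hold the cost, the next four bits
  // the operation, and the two 13-bit fields encode the operands as
  // four-element selectors in base 9 (digits 0-3 pick elements of LHS, 4-7
  // elements of RHS, 8 means undef).  Thus (1*9+2)*9+3 below is the identity
  // selector <0,1,2,3> for LHS, and ((4*9+5)*9+6)*9+7 is <4,5,6,7> for RHS.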
  enum {
    OP_COPY = 0,   // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
    OP_VMRGHW,
    OP_VMRGLW,
    OP_VSPLTISW0,
    OP_VSPLTISW1,
    OP_VSPLTISW2,
    OP_VSPLTISW3,
    OP_VSLDOI4,
    OP_VSLDOI8,
    OP_VSLDOI12
  };
  
  if (OpNum == OP_COPY) {
    if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }
  
  SDOperand OpLHS, OpRHS;
  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG);
  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG);
  
  unsigned ShufIdxs[16];
  switch (OpNum) {
  default: assert(0 && "Unknown i32 permute!");
  case OP_VMRGHW:
    ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
    ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
    ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
    ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
    break;
  case OP_VMRGLW:
    ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
    ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
    ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
    ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
    break;
  case OP_VSPLTISW0:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+0;
    break;
  case OP_VSPLTISW1:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+4;
    break;
  case OP_VSPLTISW2:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+8;
    break;
  case OP_VSPLTISW3:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+12;
    break;
  case OP_VSLDOI4:
    return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG);
  case OP_VSLDOI8:
    return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG);
  case OP_VSLDOI12:
    return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG);
  }
  std::vector<SDOperand> Ops;
  for (unsigned i = 0; i != 16; ++i)
    Ops.push_back(DAG.getConstant(ShufIdxs[i], MVT::i32));
  
  return DAG.getNode(ISD::VECTOR_SHUFFLE, OpLHS.getValueType(), OpLHS, OpRHS,
                     DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops));
}
/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
/// is a shuffle we can handle in a single instruction, return it.  Otherwise,
/// return the code it can be lowered into.  Worst case, it can always be
/// lowered into a vperm.
static SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
  SDOperand V1 = Op.getOperand(0);
  SDOperand V2 = Op.getOperand(1);
  SDOperand PermMask = Op.getOperand(2);
  
  // Cases that are handled by instructions that take permute immediates
  // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
  // selected by the instruction selector.
  if (V2.getOpcode() == ISD::UNDEF) {
    if (PPC::isSplatShuffleMask(PermMask.Val, 1) ||
        PPC::isSplatShuffleMask(PermMask.Val, 2) ||
        PPC::isSplatShuffleMask(PermMask.Val, 4) ||
        PPC::isVPKUWUMShuffleMask(PermMask.Val, true) ||
        PPC::isVPKUHUMShuffleMask(PermMask.Val, true) ||
        PPC::isVSLDOIShuffleMask(PermMask.Val, true) != -1 ||
        PPC::isVMRGLShuffleMask(PermMask.Val, 1, true) ||
        PPC::isVMRGLShuffleMask(PermMask.Val, 2, true) ||
        PPC::isVMRGLShuffleMask(PermMask.Val, 4, true) ||
        PPC::isVMRGHShuffleMask(PermMask.Val, 1, true) ||
        PPC::isVMRGHShuffleMask(PermMask.Val, 2, true) ||
        PPC::isVMRGHShuffleMask(PermMask.Val, 4, true)) {
      return Op;
    }
  }
  
  // Altivec has a variety of "shuffle immediates" that take two vector inputs
  // and produce a fixed permutation.  If any of these match, do not lower to
  // VPERM.
  if (PPC::isVPKUWUMShuffleMask(PermMask.Val, false) ||
      PPC::isVPKUHUMShuffleMask(PermMask.Val, false) ||
      PPC::isVSLDOIShuffleMask(PermMask.Val, false) != -1 ||
      PPC::isVMRGLShuffleMask(PermMask.Val, 1, false) ||
      PPC::isVMRGLShuffleMask(PermMask.Val, 2, false) ||
      PPC::isVMRGLShuffleMask(PermMask.Val, 4, false) ||
      PPC::isVMRGHShuffleMask(PermMask.Val, 1, false) ||
      PPC::isVMRGHShuffleMask(PermMask.Val, 2, false) ||
      PPC::isVMRGHShuffleMask(PermMask.Val, 4, false))
    return Op;
1386 // Check to see if this is a shuffle of 4-byte values. If so, we can use our
1387 // perfect shuffle table to emit an optimal matching sequence.
1388 unsigned PFIndexes[4];
1389 bool isFourElementShuffle = true;
1390 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
1391 unsigned EltNo = 8; // Start out undef.
1392 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte.
1393 if (PermMask.getOperand(i*4+j).getOpcode() == ISD::UNDEF)
1394 continue; // Undef, ignore it.
1396 unsigned ByteSource =
1397 cast<ConstantSDNode>(PermMask.getOperand(i*4+j))->getValue();
1398 if ((ByteSource & 3) != j) {
1399 isFourElementShuffle = false;
1404 EltNo = ByteSource/4;
1405 } else if (EltNo != ByteSource/4) {
1406 isFourElementShuffle = false;
1410 PFIndexes[i] = EltNo;
1413 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
1414 // perfect shuffle vector to determine if it is cost effective to do this as
1415 // discrete instructions, or whether we should use a vperm.
1416 if (isFourElementShuffle) {
1417 // Compute the index in the perfect shuffle table.
1418 unsigned PFTableIndex =
1419 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
1421 unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
1422 unsigned Cost = (PFEntry >> 30);
1424 // Determining when to avoid vperm is tricky. Many things affect the cost
1425 // of vperm, particularly how many times the perm mask needs to be computed.
1426 // For example, if the perm mask can be hoisted out of a loop or is already
1427 // used (perhaps because there are multiple permutes with the same shuffle
1428 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of
1429 // the loop requires an extra register.
1431 // As a compromise, we only emit discrete instructions if the shuffle can be
1432 // generated in 3 or fewer operations. When we have loop information
1433 // available, if this block is within a loop, we should avoid using vperm
1434 // for 3-operation perms and use a constant pool load instead.
1436 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG);
1439 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
1440 // vector that will get spilled to the constant pool.
1441 if (V2.getOpcode() == ISD::UNDEF) V2 = V1;
1443 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
1444 // that it is in input element units, not in bytes. Convert now.
1445 MVT::ValueType EltVT = MVT::getVectorBaseType(V1.getValueType());
1446 unsigned BytesPerElement = MVT::getSizeInBits(EltVT)/8;
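// For example, for a v4i32 shuffle BytesPerElement is 4, so mask element k
// expands below to the four byte indices 4*k .. 4*k+3 that vperm expects.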
1448 std::vector<SDOperand> ResultMask;
1449 for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) {
1450 unsigned SrcElt;
1451 if (PermMask.getOperand(i).getOpcode() == ISD::UNDEF)
1452 SrcElt = 0;
1453 else
1454 SrcElt = cast<ConstantSDNode>(PermMask.getOperand(i))->getValue();
1456 for (unsigned j = 0; j != BytesPerElement; ++j)
1457 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
1458 MVT::i8));
1459 }
1461 SDOperand VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, ResultMask);
1462 return DAG.getNode(PPCISD::VPERM, V1.getValueType(), V1, V2, VPermMask);
1465 /// getAltivecCompareInfo - Given an intrinsic, return false if it is not an
1466 /// altivec comparison. If it is, return true and fill in Opc/isDot with
1467 /// information about the intrinsic.
1468 static bool getAltivecCompareInfo(SDOperand Intrin, int &CompareOpc,
1469 bool &isDot) {
1470 unsigned IntrinsicID = cast<ConstantSDNode>(Intrin.getOperand(0))->getValue();
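// CompareOpc is the opcode number of the matching vcmp* instruction (for
// example 198 for vcmpeqfp), and isDot is set for the record-form ("dot")
// predicate variants, which also deposit their result in CR6.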
1473 switch (IntrinsicID) {
1474 default: return false;
1475 // Comparison predicates.
1476 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1; break;
1477 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break;
1478 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1; break;
1479 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1; break;
1480 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break;
1481 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break;
1482 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break;
1483 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break;
1484 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break;
1485 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break;
1486 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break;
1487 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break;
1488 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break;
1490 // Normal Comparisons.
1491 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0; break;
1492 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0; break;
1493 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0; break;
1494 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0; break;
1495 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0; break;
1496 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0; break;
1497 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0; break;
1498 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0; break;
1499 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0; break;
1500 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0; break;
1501 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0; break;
1502 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0; break;
1503 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0; break;
1508 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
1509 /// lower, do it, otherwise return null.
1510 static SDOperand LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) {
1511 // If this is a lowered altivec predicate compare, CompareOpc is set to the
1512 // opcode number of the comparison.
1513 int CompareOpc;
1514 bool isDot;
1515 if (!getAltivecCompareInfo(Op, CompareOpc, isDot))
1516 return SDOperand(); // Don't custom lower most intrinsics.
1518 // If this is a non-dot comparison, make the VCMP node and we are done.
1519 if (!isDot) {
1520 SDOperand Tmp = DAG.getNode(PPCISD::VCMP, Op.getOperand(2).getValueType(),
1521 Op.getOperand(1), Op.getOperand(2),
1522 DAG.getConstant(CompareOpc, MVT::i32));
1523 return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Tmp);
1524 }
1526 // Create the PPCISD altivec 'dot' comparison node.
1527 std::vector<SDOperand> Ops;
1528 std::vector<MVT::ValueType> VTs;
1529 Ops.push_back(Op.getOperand(2)); // LHS
1530 Ops.push_back(Op.getOperand(3)); // RHS
1531 Ops.push_back(DAG.getConstant(CompareOpc, MVT::i32));
1532 VTs.push_back(Op.getOperand(2).getValueType());
1533 VTs.push_back(MVT::Flag);
1534 SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops);
1536 // Now that we have the comparison, emit a copy from the CR to a GPR.
1537 // This is flagged to the above dot comparison.
1538 SDOperand Flags = DAG.getNode(PPCISD::MFCR, MVT::i32,
1539 DAG.getRegister(PPC::CR6, MVT::i32),
1540 CompNode.getValue(1));
1542 // Unpack the result based on how the target uses it.
1543 unsigned BitNo; // Bit # of CR6.
1544 bool InvertBit; // Invert result?
1545 switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) {
1546 default: // Can't happen, don't crash on invalid number though.
1547 case 0: // Return the value of the EQ bit of CR6.
1548 BitNo = 0; InvertBit = false;
1549 break;
1550 case 1: // Return the inverted value of the EQ bit of CR6.
1551 BitNo = 0; InvertBit = true;
1552 break;
1553 case 2: // Return the value of the LT bit of CR6.
1554 BitNo = 2; InvertBit = false;
1555 break;
1556 case 3: // Return the inverted value of the LT bit of CR6.
1557 BitNo = 2; InvertBit = true;
1558 break;
1559 }
1561 // Shift the bit into the low position.
1562 Flags = DAG.getNode(ISD::SRL, MVT::i32, Flags,
1563 DAG.getConstant(8-(3-BitNo), MVT::i32));
1565 Flags = DAG.getNode(ISD::AND, MVT::i32, Flags,
1566 DAG.getConstant(1, MVT::i32));
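// MFCR returns all eight CR fields; CR6 occupies bits 7..4 of that value
// (counting from the least significant bit), with LT in bit 7, GT in 6, EQ
// in 5 and SO in 4.  The shift amount 8-(3-BitNo) is therefore 5 for the EQ
// bit (BitNo == 0) and 7 for the LT bit (BitNo == 2), and the AND isolates
// the selected bit.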
1568 // If we are supposed to, toggle the bit.
1569 if (InvertBit)
1570 Flags = DAG.getNode(ISD::XOR, MVT::i32, Flags,
1571 DAG.getConstant(1, MVT::i32));
1572 return Flags;
1573 }
1575 static SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) {
1576 // Create a stack slot that is 16-byte aligned.
1577 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
1578 int FrameIdx = FrameInfo->CreateStackObject(16, 16);
1579 SDOperand FIdx = DAG.getFrameIndex(FrameIdx, MVT::i32);
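// The usual trick for SCALAR_TO_VECTOR on a target with no direct
// GPR/FPR-to-vector move: spill the scalar to element 0 of an aligned stack
// slot and reload all 16 bytes as a vector.  The other lanes end up holding
// whatever was in the slot, which SCALAR_TO_VECTOR permits.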
1581 // Store the input value into Value#0 of the stack slot.
1582 SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, DAG.getEntryNode(),
1583 Op.getOperand(0), FIdx,DAG.getSrcValue(NULL));
1585 return DAG.getLoad(Op.getValueType(), Store, FIdx, DAG.getSrcValue(NULL));
1588 static SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG) {
1589 if (Op.getValueType() == MVT::v4i32) {
1590 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
1592 SDOperand Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG);
1593 SDOperand Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG); // +16 as shift amt.
1595 SDOperand RHSSwap = // = vrlw RHS, 16
1596 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG);
1598 // Shrinkify inputs to v8i16.
1599 LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, LHS);
1600 RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHS);
1601 RHSSwap = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHSSwap);
1603 // Low parts multiplied together, generating 32-bit results (we ignore the
1604 // top parts).
1605 SDOperand LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
1606 LHS, RHS, DAG, MVT::v4i32);
1608 SDOperand HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
1609 LHS, RHSSwap, Zero, DAG, MVT::v4i32);
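// With each 32-bit lane split into 16-bit halves (LHS = aHi:aLo,
// RHS = bHi:bLo), LoProd = aLo*bLo and HiProd = aLo*bHi + aHi*bLo, so the
// full product modulo 2^32 is LoProd + (HiProd << 16), which the shift and
// add below compute.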
1610 // Shift the high parts up 16 bits.
1611 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, Neg16, DAG);
1612 return DAG.getNode(ISD::ADD, MVT::v4i32, LoProd, HiProd);
1613 } else if (Op.getValueType() == MVT::v8i16) {
1614 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
1616 SDOperand Zero = BuildSplatI(0, 1, MVT::v8i16, DAG);
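// vmladduhm computes (a*b + c) modulo 2^16 in each halfword lane, so with a
// zero addend it is exactly the element-wise v8i16 multiply we need.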
1618 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
1619 LHS, RHS, Zero, DAG);
1620 } else if (Op.getValueType() == MVT::v16i8) {
1621 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
1623 // Multiply the even 8-bit parts, producing 16-bit sums.
1624 SDOperand EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
1625 LHS, RHS, DAG, MVT::v8i16);
1626 EvenParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, EvenParts);
1628 // Multiply the odd 8-bit parts, producing 16-bit sums.
1629 SDOperand OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
1630 LHS, RHS, DAG, MVT::v8i16);
1631 OddParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, OddParts);
1633 // Merge the results together.
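// Each 16-bit product sits in one halfword of EvenParts/OddParts, and its
// low eight bits are the (big-endian) odd byte of that halfword.  Taking
// byte 2*i+1 from EvenParts and byte 2*i+1+16 from OddParts (the second
// shuffle input) interleaves the truncated even- and odd-position products
// back into a single v16i8 result.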
1634 std::vector<SDOperand> Ops;
1635 for (unsigned i = 0; i != 8; ++i) {
1636 Ops.push_back(DAG.getConstant(2*i+1, MVT::i8));
1637 Ops.push_back(DAG.getConstant(2*i+1+16, MVT::i8));
1640 return DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, EvenParts, OddParts,
1641 DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops));
1643 assert(0 && "Unknown mul to lower!");
1648 /// LowerOperation - Provide custom lowering hooks for some operations.
1650 SDOperand PPCTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
1651 switch (Op.getOpcode()) {
1652 default: assert(0 && "Wasn't expecting to be able to lower this!");
1653 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
1654 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
1655 case ISD::SETCC: return LowerSETCC(Op, DAG);
1656 case ISD::VASTART: return LowerVASTART(Op, DAG, VarArgsFrameIndex);
1657 case ISD::RET: return LowerRET(Op, DAG);
1659 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
1660 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
1661 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
1663 // Lower 64-bit shifts.
1664 case ISD::SHL: return LowerSHL(Op, DAG);
1665 case ISD::SRL: return LowerSRL(Op, DAG);
1666 case ISD::SRA: return LowerSRA(Op, DAG);
1668 // Vector-related lowering.
1669 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
1670 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
1671 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
1672 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
1673 case ISD::MUL: return LowerMUL(Op, DAG);
1678 //===----------------------------------------------------------------------===//
1679 // Other Lowering Code
1680 //===----------------------------------------------------------------------===//
1682 std::vector<SDOperand>
1683 PPCTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
1685 // TODO: add a description of the PPC stack frame format, or at least some docs.
1687 MachineFunction &MF = DAG.getMachineFunction();
1688 MachineFrameInfo *MFI = MF.getFrameInfo();
1689 MachineBasicBlock& BB = MF.front();
1690 SSARegMap *RegMap = MF.getSSARegMap();
1691 std::vector<SDOperand> ArgValues;
1693 unsigned ArgOffset = 24;
1694 unsigned GPR_remaining = 8;
1695 unsigned FPR_remaining = 13;
1696 unsigned GPR_idx = 0, FPR_idx = 0;
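// These values match the 32-bit Darwin/AIX-style parameter convention: a
// 24-byte linkage area, up to eight words of arguments passed in R3-R10,
// up to thirteen floating-point arguments passed in F1-F13, and a stack
// home slot reserved for every argument starting at SP+24.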
1697 static const unsigned GPR[] = {
1698 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
1699 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
1701 static const unsigned FPR[] = {
1702 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
1703 PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
1706 // Add DAG nodes to load the arguments... On entry to a function on PPC,
1707 // the arguments start at offset 24, although they are likely to be passed
1708 // in registers.
1709 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
1710 SDOperand newroot, argt;
1712 bool needsLoad = false;
1713 bool ArgLive = !I->use_empty();
1714 MVT::ValueType ObjectVT = getValueType(I->getType());
1716 switch (ObjectVT) {
1717 default: assert(0 && "Unhandled argument type!");
1723 if (!ArgLive) break;
1724 if (GPR_remaining > 0) {
1725 unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
1726 MF.addLiveIn(GPR[GPR_idx], VReg);
1727 argt = newroot = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32);
1728 if (ObjectVT != MVT::i32) {
1729 unsigned AssertOp = I->getType()->isSigned() ? ISD::AssertSext
1730 : ISD::AssertZext;
1731 argt = DAG.getNode(AssertOp, MVT::i32, argt,
1732 DAG.getValueType(ObjectVT));
1733 argt = DAG.getNode(ISD::TRUNCATE, ObjectVT, argt);
1741 if (!ArgLive) break;
1742 if (GPR_remaining > 0) {
1743 SDOperand argHi, argLo;
1744 unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
1745 MF.addLiveIn(GPR[GPR_idx], VReg);
1746 argHi = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32);
1747 // If we have two or more remaining argument registers, then both halves
1748 // of the i64 can be sourced from there. Otherwise, the lower half will
1749 // have to come off the stack. This can happen when an i64 is preceded
1750 // by 28 bytes of arguments.
1751 if (GPR_remaining > 1) {
1752 unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
1753 MF.addLiveIn(GPR[GPR_idx+1], VReg);
1754 argLo = DAG.getCopyFromReg(argHi, VReg, MVT::i32);
1755 } else {
1756 int FI = MFI->CreateFixedObject(4, ArgOffset+4);
1757 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
1758 argLo = DAG.getLoad(MVT::i32, DAG.getEntryNode(), FIN,
1759 DAG.getSrcValue(NULL));
1761 // Build the i64 argument value from the two halves.
1762 argt = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, argLo, argHi);
1770 ObjSize = (ObjectVT == MVT::f64) ? 8 : 4;
1772 if (FPR_remaining > 0) {
1778 if (FPR_remaining > 0) {
1779 unsigned VReg;
1780 if (ObjectVT == MVT::f32)
1781 VReg = RegMap->createVirtualRegister(&PPC::F4RCRegClass);
1782 else
1783 VReg = RegMap->createVirtualRegister(&PPC::F8RCRegClass);
1784 MF.addLiveIn(FPR[FPR_idx], VReg);
1785 argt = newroot = DAG.getCopyFromReg(DAG.getRoot(), VReg, ObjectVT);
1794 // We need to load the argument to a virtual register if we determined above
1795 // that we ran out of physical registers of the appropriate type.
1796 if (needsLoad) {
1797 unsigned SubregOffset = 0;
1798 if (ObjectVT == MVT::i8 || ObjectVT == MVT::i1) SubregOffset = 3;
1799 if (ObjectVT == MVT::i16) SubregOffset = 2;
1800 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
1801 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
1802 FIN = DAG.getNode(ISD::ADD, MVT::i32, FIN,
1803 DAG.getConstant(SubregOffset, MVT::i32));
1804 argt = newroot = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
1805 DAG.getSrcValue(NULL));
1808 // Every 4 bytes of argument space consumes one of the GPRs available for
1809 // argument passing.
1810 if (GPR_remaining > 0) {
1811 unsigned delta = (GPR_remaining > 1 && ObjSize == 8) ? 2 : 1;
1812 GPR_remaining -= delta;
1813 GPR_idx += delta;
1814 }
1815 ArgOffset += ObjSize;
1817 DAG.setRoot(newroot.getValue(1));
1819 ArgValues.push_back(argt);
1822 // If the function takes a variable number of arguments, make a frame index
1823 // for the start of the first vararg value... for expansion of llvm.va_start.
1824 if (F.isVarArg()) {
1825 VarArgsFrameIndex = MFI->CreateFixedObject(4, ArgOffset);
1826 SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32);
1827 // If this function is vararg, store any remaining integer argument regs
1828 // to their spots on the stack so that they may be loaded by dereferencing the
1829 // result of va_next.
1830 std::vector<SDOperand> MemOps;
1831 for (; GPR_remaining > 0; --GPR_remaining, ++GPR_idx) {
1832 unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
1833 MF.addLiveIn(GPR[GPR_idx], VReg);
1834 SDOperand Val = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32);
1835 SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Val.getValue(1),
1836 Val, FIN, DAG.getSrcValue(NULL));
1837 MemOps.push_back(Store);
1838 // Increment the address by four for the next argument to store
1839 SDOperand PtrOff = DAG.getConstant(4, getPointerTy());
1840 FIN = DAG.getNode(ISD::ADD, MVT::i32, FIN, PtrOff);
1842 if (!MemOps.empty()) {
1843 MemOps.push_back(DAG.getRoot());
1844 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, MemOps));
1851 std::pair<SDOperand, SDOperand>
1852 PPCTargetLowering::LowerCallTo(SDOperand Chain,
1853 const Type *RetTy, bool isVarArg,
1854 unsigned CallingConv, bool isTailCall,
1855 SDOperand Callee, ArgListTy &Args,
1856 SelectionDAG &DAG) {
1857 // args_to_use will accumulate outgoing args for the PPCISD::CALL case in
1858 // SelectExpr to use to put the arguments in the appropriate registers.
1859 std::vector<SDOperand> args_to_use;
1861 // Count how many bytes are to be pushed on the stack, including the linkage
1862 // area, and parameter passing area.
1863 unsigned NumBytes = 24;
1865 if (Args.empty()) {
1866 Chain = DAG.getCALLSEQ_START(Chain,
1867 DAG.getConstant(NumBytes, getPointerTy()));
1868 } else {
1869 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
1870 switch (getValueType(Args[i].second)) {
1871 default: assert(0 && "Unknown value type!");
1886 // Just to be safe, we'll always reserve the full 24 bytes of linkage area
1887 // plus 32 bytes of argument space in case any called code gets funky on us.
1888 // (Required by ABI to support var arg)
1889 if (NumBytes < 56) NumBytes = 56;
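// 24 bytes of linkage area plus eight 4-byte GPR home slots == 56 bytes.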
1891 // Adjust the stack pointer for the new arguments...
1892 // These operations are automatically eliminated by the prolog/epilog pass
1893 Chain = DAG.getCALLSEQ_START(Chain,
1894 DAG.getConstant(NumBytes, getPointerTy()));
1896 // Set up a copy of the stack pointer for use loading and storing any
1897 // arguments that may not fit in the registers available for argument
1898 // passing.
1899 SDOperand StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
1901 // Figure out which arguments are going to go in registers, and which in
1902 // memory. Also, if this is a vararg function, floating point arguments
1903 // must be stored to our stack, and loaded into integer regs as well, if
1904 // any integer regs are available for argument passing.
1905 unsigned ArgOffset = 24;
1906 unsigned GPR_remaining = 8;
1907 unsigned FPR_remaining = 13;
1909 std::vector<SDOperand> MemOps;
1910 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
1911 // PtrOff will be used to store the current argument to the stack if a
1912 // register cannot be found for it.
1913 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
1914 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
1915 MVT::ValueType ArgVT = getValueType(Args[i].second);
1918 default: assert(0 && "Unexpected ValueType for argument!");
1922 // Promote the integer to 32 bits. If the input type is signed use a
1923 // sign extend, otherwise use a zero extend.
1924 if (Args[i].second->isSigned())
1925 Args[i].first = DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Args[i].first);
1926 else
1927 Args[i].first = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Args[i].first);
1930 if (GPR_remaining > 0) {
1931 args_to_use.push_back(Args[i].first);
1932 --GPR_remaining;
1933 } else {
1934 MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
1935 Args[i].first, PtrOff,
1936 DAG.getSrcValue(NULL)));
1941 // If we have one free GPR left, we can place the upper half of the i64
1942 // in it, and store the other half to the stack. If we have two or more
1943 // free GPRs, then we can pass both halves of the i64 in registers.
1944 if (GPR_remaining > 0) {
1945 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
1946 Args[i].first, DAG.getConstant(1, MVT::i32));
1947 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
1948 Args[i].first, DAG.getConstant(0, MVT::i32));
1949 args_to_use.push_back(Hi);
1950 --GPR_remaining;
1951 if (GPR_remaining > 0) {
1952 args_to_use.push_back(Lo);
1953 --GPR_remaining;
1954 } else {
1955 SDOperand ConstFour = DAG.getConstant(4, getPointerTy());
1956 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, PtrOff, ConstFour);
1957 MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
1958 Lo, PtrOff, DAG.getSrcValue(NULL)));
1961 MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
1962 Args[i].first, PtrOff,
1963 DAG.getSrcValue(NULL)));
1969 if (FPR_remaining > 0) {
1970 args_to_use.push_back(Args[i].first);
1971 --FPR_remaining;
1972 if (isVarArg) {
1973 SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Chain,
1974 Args[i].first, PtrOff,
1975 DAG.getSrcValue(NULL));
1976 MemOps.push_back(Store);
1977 // Float varargs are always shadowed in available integer registers
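// (A varargs callee does not know the types of its anonymous arguments, so
// it may fetch them from the GPRs or from the stack home slots; FP varargs
// therefore have to be materialized there as well.)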
1978 if (GPR_remaining > 0) {
1979 SDOperand Load = DAG.getLoad(MVT::i32, Store, PtrOff,
1980 DAG.getSrcValue(NULL));
1981 MemOps.push_back(Load.getValue(1));
1982 args_to_use.push_back(Load);
1985 if (GPR_remaining > 0 && MVT::f64 == ArgVT) {
1986 SDOperand ConstFour = DAG.getConstant(4, getPointerTy());
1987 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, PtrOff, ConstFour);
1988 SDOperand Load = DAG.getLoad(MVT::i32, Store, PtrOff,
1989 DAG.getSrcValue(NULL));
1990 MemOps.push_back(Load.getValue(1));
1991 args_to_use.push_back(Load);
1995 // If we have any FPRs remaining, we may also have GPRs remaining.
1996 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
1997 // GPRs.
1998 if (GPR_remaining > 0) {
1999 args_to_use.push_back(DAG.getNode(ISD::UNDEF, MVT::i32));
2002 if (GPR_remaining > 0 && MVT::f64 == ArgVT) {
2003 args_to_use.push_back(DAG.getNode(ISD::UNDEF, MVT::i32));
2008 MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
2009 Args[i].first, PtrOff,
2010 DAG.getSrcValue(NULL)));
2012 ArgOffset += (ArgVT == MVT::f32) ? 4 : 8;
2016 if (!MemOps.empty())
2017 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, MemOps);
2020 std::vector<MVT::ValueType> RetVals;
2021 MVT::ValueType RetTyVT = getValueType(RetTy);
2022 MVT::ValueType ActualRetTyVT = RetTyVT;
2023 if (RetTyVT >= MVT::i1 && RetTyVT <= MVT::i16)
2024 ActualRetTyVT = MVT::i32; // Promote result to i32.
2026 if (RetTyVT == MVT::i64) {
2027 RetVals.push_back(MVT::i32);
2028 RetVals.push_back(MVT::i32);
2029 } else if (RetTyVT != MVT::isVoid) {
2030 RetVals.push_back(ActualRetTyVT);
2032 RetVals.push_back(MVT::Other);
2034 // If the callee is a GlobalAddress node (quite common, every direct call is)
2035 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
2036 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
2037 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i32);
2039 std::vector<SDOperand> Ops;
2040 Ops.push_back(Chain);
2041 Ops.push_back(Callee);
2042 Ops.insert(Ops.end(), args_to_use.begin(), args_to_use.end());
2043 SDOperand TheCall = DAG.getNode(PPCISD::CALL, RetVals, Ops);
2044 Chain = TheCall.getValue(TheCall.Val->getNumValues()-1);
2045 Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain,
2046 DAG.getConstant(NumBytes, getPointerTy()));
2047 SDOperand RetVal = TheCall;
2049 // If the result is a small value, add a note so that we keep track of the
2050 // information about whether it is sign or zero extended.
2051 if (RetTyVT != ActualRetTyVT) {
2052 RetVal = DAG.getNode(RetTy->isSigned() ? ISD::AssertSext : ISD::AssertZext,
2053 MVT::i32, RetVal, DAG.getValueType(RetTyVT));
2054 RetVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, RetVal);
2055 } else if (RetTyVT == MVT::i64) {
2056 RetVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, RetVal, RetVal.getValue(1));
2059 return std::make_pair(RetVal, Chain);
2062 MachineBasicBlock *
2063 PPCTargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
2064 MachineBasicBlock *BB) {
2065 assert((MI->getOpcode() == PPC::SELECT_CC_Int ||
2066 MI->getOpcode() == PPC::SELECT_CC_F4 ||
2067 MI->getOpcode() == PPC::SELECT_CC_F8 ||
2068 MI->getOpcode() == PPC::SELECT_CC_VRRC) &&
2069 "Unexpected instr type to insert");
2071 // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
2072 // control-flow pattern. The incoming instruction knows the destination vreg
2073 // to set, the condition code register to branch on, the true/false values to
2074 // select between, and a branch opcode to use.
2075 const BasicBlock *LLVM_BB = BB->getBasicBlock();
2076 ilist<MachineBasicBlock>::iterator It = BB;
2082 // cmpTY ccX, r1, r2
2084 // fallthrough --> copy0MBB
2085 MachineBasicBlock *thisMBB = BB;
2086 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
2087 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
2088 BuildMI(BB, MI->getOperand(4).getImmedValue(), 2)
2089 .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
2090 MachineFunction *F = BB->getParent();
2091 F->getBasicBlockList().insert(It, copy0MBB);
2092 F->getBasicBlockList().insert(It, sinkMBB);
2093 // Update machine-CFG edges by first adding all successors of the current
2094 // block to the new block which will contain the Phi node for the select.
2095 for(MachineBasicBlock::succ_iterator i = BB->succ_begin(),
2096 e = BB->succ_end(); i != e; ++i)
2097 sinkMBB->addSuccessor(*i);
2098 // Next, remove all successors of the current block, and add the true
2099 // and fallthrough blocks as its successors.
2100 while(!BB->succ_empty())
2101 BB->removeSuccessor(BB->succ_begin());
2102 BB->addSuccessor(copy0MBB);
2103 BB->addSuccessor(sinkMBB);
2105 // copy0MBB:
2106 // %FalseValue = ...
2107 // # fallthrough to sinkMBB
2108 BB = copy0MBB;
2110 // Update machine-CFG edges
2111 BB->addSuccessor(sinkMBB);
2113 // sinkMBB:
2114 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
2115 // ...
2116 BB = sinkMBB;
2117 BuildMI(BB, PPC::PHI, 4, MI->getOperand(0).getReg())
2118 .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
2119 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
2121 delete MI; // The pseudo instruction is gone now.
2125 //===----------------------------------------------------------------------===//
2126 // Target Optimization Hooks
2127 //===----------------------------------------------------------------------===//
2129 SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N,
2130 DAGCombinerInfo &DCI) const {
2131 TargetMachine &TM = getTargetMachine();
2132 SelectionDAG &DAG = DCI.DAG;
2133 switch (N->getOpcode()) {
2135 case ISD::SINT_TO_FP:
2136 if (TM.getSubtarget<PPCSubtarget>().is64Bit()) {
2137 if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) {
2138 // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores.
2139 // We allow the src/dst to be either f32/f64, but the intermediate
2140 // type must be i64.
2141 if (N->getOperand(0).getValueType() == MVT::i64) {
2142 SDOperand Val = N->getOperand(0).getOperand(0);
2143 if (Val.getValueType() == MVT::f32) {
2144 Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
2145 DCI.AddToWorklist(Val.Val);
2148 Val = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Val);
2149 DCI.AddToWorklist(Val.Val);
2150 Val = DAG.getNode(PPCISD::FCFID, MVT::f64, Val);
2151 DCI.AddToWorklist(Val.Val);
2152 if (N->getValueType(0) == MVT::f32) {
2153 Val = DAG.getNode(ISD::FP_ROUND, MVT::f32, Val);
2154 DCI.AddToWorklist(Val.Val);
2157 } else if (N->getOperand(0).getValueType() == MVT::i32) {
2158 // If the intermediate type is i32, we can avoid the load/store here
2159 // too.
2164 case ISD::STORE:
2165 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
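// fctiwz leaves the converted 32-bit integer in the low word of an FPR and
// stfiwx stores that word directly to memory, so the value never has to be
// moved into a GPR through a stack temporary.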
2166 if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() &&
2167 N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
2168 N->getOperand(1).getValueType() == MVT::i32) {
2169 SDOperand Val = N->getOperand(1).getOperand(0);
2170 if (Val.getValueType() == MVT::f32) {
2171 Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
2172 DCI.AddToWorklist(Val.Val);
2174 Val = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Val);
2175 DCI.AddToWorklist(Val.Val);
2177 Val = DAG.getNode(PPCISD::STFIWX, MVT::Other, N->getOperand(0), Val,
2178 N->getOperand(2), N->getOperand(3));
2179 DCI.AddToWorklist(Val.Val);
2180 return Val;
2183 case PPCISD::VCMP: {
2184 // If a VCMPo node already exists with exactly the same operands as this
2185 // node, use its result instead of this node (VCMPo computes both a CR6 and
2186 // a normal output).
2188 if (!N->getOperand(0).hasOneUse() &&
2189 !N->getOperand(1).hasOneUse() &&
2190 !N->getOperand(2).hasOneUse()) {
2192 // Scan all of the users of the LHS, looking for VCMPo's that match.
2193 SDNode *VCMPoNode = 0;
2195 SDNode *LHSN = N->getOperand(0).Val;
2196 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
2197 UI != E; ++UI)
2198 if ((*UI)->getOpcode() == PPCISD::VCMPo &&
2199 (*UI)->getOperand(1) == N->getOperand(1) &&
2200 (*UI)->getOperand(2) == N->getOperand(2) &&
2201 (*UI)->getOperand(0) == N->getOperand(0)) {
2202 VCMPoNode = *UI;
2203 break;
2206 // If there are non-zero uses of the flag value, use the VCMPo node!
2207 if (VCMPoNode && !VCMPoNode->hasNUsesOfValue(0, 1))
2208 return SDOperand(VCMPoNode, 0);
2212 case ISD::BR_CC: {
2213 // If this is a branch on an altivec predicate comparison, lower this so
2214 // that we don't have to do a MFCR: instead, branch directly on CR6. This
2215 // lowering is done pre-legalize, because the legalizer lowers the predicate
2216 // compare down to code that is difficult to reassemble.
2217 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
2218 SDOperand LHS = N->getOperand(2), RHS = N->getOperand(3);
2220 int CompareOpc;
2221 bool isDot;
2222 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
2223 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
2224 getAltivecCompareInfo(LHS, CompareOpc, isDot)) {
2225 assert(isDot && "Can't compare against a vector result!");
2227 // If this is a comparison against something other than 0/1, then we know
2228 // that the condition is never/always true.
2229 unsigned Val = cast<ConstantSDNode>(RHS)->getValue();
2230 if (Val != 0 && Val != 1) {
2231 if (CC == ISD::SETEQ) // Cond never true, remove branch.
2232 return N->getOperand(0);
2233 // Always !=, turn it into an unconditional branch.
2234 return DAG.getNode(ISD::BR, MVT::Other,
2235 N->getOperand(0), N->getOperand(4));
2238 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
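// For example, both "pred == 1" (SETEQ, Val == 1) and "pred != 0" (SETNE,
// Val == 0) mean "branch when the predicate is true", while SETEQ against 0
// or SETNE against 1 mean the opposite; the XOR above captures exactly that.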
2240 // Create the PPCISD altivec 'dot' comparison node.
2241 std::vector<SDOperand> Ops;
2242 std::vector<MVT::ValueType> VTs;
2243 Ops.push_back(LHS.getOperand(2)); // LHS of compare
2244 Ops.push_back(LHS.getOperand(3)); // RHS of compare
2245 Ops.push_back(DAG.getConstant(CompareOpc, MVT::i32));
2246 VTs.push_back(LHS.getOperand(2).getValueType());
2247 VTs.push_back(MVT::Flag);
2248 SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops);
2250 // Unpack the result based on how the target uses it.
2251 unsigned CompOpc;
2252 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getValue()) {
2253 default: // Can't happen, don't crash on invalid number though.
2254 case 0: // Branch on the value of the EQ bit of CR6.
2255 CompOpc = BranchOnWhenPredTrue ? PPC::BEQ : PPC::BNE;
2256 break;
2257 case 1: // Branch on the inverted value of the EQ bit of CR6.
2258 CompOpc = BranchOnWhenPredTrue ? PPC::BNE : PPC::BEQ;
2259 break;
2260 case 2: // Branch on the value of the LT bit of CR6.
2261 CompOpc = BranchOnWhenPredTrue ? PPC::BLT : PPC::BGE;
2262 break;
2263 case 3: // Branch on the inverted value of the LT bit of CR6.
2264 CompOpc = BranchOnWhenPredTrue ? PPC::BGE : PPC::BLT;
2265 break;
2266 }
2268 return DAG.getNode(PPCISD::COND_BRANCH, MVT::Other, N->getOperand(0),
2269 DAG.getRegister(PPC::CR6, MVT::i32),
2270 DAG.getConstant(CompOpc, MVT::i32),
2271 N->getOperand(4), CompNode.getValue(1));
2280 //===----------------------------------------------------------------------===//
2281 // Inline Assembly Support
2282 //===----------------------------------------------------------------------===//
2284 void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
2285 uint64_t Mask,
2286 uint64_t &KnownZero,
2287 uint64_t &KnownOne,
2288 unsigned Depth) const {
2291 switch (Op.getOpcode()) {
2293 case ISD::INTRINSIC_WO_CHAIN: {
2294 switch (cast<ConstantSDNode>(Op.getOperand(0))->getValue()) {
2296 case Intrinsic::ppc_altivec_vcmpbfp_p:
2297 case Intrinsic::ppc_altivec_vcmpeqfp_p:
2298 case Intrinsic::ppc_altivec_vcmpequb_p:
2299 case Intrinsic::ppc_altivec_vcmpequh_p:
2300 case Intrinsic::ppc_altivec_vcmpequw_p:
2301 case Intrinsic::ppc_altivec_vcmpgefp_p:
2302 case Intrinsic::ppc_altivec_vcmpgtfp_p:
2303 case Intrinsic::ppc_altivec_vcmpgtsb_p:
2304 case Intrinsic::ppc_altivec_vcmpgtsh_p:
2305 case Intrinsic::ppc_altivec_vcmpgtsw_p:
2306 case Intrinsic::ppc_altivec_vcmpgtub_p:
2307 case Intrinsic::ppc_altivec_vcmpgtuh_p:
2308 case Intrinsic::ppc_altivec_vcmpgtuw_p:
2309 KnownZero = ~1U; // All bits but the low one are known to be zero.
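// The predicate intrinsics only ever produce 0 or 1, so every bit above the
// low one can be reported as known zero, letting later combines remove
// redundant masks and zero-extensions of the result.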
2317 /// getConstraintType - Given a constraint letter, return the type of
2318 /// constraint it is for this target.
2319 PPCTargetLowering::ConstraintType
2320 PPCTargetLowering::getConstraintType(char ConstraintLetter) const {
2321 switch (ConstraintLetter) {
2322 default: break;
2323 case 'b': case 'r': case 'f': case 'v': case 'y':
2328 return C_RegisterClass;
2329 }
2330 return TargetLowering::getConstraintType(ConstraintLetter);
2334 std::vector<unsigned> PPCTargetLowering::
2335 getRegClassForInlineAsmConstraint(const std::string &Constraint,
2336 MVT::ValueType VT) const {
2337 if (Constraint.size() == 1) {
2338 switch (Constraint[0]) { // GCC RS6000 Constraint Letters
2339 default: break; // Unknown constraint letter
2340 case 'b': // R1-R31
2341 return make_vector<unsigned>(/*no R0*/ PPC::R1 , PPC::R2 , PPC::R3 ,
2342 PPC::R4 , PPC::R5 , PPC::R6 , PPC::R7 ,
2343 PPC::R8 , PPC::R9 , PPC::R10, PPC::R11,
2344 PPC::R12, PPC::R13, PPC::R14, PPC::R15,
2345 PPC::R16, PPC::R17, PPC::R18, PPC::R19,
2346 PPC::R20, PPC::R21, PPC::R22, PPC::R23,
2347 PPC::R24, PPC::R25, PPC::R26, PPC::R27,
2348 PPC::R28, PPC::R29, PPC::R30, PPC::R31,
2349 0);
2350 case 'r': // R0-R31
2351 return make_vector<unsigned>(PPC::R0 , PPC::R1 , PPC::R2 , PPC::R3 ,
2352 PPC::R4 , PPC::R5 , PPC::R6 , PPC::R7 ,
2353 PPC::R8 , PPC::R9 , PPC::R10, PPC::R11,
2354 PPC::R12, PPC::R13, PPC::R14, PPC::R15,
2355 PPC::R16, PPC::R17, PPC::R18, PPC::R19,
2356 PPC::R20, PPC::R21, PPC::R22, PPC::R23,
2357 PPC::R24, PPC::R25, PPC::R26, PPC::R27,
2358 PPC::R28, PPC::R29, PPC::R30, PPC::R31,
2359 0);
2360 case 'f': // F0-F31
2361 return make_vector<unsigned>(PPC::F0 , PPC::F1 , PPC::F2 , PPC::F3 ,
2362 PPC::F4 , PPC::F5 , PPC::F6 , PPC::F7 ,
2363 PPC::F8 , PPC::F9 , PPC::F10, PPC::F11,
2364 PPC::F12, PPC::F13, PPC::F14, PPC::F15,
2365 PPC::F16, PPC::F17, PPC::F18, PPC::F19,
2366 PPC::F20, PPC::F21, PPC::F22, PPC::F23,
2367 PPC::F24, PPC::F25, PPC::F26, PPC::F27,
2368 PPC::F28, PPC::F29, PPC::F30, PPC::F31,
2369 0);
2370 case 'v': // V0-V31
2371 return make_vector<unsigned>(PPC::V0 , PPC::V1 , PPC::V2 , PPC::V3 ,
2372 PPC::V4 , PPC::V5 , PPC::V6 , PPC::V7 ,
2373 PPC::V8 , PPC::V9 , PPC::V10, PPC::V11,
2374 PPC::V12, PPC::V13, PPC::V14, PPC::V15,
2375 PPC::V16, PPC::V17, PPC::V18, PPC::V19,
2376 PPC::V20, PPC::V21, PPC::V22, PPC::V23,
2377 PPC::V24, PPC::V25, PPC::V26, PPC::V27,
2378 PPC::V28, PPC::V29, PPC::V30, PPC::V31,
2379 0);
2380 case 'y': // CR registers
2381 return make_vector<unsigned>(PPC::CR0, PPC::CR1, PPC::CR2, PPC::CR3,
2382 PPC::CR4, PPC::CR5, PPC::CR6, PPC::CR7,
2383 0);
2387 return std::vector<unsigned>();
2390 // isOperandValidForConstraint
2391 bool PPCTargetLowering::
2392 isOperandValidForConstraint(SDOperand Op, char Letter) {
2403 if (!isa<ConstantSDNode>(Op)) return false; // Must be an immediate.
2404 unsigned Value = cast<ConstantSDNode>(Op)->getValue();
2406 default: assert(0 && "Unknown constraint letter!");
2407 case 'I': // "I" is a signed 16-bit constant.
2408 return (short)Value == (int)Value;
2409 case 'J': // "J" is a constant with only the high-order 16 bits nonzero.
2410 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits.
2411 return (short)Value == 0;
2412 case 'K': // "K" is a constant with only the low-order 16 bits nonzero.
2413 return (Value >> 16) == 0;
2414 case 'M': // "M" is a constant that is greater than 31.
2415 return Value > 31;
2416 case 'N': // "N" is a positive constant that is an exact power of two.
2417 return (int)Value > 0 && isPowerOf2_32(Value);
2418 case 'O': // "O" is the constant zero.
2419 return Value == 0;
2420 case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
2421 return (short)-Value == (int)-Value;
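// For example, Value == 0x00050000 satisfies 'J' and 'L' but not 'K', while
// Value == 42 satisfies 'I', 'K', 'M' and 'P' but not 'N'.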
2427 // Handle standard constraint letters.
2428 return TargetLowering::isOperandValidForConstraint(Op, Letter);
2431 /// isLegalAddressImmediate - Return true if the integer value can be used
2432 /// as the offset of the target addressing mode.
2433 bool PPCTargetLowering::isLegalAddressImmediate(int64_t V) const {
2434 // PPC allows a sign-extended 16-bit immediate field.
2435 return (V > -(1 << 16) && V < (1 << 16)-1);