//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//
#include "PPCISelLowering.h"
#include "PPCTargetMachine.h"
#include "PPCPerfectShuffle.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;

static cl::opt<bool> EnablePPCPreinc("enable-ppc-preinc");
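// This flag gates the pre-increment addressing support in
// getPreIndexedAddressParts() below; while the feature is experimental it can
// be turned on from the command line (e.g. llc -enable-ppc-preinc).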
PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
  : TargetLowering(TM), PPCSubTarget(*TM.getSubtargetImpl()) {

  // Fold away setcc operations if possible.
  setSetCCIsExpensive();

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmpLongJmp(true);

  // Set up the register classes.
  addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
  addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
  addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Expand);
  setLoadXAction(ISD::SEXTLOAD, MVT::i8, Expand);

  // PowerPC does not have truncstore for i1.
  setStoreXAction(MVT::i1, Promote);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);

  // PowerPC has no intrinsics for these particular operations.
  setOperationAction(ISD::MEMMOVE, MVT::Other, Expand);
  setOperationAction(ISD::MEMSET, MVT::Other, Expand);
  setOperationAction(ISD::MEMCPY, MVT::Other, Expand);

  // PowerPC has no SREM/UREM instructions.
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod.
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  // If this subtarget has no hardware square root, expand FSQRT into a
  // libcall.
  if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) {
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);
  }
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  // PowerPC does not have BSWAP, CTPOP or CTTZ.
  setOperationAction(ISD::BSWAP, MVT::i32 , Expand);
  setOperationAction(ISD::CTPOP, MVT::i32 , Expand);
  setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
  setOperationAction(ISD::BSWAP, MVT::i64 , Expand);
  setOperationAction(ISD::CTPOP, MVT::i64 , Expand);
  setOperationAction(ISD::CTTZ , MVT::i64 , Expand);

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32 , Expand);

  // PowerPC does not have SELECT.
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::i64, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires a SetCC.
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP.
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::i64, Expand);
  setOperationAction(ISD::BIT_CONVERT, MVT::f64, Expand);

  // We cannot sextinreg(i1).  Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  // Support label based line numbers.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!TM.getSubtarget<PPCSubtarget>().isDarwin())
    setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // RET must be custom lowered, to meet ABI requirements.
  setOperationAction(ISD::RET, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
    // 64-bit capable implementations also have instructions for converting
    // between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

    // FIXME: disable this lowered code.  This generates 64-bit register
    // values, and we don't model the fact that the top part is clobbered by
    // calls.  We need to flag these together so that the value isn't live
    // across a call.
    //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);

    // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }
  if (TM.getSubtarget<PPCSubtarget>().use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, PPC::G8RCRegisterClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }
  if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) {
    // First set operation action for all vector types to expand.  Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         VT != (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD , (MVT::ValueType)VT, Legal);
      setOperationAction(ISD::SUB , (MVT::ValueType)VT, Legal);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND   , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::AND   , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::OR    , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::OR    , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::XOR   , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::XOR   , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::LOAD  , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD  , (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v4i32);
      setOperationAction(ISD::STORE , (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::STORE , (MVT::ValueType)VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, (MVT::ValueType)VT, Expand);
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND   , MVT::v4i32, Legal);
    setOperationAction(ISD::OR    , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);

    addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass);
    addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
  }
  setSetCCResultType(MVT::i32);
  setShiftAmountType(MVT::i32);
  setSetCCResultContents(ZeroOrOneSetCCResult);

  if (TM.getSubtarget<PPCSubtarget>().isPPC64())
    setStackPointerRegisterToSaveRestore(PPC::X1);
  else
    setStackPointerRegisterToSaveRestore(PPC::R1);

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  setTargetDAGCombine(ISD::BSWAP);

  computeRegisterProperties();
}
const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case PPCISD::FSEL:          return "PPCISD::FSEL";
  case PPCISD::FCFID:         return "PPCISD::FCFID";
  case PPCISD::FCTIDZ:        return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:        return "PPCISD::FCTIWZ";
  case PPCISD::STFIWX:        return "PPCISD::STFIWX";
  case PPCISD::VMADDFP:       return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP:      return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM:         return "PPCISD::VPERM";
  case PPCISD::Hi:            return "PPCISD::Hi";
  case PPCISD::Lo:            return "PPCISD::Lo";
  case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:           return "PPCISD::SRL";
  case PPCISD::SRA:           return "PPCISD::SRA";
  case PPCISD::SHL:           return "PPCISD::SHL";
  case PPCISD::EXTSW_32:      return "PPCISD::EXTSW_32";
  case PPCISD::STD_32:        return "PPCISD::STD_32";
  case PPCISD::CALL:          return "PPCISD::CALL";
  case PPCISD::MTCTR:         return "PPCISD::MTCTR";
  case PPCISD::BCTRL:         return "PPCISD::BCTRL";
  case PPCISD::RET_FLAG:      return "PPCISD::RET_FLAG";
  case PPCISD::MFCR:          return "PPCISD::MFCR";
  case PPCISD::VCMP:          return "PPCISD::VCMP";
  case PPCISD::VCMPo:         return "PPCISD::VCMPo";
  case PPCISD::LBRX:          return "PPCISD::LBRX";
  case PPCISD::STBRX:         return "PPCISD::STBRX";
  case PPCISD::COND_BRANCH:   return "PPCISD::COND_BRANCH";
  }
}
//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//
/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDOperand Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
  else if (ISD::isEXTLoad(Op.Val) || ISD::isNON_EXTLoad(Op.Val)) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
  }
  return false;
}
/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(SDOperand Op, unsigned Val) {
  return Op.getOpcode() == ISD::UNDEF ||
         cast<ConstantSDNode>(Op)->getValue() == Val;
}
/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
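/// VPKUHUM keeps the low byte of each halfword of its two inputs, so in the
/// byte numbering used here the binary form matches the mask
/// {1,3,5,...,31} and the unary form matches {1,3,...,15} repeated twice.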
bool PPC::isVPKUHUMShuffleMask(SDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i),  i*2+1))
        return false;
  } else {
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getOperand(i),    i*2+1) ||
          !isConstantOrUndef(N->getOperand(i+8),  i*2+1))
        return false;
  }
  return true;
}
/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
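/// VPKUWUM keeps the low halfword of each word, so the binary form matches
/// the byte mask {2,3, 6,7, 10,11, ..., 30,31}.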
bool PPC::isVPKUWUMShuffleMask(SDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getOperand(i  ),  i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+1),  i*2+3))
        return false;
  } else {
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getOperand(i  ),  i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+1),  i*2+3) ||
          !isConstantOrUndef(N->getOperand(i+8),  i*2+2) ||
          !isConstantOrUndef(N->getOperand(i+9),  i*2+3))
        return false;
  }
  return true;
}
/// isVMerge - Common function, used to match vmrg* shuffles.
///
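/// A vmrgh/vmrgl shuffle interleaves UnitSize-byte units from the two inputs.
/// For example, with UnitSize == 1, a vmrglb uses LHSStart == 8 and
/// RHSStart == 24, i.e. the byte mask {8,24, 9,25, ..., 15,31}.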
static bool isVMerge(SDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getOperand(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getOperand(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}
/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
bool PPC::isVMRGLShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 8, 24);
  return isVMerge(N, UnitSize, 8, 8);
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
bool PPC::isVMRGHShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 0, 16);
  return isVMerge(N, UnitSize, 0, 0);
}
/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
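/// vsldoi concatenates its two inputs and extracts 16 consecutive bytes
/// starting at the shift amount; e.g. the mask {3,4,5,...,18} is a vsldoi
/// with a shift amount of 3.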
int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && N->getOperand(i).getOpcode() == ISD::UNDEF; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = cast<ConstantSDNode>(N->getOperand(i))->getValue();
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  if (!isUnary) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), ShiftAmt+i))
        return -1;
  } else {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(N->getOperand(i), (ShiftAmt+i) & 15))
        return -1;
  }

  return ShiftAmt;
}
/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
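/// For example, a vspltw of element 1 corresponds to the byte mask
/// {4,5,6,7, 4,5,6,7, 4,5,6,7, 4,5,6,7} with EltSize == 4.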
bool PPC::isSplatShuffleMask(SDNode *N, unsigned EltSize) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         N->getNumOperands() == 16 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = 0;
  SDOperand Elt = N->getOperand(0);
  if (ConstantSDNode *EltV = dyn_cast<ConstantSDNode>(Elt))
    ElementBase = EltV->getValue();
  else
    return false;   // FIXME: Handle UNDEF elements too!

  if (cast<ConstantSDNode>(Elt)->getValue() >= 16)
    return false;

  // Check that the bytes within the first element are consecutive.
  for (unsigned i = 1; i != EltSize; ++i) {
    if (!isa<ConstantSDNode>(N->getOperand(i)) ||
        cast<ConstantSDNode>(N->getOperand(i))->getValue() != i+ElementBase)
      return false;
  }

  assert(isa<ConstantSDNode>(Elt) && "Invalid VECTOR_SHUFFLE mask!");
  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(N->getOperand(i)) &&
           "Invalid VECTOR_SHUFFLE mask!");
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getOperand(i+j) != N->getOperand(j))
        return false;
  }

  return true;
}
/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
  assert(isSplatShuffleMask(N, EltSize));
  return cast<ConstantSDNode>(N->getOperand(0))->getValue() / EltSize;
}
/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
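/// For example, a v16i8 build_vector of sixteen 1's matches "vspltisb 1"
/// with ByteSize == 1, and a v4i32 build_vector of 0x01010101's is folded to
/// the same instruction by the halving loop below.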
SDOperand PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDOperand OpVal(0, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDOperand UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDOperand();

      if (UniquedVals[i&(Multiple-1)].Val == 0)
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDOperand();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (UniquedVals[i].Val == 0) continue;  // Must have been undefs.

      LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
      LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (UniquedVals[Multiple-1].Val == 0)
        return DAG.getTargetConstant(0, MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getValue();
      if (Val < 16)
        return DAG.getTargetConstant(Val, MVT::i32); // 0,0,0,4 -> vspltisw(4)
    }
    if (LeadingOnes) {
      if (UniquedVals[Multiple-1].Val == 0)
        return DAG.getTargetConstant(~0U, MVT::i32);  // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSignExtended();
      if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, MVT::i32);
    }

    return SDOperand();
  }

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.Val == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDOperand();
  }

  if (OpVal.Val == 0) return SDOperand();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = 0;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getValue();
    ValSizeInBytes = MVT::getSizeInBits(CN->getValueType(0))/8;
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValue());
    ValSizeInBytes = 4;
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDOperand();

  // If the element value is larger than the splat value, cut it in half and
  // check to see if the two halves are equal.  Continue doing this until we
  // get to ByteSize.  This allows us to handle 0x01010101 as 0x01.
  while (ValSizeInBytes > ByteSize) {
    ValSizeInBytes >>= 1;

    // If the top half equals the bottom half, we're still ok.
    if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
         (Value                        & ((1 << (8*ValSizeInBytes))-1)))
      return SDOperand();
  }

  // Properly sign extend the value.
  int ShAmt = (4-ByteSize)*8;
  int MaskVal = ((int)Value << ShAmt) >> ShAmt;

  // If this is zero, don't match; zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDOperand();

  // Finally, if this value fits in a 5-bit sext field, return it.
  if (((MaskVal << (32-5)) >> (32-5)) == MaskVal)
    return DAG.getTargetConstant(MaskVal, MVT::i32);
  return SDOperand();
}
//===----------------------------------------------------------------------===//
// Addressing Mode Selection
//===----------------------------------------------------------------------===//
/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value.  If so, this returns true and the
/// immediate.
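/// (For example, 0x7FFF and -32768 qualify; 0x8000 does not, since it is not
/// the sign extension of its own low 16 bits.)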
static bool isIntS16Immediate(SDNode *N, short &Imm) {
  if (N->getOpcode() != ISD::Constant)
    return false;

  Imm = (short)cast<ConstantSDNode>(N)->getValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getValue();
}
static bool isIntS16Immediate(SDOperand Op, short &Imm) {
  return isIntS16Immediate(Op.Val, Imm);
}
/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.  Returns false if it
/// can be more efficiently represented with [r+imm].
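/// An OR whose operands have no bits in common is treated like an ADD here:
/// e.g. in (or (shl X, 4), 3) the low bits of the LHS are known zero, so the
/// OR computes the same value an ADD would and can use indexed addressing.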
bool PPCTargetLowering::SelectAddressRegReg(SDOperand N, SDOperand &Base,
                                            SDOperand &Index,
                                            SelectionDAG &DAG) {
  short imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false;    // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i can fold it if we can.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are
    // provably disjoint.
    uint64_t LHSKnownZero, LHSKnownOne;
    uint64_t RHSKnownZero, RHSKnownOne;
    ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne);
    if (LHSKnownZero) {
      ComputeMaskedBits(N.getOperand(1), ~0U, RHSKnownZero, RHSKnownOne);
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // change the result.
      if ((LHSKnownZero | RHSKnownZero) == ~0U) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}
/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg.
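/// This is the D-form addressing used by most PPC loads and stores; e.g.
/// "lwz r3, 4(r31)" is base r31 with displacement 4.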
bool PPCTargetLowering::SelectAddressRegImm(SDOperand N, SDOperand &Disp,
                                            SDOperand &Base, SelectionDAG &DAG){
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm)) {
      Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true; // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true; // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm)) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      uint64_t LHSKnownZero, LHSKnownOne;
      ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne);
      if ((LHSKnownZero|~(unsigned)imm) == ~0U) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // change the result.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0".
    short Imm;
    if (isIntS16Immediate(CN, Imm)) {
      Disp = DAG.getTargetConstant(Imm, CN->getValueType(0));
      Base = DAG.getRegister(PPC::R0, CN->getValueType(0));
      return true;
    }

    // FIXME: Handle small sext constant offsets in PPC64 mode also!
    if (CN->getValueType(0) == MVT::i32) {
      int Addr = (int)CN->getValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr, MVT::i32);
      Base = DAG.getConstant(Addr - (signed short)Addr, MVT::i32);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
  else
    Base = N;
  return true;      // [r+0]
}
/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDOperand N, SDOperand &Base,
                                                SDOperand &Index,
                                                SelectionDAG &DAG) {
  // Check to see if we can easily represent this as an [r+r] address.  This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the operand is an addition, always emit this as [r+r], since this is
  // better (for code size, and execution, as the memop does the add for free)
  // than emitting an explicit add.
  if (N.getOpcode() == ISD::ADD) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(PPC::R0, N.getValueType());
  Index = N;
  return true;
}
/// SelectAddressRegImmShift - Returns true if the address N can be
/// represented by a base register plus a signed 14-bit displacement
/// [r+imm*4].  Suitable for use by STD and friends.
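/// DS-form instructions like STD encode a 14-bit displacement field that is
/// implicitly shifted left by two, so the byte offset must be a multiple of
/// 4 (hence the (imm & 3) == 0 checks below).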
bool PPCTargetLowering::SelectAddressRegImmShift(SDOperand N, SDOperand &Disp,
                                                 SDOperand &Base,
                                                 SelectionDAG &DAG) {
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
      Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true; // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true; // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      uint64_t LHSKnownZero, LHSKnownOne;
      ComputeMaskedBits(N.getOperand(0), ~0U, LHSKnownZero, LHSKnownOne);
      if ((LHSKnownZero|~(unsigned)imm) == ~0U) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // change the result.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 14-bit sext immediate field, codegen
    // this as "d, 0".
    short Imm;
    if (isIntS16Immediate(CN, Imm)) {
      Disp = DAG.getTargetConstant((unsigned short)Imm >> 2, getPointerTy());
      Base = DAG.getRegister(PPC::R0, CN->getValueType(0));
      return true;
    }

    // FIXME: Handle small sext constant offsets in PPC64 mode also!
    if (CN->getValueType(0) == MVT::i32) {
      int Addr = (int)CN->getValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr >> 2, MVT::i32);
      Base = DAG.getConstant(Addr - (signed short)Addr, MVT::i32);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
  else
    Base = N;
  return true;      // [r+0]
}
/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
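/// On PPC a pre-indexed access updates the base register with the effective
/// address; e.g. "lwzu r3, 4(r4)" loads from r4+4 and writes r4+4 back to r4.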
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDOperand &Base,
                                                  SDOperand &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) {
  // Disabled by default for now.
  if (!EnablePPCPreinc) return false;

  SDOperand Ptr;
  MVT::ValueType VT;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getLoadedVT();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT  = ST->getStoredVT();
  } else
    return false;

  // PowerPC doesn't have preinc load/store instructions for vectors.
  if (MVT::isVector(VT))
    return false;

  // TODO: Check reg+reg first.

  // LDU/STU use reg+imm*4, others use reg+imm.
  if (VT != MVT::i64) {
    // reg + imm
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG))
      return false;
  } else {
    // reg + imm * 4.
    if (!SelectAddressRegImmShift(Ptr, Offset, Base, DAG))
      return false;
  }

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
    // sext i32 to i64 when addr mode is r+i.
    if (LD->getValueType(0) == MVT::i64 && LD->getLoadedVT() == MVT::i32 &&
        LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}
//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//
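// The lowering routines below materialize symbolic addresses as the sum of a
// high and a low 16-bit part (PPCISD::Hi/PPCISD::Lo, i.e. lis-plus-low-offset
// style relocations), adding the PIC base register first when compiling PIC.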
static SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  Constant *C = CP->getConstVal();
  SDOperand CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
  SDOperand Zero = DAG.getConstant(0, PtrVT);

  const TargetMachine &TM = DAG.getTarget();

  SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, CPI, Zero);
  SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, CPI, Zero);

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to the constant pool.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  return Lo;
}
static SDOperand LowerJumpTable(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType PtrVT = Op.getValueType();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
  SDOperand Zero = DAG.getConstant(0, PtrVT);

  const TargetMachine &TM = DAG.getTarget();

  SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, JTI, Zero);
  SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, JTI, Zero);

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to the jump table.
    // The address of the table is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  return Lo;
}
static SDOperand LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType PtrVT = Op.getValueType();
  GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
  GlobalValue *GV = GSDN->getGlobal();
  SDOperand GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset());
  SDOperand Zero = DAG.getConstant(0, PtrVT);

  const TargetMachine &TM = DAG.getTarget();

  SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, GA, Zero);
  SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, GA, Zero);

  // If this is a non-darwin platform, we don't support non-static relo models
  // yet.
  if (TM.getRelocationModel() == Reloc::Static ||
      !TM.getSubtarget<PPCSubtarget>().isDarwin()) {
    // Generate non-pic code that has direct accesses to globals.
    // The address of the global is just (hi(&g)+lo(&g)).
    return DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);
  }

  if (TM.getRelocationModel() == Reloc::PIC_) {
    // With PIC, the first instruction is actually "GR+hi(&G)".
    Hi = DAG.getNode(ISD::ADD, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, PtrVT), Hi);
  }

  Lo = DAG.getNode(ISD::ADD, PtrVT, Hi, Lo);

  if (!GV->hasWeakLinkage() && !GV->hasLinkOnceLinkage() &&
      (!GV->isExternal() || GV->hasNotBeenReadFromBytecode()))
    return Lo;

  // If the global is weak or external, we have to go through the lazy
  // resolution stub.
  return DAG.getLoad(PtrVT, DAG.getEntryNode(), Lo, NULL, 0);
}
static SDOperand LowerSETCC(SDOperand Op, SelectionDAG &DAG) {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  // If we're comparing for equality to zero, expose the fact that this is
  // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
  // fold the new nodes.
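  // (For i32, "seteq X, 0" becomes "(ctlz X) >> 5": ctlz yields 32 exactly
  // when X is zero, and 32 >> 5 is 1, while any nonzero X gives ctlz < 32 and
  // thus a result of 0.)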
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    if (C->isNullValue() && CC == ISD::SETEQ) {
      MVT::ValueType VT = Op.getOperand(0).getValueType();
      SDOperand Zext = Op.getOperand(0);
      if (VT < MVT::i32) {
        VT = MVT::i32;
        Zext = DAG.getNode(ISD::ZERO_EXTEND, VT, Op.getOperand(0));
      }
      unsigned Log2b = Log2_32(MVT::getSizeInBits(VT));
      SDOperand Clz = DAG.getNode(ISD::CTLZ, VT, Zext);
      SDOperand Scc = DAG.getNode(ISD::SRL, VT, Clz,
                                  DAG.getConstant(Log2b, MVT::i32));
      return DAG.getNode(ISD::TRUNCATE, MVT::i32, Scc);
    }
    // Leave comparisons against 0 and -1 alone for now, since they're usually
    // optimized.  FIXME: revisit this when we can custom lower all setcc
    // optimizations.
    if (C->isAllOnesValue() || C->isNullValue())
      return SDOperand();
  }

  // If we have an integer seteq/setne, turn it into a compare against zero
  // by xor'ing the rhs with the lhs, which is faster than setting a
  // condition register, reading it back out, and masking the correct bit.  The
  // normal approach here uses sub to do this instead of xor.  Using xor exposes
  // the result to other bit-twiddling opportunities.
  MVT::ValueType LHSVT = Op.getOperand(0).getValueType();
  if (MVT::isInteger(LHSVT) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    MVT::ValueType VT = Op.getValueType();
    SDOperand Sub = DAG.getNode(ISD::XOR, LHSVT, Op.getOperand(0),
                                Op.getOperand(1));
    return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC);
  }
  return SDOperand();
}
static SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG,
                              unsigned VarArgsFrameIndex) {
  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
  SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2));
  return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV->getValue(),
                      SV->getOffset());
}
static SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG,
                                       int &VarArgsFrameIndex) {
  // TODO: add description of PPC stack frame format, or at least some docs.
  //
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  SSARegMap *RegMap = MF.getSSARegMap();
  SmallVector<SDOperand, 8> ArgValues;
  SDOperand Root = Op.getOperand(0);

  unsigned ArgOffset = 24;
  const unsigned Num_GPR_Regs = 8;
  const unsigned Num_FPR_Regs = 13;
  const unsigned Num_VR_Regs  = 12;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
  static const unsigned GPR_32[] = {           // 32-bit registers.
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const unsigned GPR_64[] = {           // 64-bit registers.
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const unsigned FPR[] = {
    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
    PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
  };
  static const unsigned VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;
  const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;
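  // For example, for f(int a, double b) on 32-bit targets: 'a' arrives in R3
  // (its stack slot is at offset 24), and 'b' arrives in F1 while shadowing
  // R4 and R5, since every 4 bytes of argument space consumes a GPR.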
  // Add DAG nodes to load the arguments or copy them out of registers.  On
  // entry to a function on PPC, the arguments start at offset 24, although the
  // first ones are often in registers.
  for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) {
    SDOperand ArgVal;
    bool needsLoad = false;
    MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType();
    unsigned ObjSize = MVT::getSizeInBits(ObjectVT)/8;

    unsigned CurArgOffset = ArgOffset;
    switch (ObjectVT) {
    default: assert(0 && "Unhandled argument type!");
    case MVT::i32:
      // All int arguments reserve stack space.
      ArgOffset += isPPC64 ? 8 : 4;

      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
        MF.addLiveIn(GPR[GPR_idx], VReg);
        ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i32);
        ++GPR_idx;
      } else {
        needsLoad = true;
      }
      break;
    case MVT::i64:  // PPC64
      // All int arguments reserve stack space.
      ArgOffset += 8;

      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = RegMap->createVirtualRegister(&PPC::G8RCRegClass);
        MF.addLiveIn(GPR[GPR_idx], VReg);
        ArgVal = DAG.getCopyFromReg(Root, VReg, MVT::i64);
        ++GPR_idx;
      } else {
        needsLoad = true;
      }
      break;
    case MVT::f32:
    case MVT::f64:
      // All FP arguments reserve stack space.
      ArgOffset += ObjSize;

      // Every 4 bytes of argument space consumes one of the GPRs available for
      // argument passing.
      if (GPR_idx != Num_GPR_Regs) {
        ++GPR_idx;
        if (ObjSize == 8 && GPR_idx != Num_GPR_Regs)
          ++GPR_idx;
      }
      if (FPR_idx != Num_FPR_Regs) {
        unsigned VReg;
        if (ObjectVT == MVT::f32)
          VReg = RegMap->createVirtualRegister(&PPC::F4RCRegClass);
        else
          VReg = RegMap->createVirtualRegister(&PPC::F8RCRegClass);
        MF.addLiveIn(FPR[FPR_idx], VReg);
        ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT);
        ++FPR_idx;
      } else {
        needsLoad = true;
      }
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
      // Note that vector arguments in registers don't reserve stack space.
      if (VR_idx != Num_VR_Regs) {
        unsigned VReg = RegMap->createVirtualRegister(&PPC::VRRCRegClass);
        MF.addLiveIn(VR[VR_idx], VReg);
        ArgVal = DAG.getCopyFromReg(Root, VReg, ObjectVT);
        ++VR_idx;
      } else {
        // This should be simple, but requires getting 16-byte aligned stack
        // values.
        assert(0 && "Loading VR argument not implemented yet!");
        needsLoad = true;
      }
      break;
    }

    // We need to load the argument to a virtual register if we determined
    // above that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      // If the argument is actually used, emit a load from the right stack
      // slot.
      if (!Op.Val->hasNUsesOfValue(0, ArgNo)) {
        int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset);
        SDOperand FIN = DAG.getFrameIndex(FI, PtrVT);
        ArgVal = DAG.getLoad(ObjectVT, Root, FIN, NULL, 0);
      } else {
        // Don't emit a dead load.
        ArgVal = DAG.getNode(ISD::UNDEF, ObjectVT);
      }
    }

    ArgValues.push_back(ArgVal);
  }

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  if (isVarArg) {
    VarArgsFrameIndex = MFI->CreateFixedObject(MVT::getSizeInBits(PtrVT)/8,
                                               ArgOffset);
    SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
    SmallVector<SDOperand, 8> MemOps;
    for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
      MF.addLiveIn(GPR[GPR_idx], VReg);
      SDOperand Val = DAG.getCopyFromReg(Root, VReg, PtrVT);
      SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
      MemOps.push_back(Store);
      // Increment the address by the pointer size for the next argument.
      SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8, PtrVT);
      FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
    }
    if (!MemOps.empty())
      Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                         &MemOps[0], MemOps.size());
  }

  ArgValues.push_back(Root);

  // Return the new list of results.
  std::vector<MVT::ValueType> RetVT(Op.Val->value_begin(),
                                    Op.Val->value_end());
  return DAG.getNode(ISD::MERGE_VALUES, RetVT, &ArgValues[0], ArgValues.size());
}
/// isBLACompatibleAddress - Return the immediate to use if the specified
/// 32-bit value is representable in the immediate field of a BxA instruction.
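/// (BLA's branch target is a 26-bit sign-extended absolute address whose low
/// two bits are implicitly zero; the returned constant is that address
/// pre-shifted right by 2 to fit the instruction's immediate field.)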
static SDNode *isBLACompatibleAddress(SDOperand Op, SelectionDAG &DAG) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
  if (!C) return 0;

  int Addr = C->getValue();
  if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
      (Addr << 6 >> 6) != Addr)
    return 0;  // Top 6 bits have to be sext of immediate.

  return DAG.getConstant((int)C->getValue() >> 2, MVT::i32).Val;
}
static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Chain = Op.getOperand(0);
  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  SDOperand Callee = Op.getOperand(4);
  unsigned NumOps = (Op.getNumOperands() - 5) / 2;

  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;
  unsigned PtrByteSize = isPPC64 ? 8 : 4;

  // args_to_use will accumulate outgoing args for the PPCISD::CALL case in
  // SelectExpr to use to put the arguments in the appropriate registers.
  std::vector<SDOperand> args_to_use;

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, and parameter passing area.  We start with 24/48 bytes, which is
  // prereserved space for [SP][CR][LR][3 x unused].
  unsigned NumBytes = 6*PtrByteSize;

  // Add up all the space actually used.
  for (unsigned i = 0; i != NumOps; ++i)
    NumBytes += MVT::getSizeInBits(Op.getOperand(5+2*i).getValueType())/8;

  // The prolog code of the callee may store up to 8 GPR argument registers to
  // the stack, allowing va_start to index over them in memory if the callee
  // is varargs.  Because we cannot tell if this is needed on the caller side,
  // we have to conservatively assume that it is needed.  As such, make sure
  // we have at least enough stack space for the caller to store the 8 GPRs.
  if (NumBytes < 6*PtrByteSize+8*PtrByteSize)
    NumBytes = 6*PtrByteSize+8*PtrByteSize;

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain,
                               DAG.getConstant(NumBytes, PtrVT));
  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDOperand StackPtr;
  if (isPPC64)
    StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
  else
    StackPtr = DAG.getRegister(PPC::R1, MVT::i32);

  // Figure out which arguments are going to go in registers, and which in
  // memory.  Also, if this is a vararg function, floating point operations
  // must be stored to our stack, and loaded into integer regs as well, if
  // any integer regs are available for argument passing.
  unsigned ArgOffset = 6*PtrByteSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
  static const unsigned GPR_32[] = {           // 32-bit registers.
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const unsigned GPR_64[] = {           // 64-bit registers.
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const unsigned FPR[] = {
    PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
    PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
  };
  static const unsigned VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };
  const unsigned NumGPRs = sizeof(GPR_32)/sizeof(GPR_32[0]);
  const unsigned NumFPRs = sizeof(FPR)/sizeof(FPR[0]);
  const unsigned NumVRs  = sizeof( VR)/sizeof( VR[0]);

  const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;

  std::vector<std::pair<unsigned, SDOperand> > RegsToPass;
  SmallVector<SDOperand, 8> MemOpChains;
  for (unsigned i = 0; i != NumOps; ++i) {
    SDOperand Arg = Op.getOperand(5+2*i);

    // PtrOff will be used to store the current argument to the stack if a
    // register cannot be found for it.
    SDOperand PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
    PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, PtrOff);

    // On PPC64, promote integers to 64-bit values.
    if (isPPC64 && Arg.getValueType() == MVT::i32) {
      unsigned ExtOp = ISD::ZERO_EXTEND;
      if (cast<ConstantSDNode>(Op.getOperand(5+2*i+1))->getValue())
        ExtOp = ISD::SIGN_EXTEND;
      Arg = DAG.getNode(ExtOp, MVT::i64, Arg);
    }

    switch (Arg.getValueType()) {
    default: assert(0 && "Unexpected ValueType for argument!");
    case MVT::i32:
    case MVT::i64:
      if (GPR_idx != NumGPRs) {
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
      } else {
        MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
      }
      ArgOffset += PtrByteSize;
      break;
    case MVT::f32:
    case MVT::f64:
      if (FPR_idx != NumFPRs) {
        RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));

        if (isVarArg) {
          SDOperand Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0);
          MemOpChains.push_back(Store);

          // Float varargs are always shadowed in available integer registers.
          if (GPR_idx != NumGPRs) {
            SDOperand Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0);
            MemOpChains.push_back(Load.getValue(1));
            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          }
          if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64) {
            SDOperand ConstFour = DAG.getConstant(4, PtrOff.getValueType());
            PtrOff = DAG.getNode(ISD::ADD, PtrVT, PtrOff, ConstFour);
            SDOperand Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0);
            MemOpChains.push_back(Load.getValue(1));
            RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          }
        } else {
          // If we have any FPRs remaining, we may also have GPRs remaining.
          // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
          // GPRs.
          if (GPR_idx != NumGPRs)
            ++GPR_idx;
          if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64)
            ++GPR_idx;
        }
      } else {
        MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
      }
      if (isPPC64)
        ArgOffset += 8;
      else
        ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
      assert(!isVarArg && "Don't support passing vectors to varargs yet!");
      assert(VR_idx != NumVRs &&
             "Don't support passing more than 12 vector args yet!");
      RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
      break;
    }
  }
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
                             InFlag);
    InFlag = Chain.getValue(1);
  }

  std::vector<MVT::ValueType> NodeTys;
  NodeTys.push_back(MVT::Other);   // Returns a chain
  NodeTys.push_back(MVT::Flag);    // Returns a flag for retval copy to use.

  SmallVector<SDOperand, 8> Ops;
  unsigned CallOpc = PPCISD::CALL;

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), Callee.getValueType());
  else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType());
  else if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
    // If this is an absolute destination address, use the munged value.
    Callee = SDOperand(Dest, 0);
  else {
    // Otherwise, this is an indirect call.  We have to use a MTCTR/BCTRL pair
    // to do the call; we can't use PPCISD::CALL.
    SDOperand MTCTROps[] = {Chain, Callee, InFlag};
    Chain = DAG.getNode(PPCISD::MTCTR, NodeTys, MTCTROps, 2+(InFlag.Val!=0));
    InFlag = Chain.getValue(1);

    // Copy the callee address into R12 on darwin.
    Chain = DAG.getCopyToReg(Chain, PPC::R12, Callee, InFlag);
    InFlag = Chain.getValue(1);

    NodeTys.clear();
    NodeTys.push_back(MVT::Other);
    NodeTys.push_back(MVT::Flag);
    Ops.push_back(Chain);
    CallOpc = PPCISD::BCTRL;
    Callee.Val = 0;
  }

  // If this is a direct call, pass the chain and the callee.
  if (Callee.Val) {
    Ops.push_back(Chain);
    Ops.push_back(Callee);
  }

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.Val)
    Ops.push_back(InFlag);
  Chain = DAG.getNode(CallOpc, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);
  SDOperand ResultVals[3];
  unsigned NumResults = 0;
  NodeTys.clear();

  // If the call has results, copy the values out of the ret val registers.
  switch (Op.Val->getValueType(0)) {
  default: assert(0 && "Unexpected ret value!");
  case MVT::Other: break;
  case MVT::i32:
    if (Op.Val->getValueType(1) == MVT::i32) {
      Chain = DAG.getCopyFromReg(Chain, PPC::R4, MVT::i32, InFlag).getValue(1);
      ResultVals[0] = Chain.getValue(0);
      Chain = DAG.getCopyFromReg(Chain, PPC::R3, MVT::i32,
                                 Chain.getValue(2)).getValue(1);
      ResultVals[1] = Chain.getValue(0);
      NumResults = 2;
      NodeTys.push_back(MVT::i32);
    } else {
      Chain = DAG.getCopyFromReg(Chain, PPC::R3, MVT::i32, InFlag).getValue(1);
      ResultVals[0] = Chain.getValue(0);
      NumResults = 1;
    }
    NodeTys.push_back(MVT::i32);
    break;
  case MVT::i64:
    Chain = DAG.getCopyFromReg(Chain, PPC::X3, MVT::i64, InFlag).getValue(1);
    ResultVals[0] = Chain.getValue(0);
    NumResults = 1;
    NodeTys.push_back(MVT::i64);
    break;
  case MVT::f64:
  case MVT::f32:
    Chain = DAG.getCopyFromReg(Chain, PPC::F1, Op.Val->getValueType(0),
                               InFlag).getValue(1);
    ResultVals[0] = Chain.getValue(0);
    NumResults = 1;
    NodeTys.push_back(Op.Val->getValueType(0));
    break;
  case MVT::v4f32:
  case MVT::v4i32:
  case MVT::v8i16:
  case MVT::v16i8:
    Chain = DAG.getCopyFromReg(Chain, PPC::V2, Op.Val->getValueType(0),
                               InFlag).getValue(1);
    ResultVals[0] = Chain.getValue(0);
    NumResults = 1;
    NodeTys.push_back(Op.Val->getValueType(0));
    break;
  }

  Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain,
                      DAG.getConstant(NumBytes, PtrVT));
  NodeTys.push_back(MVT::Other);

  // If the function returns void, just return the chain.
  if (NumResults == 0)
    return Chain;

  // Otherwise, merge everything together with a MERGE_VALUES node.
  ResultVals[NumResults++] = Chain;
  SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys,
                              ResultVals, NumResults);
  return Res.getValue(Op.ResNo);
}
static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Copy;
  switch(Op.getNumOperands()) {
  default:
    assert(0 && "Do not know how to return this many arguments!");
    abort();
  case 1:
    return SDOperand(); // ret void is legal
  case 3: {
    MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
    unsigned ArgReg;
    if (ArgVT == MVT::i32) {
      ArgReg = PPC::R3;
    } else if (ArgVT == MVT::i64) {
      ArgReg = PPC::X3;
    } else if (MVT::isVector(ArgVT)) {
      ArgReg = PPC::V2;
    } else {
      assert(MVT::isFloatingPoint(ArgVT));
      ArgReg = PPC::F1;
    }

    Copy = DAG.getCopyToReg(Op.getOperand(0), ArgReg, Op.getOperand(1),
                            SDOperand());

    // If we haven't noted the R3/F1 are live out, do so now.
    if (DAG.getMachineFunction().liveout_empty())
      DAG.getMachineFunction().addLiveOut(ArgReg);
    break;
  }
  case 5:
    Copy = DAG.getCopyToReg(Op.getOperand(0), PPC::R3, Op.getOperand(3),
                            SDOperand());
    Copy = DAG.getCopyToReg(Copy, PPC::R4, Op.getOperand(1),Copy.getValue(1));
    // If we haven't noted the R3+R4 are live out, do so now.
    if (DAG.getMachineFunction().liveout_empty()) {
      DAG.getMachineFunction().addLiveOut(PPC::R3);
      DAG.getMachineFunction().addLiveOut(PPC::R4);
    }
    break;
  }

  return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Copy, Copy.getValue(1));
}
/// LowerSELECT_CC - Lower floating point select_cc's into fsel instructions
/// when possible.
static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) {
  // Not FP? Not a fsel.
  if (!MVT::isFloatingPoint(Op.getOperand(0).getValueType()) ||
      !MVT::isFloatingPoint(Op.getOperand(2).getValueType()))
    return SDOperand();

  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();

  // Cannot handle SETEQ/SETNE.
  if (CC == ISD::SETEQ || CC == ISD::SETNE) return SDOperand();

  MVT::ValueType ResVT = Op.getValueType();
  MVT::ValueType CmpVT = Op.getOperand(0).getValueType();
  SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
  SDOperand TV  = Op.getOperand(2), FV  = Op.getOperand(3);

  // If the RHS of the comparison is a 0.0, we don't need to do the
  // subtraction at all.
  if (isFloatingPointZero(RHS))
    switch (CC) {
    default: break;       // SETUO etc aren't handled by fsel.
    case ISD::SETULT:
    case ISD::SETOLT:
    case ISD::SETLT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
    case ISD::SETUGE:
    case ISD::SETOGE:
    case ISD::SETGE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, ResVT, LHS, TV, FV);
    case ISD::SETUGT:
    case ISD::SETOGT:
    case ISD::SETGT:
      std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
    case ISD::SETULE:
    case ISD::SETOLE:
    case ISD::SETLE:
      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, ResVT,
                         DAG.getNode(ISD::FNEG, MVT::f64, LHS), TV, FV);
    }

  SDOperand Cmp;
  switch (CC) {
  default: break;       // SETUO etc aren't handled by fsel.
  case ISD::SETULT:
  case ISD::SETOLT:
  case ISD::SETLT:
    Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
  case ISD::SETUGE:
  case ISD::SETOGE:
  case ISD::SETGE:
    Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
  case ISD::SETUGT:
  case ISD::SETOGT:
  case ISD::SETGT:
    Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
  case ISD::SETULE:
  case ISD::SETOLE:
  case ISD::SETLE:
    Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
  }
  return SDOperand();
}
static SDOperand LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
  assert(MVT::isFloatingPoint(Op.getOperand(0).getValueType()));
  SDOperand Src = Op.getOperand(0);
  if (Src.getValueType() == MVT::f32)
    Src = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Src);

  SDOperand Tmp;
  switch (Op.getValueType()) {
  default: assert(0 && "Unhandled FP_TO_SINT type in custom expander!");
  case MVT::i32:
    Tmp = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Src);
    break;
  case MVT::i64:
    Tmp = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Src);
    break;
  }

  // Convert the FP value to an int value through memory.
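  // (There is no direct FPR-to-GPR move on these chips, so the BIT_CONVERT
  // below is expected to be legalized into a stack store + reload.)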
  SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Tmp);
  if (Op.getValueType() == MVT::i32)
    Bits = DAG.getNode(ISD::TRUNCATE, MVT::i32, Bits);
  return Bits;
}
static SDOperand LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
  if (Op.getOperand(0).getValueType() == MVT::i64) {
    SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0));
    SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Bits);
    if (Op.getValueType() == MVT::f32)
      FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP);
    return FP;
  }

  assert(Op.getOperand(0).getValueType() == MVT::i32 &&
         "Unhandled SINT_TO_FP type in custom expander!");
  // Since we only generate this in 64-bit mode, we can take advantage of
  // 64-bit registers.  In particular, sign extend the input value into the
  // 64-bit register with extsw, store the WHOLE 64-bit value into the stack,
  // then lfd it and fcfid it.
  MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(8, 8);
  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  SDOperand Ext64 = DAG.getNode(PPCISD::EXTSW_32, MVT::i32,
                                Op.getOperand(0));

  // STD the extended value into the stack slot.
  SDOperand Store = DAG.getNode(PPCISD::STD_32, MVT::Other,
                                DAG.getEntryNode(), Ext64, FIdx,
                                DAG.getSrcValue(NULL));
  // Load the value as a double.
  SDOperand Ld = DAG.getLoad(MVT::f64, Store, FIdx, NULL, 0);

  // FCFID it and return it.
  SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Ld);
  if (Op.getValueType() == MVT::f32)
    FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP);
  return FP;
}
static SDOperand LowerSHL_PARTS(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 &&
         Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SHL!");

  // Expand into a bunch of logical ops.  Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
  SDOperand Lo = Op.getOperand(0);
  SDOperand Hi = Op.getOperand(1);
  SDOperand Amt = Op.getOperand(2);
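  // The plan: for Amt in [0,31], OutHi = (Hi << Amt) | (Lo >> (32-Amt)) and
  // OutLo = Lo << Amt.  For Amt in [32,63], the low word must instead be
  // shifted into the high result by Amt-32 (Tmp5/Tmp6).  PPCISD::SHL/SRL
  // produce zero for shift amounts in [32,63] (unlike ISD::SHL), so in each
  // range exactly the right OR operands survive.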
  SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
                               DAG.getConstant(32, MVT::i32), Amt);
  SDOperand Tmp2 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Amt);
  SDOperand Tmp3 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Tmp1);
  SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3);
  SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
                               DAG.getConstant(-32U, MVT::i32));
  SDOperand Tmp6 = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Tmp5);
  SDOperand OutHi = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6);
  SDOperand OutLo = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Amt);
  SDOperand OutOps[] = { OutLo, OutHi };
  return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(MVT::i32, MVT::i32),
                     OutOps, 2);
}
static SDOperand LowerSRL_PARTS(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 &&
         Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRL!");

  // Expand into a bunch of logical ops.  Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
  SDOperand Lo = Op.getOperand(0);
  SDOperand Hi = Op.getOperand(1);
  SDOperand Amt = Op.getOperand(2);

  SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
                               DAG.getConstant(32, MVT::i32), Amt);
  SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt);
  SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1);
  SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3);
  SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
                               DAG.getConstant(-32U, MVT::i32));
  SDOperand Tmp6 = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Tmp5);
  SDOperand OutLo = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6);
  SDOperand OutHi = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Amt);
  SDOperand OutOps[] = { OutLo, OutHi };
  return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(MVT::i32, MVT::i32),
                     OutOps, 2);
}
static SDOperand LowerSRA_PARTS(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 &&
         Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRA!");

  // Expand into a bunch of logical ops, followed by a select_cc.
  SDOperand Lo = Op.getOperand(0);
  SDOperand Hi = Op.getOperand(1);
  SDOperand Amt = Op.getOperand(2);

  SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
                               DAG.getConstant(32, MVT::i32), Amt);
  SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt);
  SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1);
  SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3);
  SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
                               DAG.getConstant(-32U, MVT::i32));
  SDOperand Tmp6 = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Tmp5);
  SDOperand OutHi = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Amt);
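  // Tmp5 = Amt-32 is <= 0 exactly when Amt <= 32; in that range the OR
  // combination Tmp4 is the correct low word (PPCISD::SRL/SHL yield 0 at an
  // amount of 32, so Amt==32 still produces Hi).  For larger amounts the low
  // word is the sign-extending shift Tmp6 = Hi >> (Amt-32).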
  SDOperand OutLo = DAG.getSelectCC(Tmp5, DAG.getConstant(0, MVT::i32),
                                    Tmp4, Tmp6, ISD::SETLE);
  SDOperand OutOps[] = { OutLo, OutHi };
  return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(MVT::i32, MVT::i32),
                     OutOps, 2);
}
//===----------------------------------------------------------------------===//
// Vector related lowering.
//===----------------------------------------------------------------------===//

// If this is a vector of constants or undefs, get the bits.  A bit in
// UndefBits is set if the corresponding element of the vector is an
// ISD::UNDEF value.  For undefs, the corresponding VectorBits values are
// zero.  Return true if this is not an array of constants, false if it is.
//
static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t VectorBits[2],
                                       uint64_t UndefBits[2]) {
  // Start with zero'd results.
  VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0;

  unsigned EltBitSize = MVT::getSizeInBits(BV->getOperand(0).getValueType());
  for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
    SDOperand OpVal = BV->getOperand(i);

    unsigned PartNo = i >= e/2;     // In the upper 64 bits?
    unsigned SlotNo = e/2 - (i & (e/2-1))-1;  // Which subpiece of the uint64_t.
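    // Note that elements are packed big-endian style: element 0 lands in the
    // most significant slot of VectorBits[0], matching the AltiVec register
    // layout.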
    uint64_t EltBits = 0;
    if (OpVal.getOpcode() == ISD::UNDEF) {
      uint64_t EltUndefBits = ~0U >> (32-EltBitSize);
      UndefBits[PartNo] |= EltUndefBits << (SlotNo*EltBitSize);
      continue;
    } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
      EltBits = CN->getValue() & (~0U >> (32-EltBitSize));
    } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
      assert(CN->getValueType(0) == MVT::f32 &&
             "Only one legal FP vector type!");
      EltBits = FloatToBits(CN->getValue());
    } else {
      // Nonconstant element.
      return true;
    }

    VectorBits[PartNo] |= EltBits << (SlotNo*EltBitSize);
  }

  //printf("%llx %llx  %llx %llx\n",
  //       VectorBits[0], VectorBits[1], UndefBits[0], UndefBits[1]);
  return false;
}
// If this is a splat (repetition) of a value across the whole vector, return
// the smallest size that splats it.  For example, "0x01010101010101..." is a
// splat of 0x01, 0x0101, and 0x01010101.  We return SplatBits = 0x01 and
// SplatSize = 1 byte.
static bool isConstantSplat(const uint64_t Bits128[2],
                            const uint64_t Undef128[2],
                            unsigned &SplatBits, unsigned &SplatUndef,
                            unsigned &SplatSize) {

  // Don't let undefs prevent splats from matching.  See if the top 64-bits are
  // the same as the lower 64-bits, ignoring undefs.
  if ((Bits128[0] & ~Undef128[1]) != (Bits128[1] & ~Undef128[0]))
    return false;  // Can't be a splat if two pieces don't match.

  uint64_t Bits64  = Bits128[0] | Bits128[1];
  uint64_t Undef64 = Undef128[0] & Undef128[1];

  // Check that the top 32-bits are the same as the lower 32-bits, ignoring
  // undefs.
  if ((Bits64 & (~Undef64 >> 32)) != ((Bits64 >> 32) & ~Undef64))
    return false;  // Can't be a splat if two pieces don't match.

  uint32_t Bits32  = uint32_t(Bits64) | uint32_t(Bits64 >> 32);
  uint32_t Undef32 = uint32_t(Undef64) & uint32_t(Undef64 >> 32);

  // If the top 16-bits are different than the lower 16-bits, ignoring
  // undefs, we have an i32 splat.
  if ((Bits32 & (~Undef32 >> 16)) != ((Bits32 >> 16) & ~Undef32)) {
    SplatBits = Bits32;
    SplatUndef = Undef32;
    SplatSize = 4;
    return true;
  }

  uint16_t Bits16  = uint16_t(Bits32)  | uint16_t(Bits32 >> 16);
  uint16_t Undef16 = uint16_t(Undef32) & uint16_t(Undef32 >> 16);

  // If the top 8-bits are different than the lower 8-bits, ignoring
  // undefs, we have an i16 splat.
  if ((Bits16 & (uint16_t(~Undef16) >> 8)) != ((Bits16 >> 8) & ~Undef16)) {
    SplatBits = Bits16;
    SplatUndef = Undef16;
    SplatSize = 2;
    return true;
  }

  // Otherwise, we have an 8-bit splat.
  SplatBits  = uint8_t(Bits16)  | uint8_t(Bits16 >> 8);
  SplatUndef = uint8_t(Undef16) & uint8_t(Undef16 >> 8);
  SplatSize = 1;
  return true;
}
/// BuildSplatI - Build a canonical splat of Val with an element size of
/// SplatSize.  Cast the result to VT.
static SDOperand BuildSplatI(int Val, unsigned SplatSize, MVT::ValueType VT,
                             SelectionDAG &DAG) {
  assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");

  // Force vspltis[hw] -1 to vspltisb -1.
  if (Val == -1) SplatSize = 1;

  static const MVT::ValueType VTys[] = { // canonical VT to use for each size.
    MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
  };
  MVT::ValueType CanonicalVT = VTys[SplatSize-1];

  // Build a canonical splat for this value.
  SDOperand Elt = DAG.getConstant(Val, MVT::getVectorBaseType(CanonicalVT));
  SmallVector<SDOperand, 8> Ops;
  Ops.assign(MVT::getVectorNumElements(CanonicalVT), Elt);
  SDOperand Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT,
                              &Ops[0], Ops.size());
  return DAG.getNode(ISD::BIT_CONVERT, VT, Res);
}
/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
/// specified intrinsic ID.
static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand LHS, SDOperand RHS,
                                  SelectionDAG &DAG,
                                  MVT::ValueType DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = LHS.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT,
                     DAG.getConstant(IID, MVT::i32), LHS, RHS);
}

/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
/// specified intrinsic ID.
static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand Op0, SDOperand Op1,
                                  SDOperand Op2, SelectionDAG &DAG,
                                  MVT::ValueType DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = Op0.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT,
                     DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2);
}
/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
/// amount.  The result has the specified value type.
static SDOperand BuildVSLDOI(SDOperand LHS, SDOperand RHS, unsigned Amt,
                             MVT::ValueType VT, SelectionDAG &DAG) {
  // Force LHS/RHS to be the right type.
  LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, LHS);
  RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, RHS);

  SDOperand Ops[16];
  for (unsigned i = 0; i != 16; ++i)
    Ops[i] = DAG.getConstant(i+Amt, MVT::i32);
  SDOperand T = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, LHS, RHS,
                            DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops,16));
  return DAG.getNode(ISD::BIT_CONVERT, VT, T);
}
// If this is a case we can't handle, return null and let the default
// expansion code take care of it.  If we CAN select this case, and if it
// selects to a single instruction, return Op.  Otherwise, if we can codegen
// this case more efficiently than a constant pool load, lower it to the
// sequence of ops that should be used.
static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
  // If this is a vector of constants or undefs, get the bits.  A bit in
  // UndefBits is set if the corresponding element of the vector is an
  // ISD::UNDEF value.  For undefs, the corresponding VectorBits values are
  // zero.
  uint64_t VectorBits[2];
  uint64_t UndefBits[2];
  if (GetConstantBuildVectorBits(Op.Val, VectorBits, UndefBits))
    return SDOperand();   // Not a constant vector.

  // If this is a splat (repetition) of a value across the whole vector, return
  // the smallest size that splats it.  For example, "0x01010101010101..." is a
  // splat of 0x01, 0x0101, and 0x01010101.  We return SplatBits = 0x01 and
  // SplatSize = 1 byte.
  unsigned SplatBits, SplatUndef, SplatSize;
  if (isConstantSplat(VectorBits, UndefBits, SplatBits, SplatUndef, SplatSize)){
    bool HasAnyUndefs = (UndefBits[0] | UndefBits[1]) != 0;

    // First, handle single instruction cases.

    // All zeros?
    if (SplatBits == 0) {
      // Canonicalize all zero vectors to be v4i32.
      if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
        SDOperand Z = DAG.getConstant(0, MVT::i32);
        Z = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Z, Z, Z, Z);
        Op = DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Z);
      }
      return Op;
    }

    // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
    int32_t SextVal= int32_t(SplatBits << (32-8*SplatSize)) >> (32-8*SplatSize);
    if (SextVal >= -16 && SextVal <= 15)
      return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG);

    // Two instruction sequences.

    // If this value is in the range [-32,30] and is even, use:
    //    tmp = VSPLTI[bhw], result = add tmp, tmp
    if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) {
      Op = BuildSplatI(SextVal >> 1, SplatSize, Op.getValueType(), DAG);
      return DAG.getNode(ISD::ADD, Op.getValueType(), Op, Op);
    }

    // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
    // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
    // for fneg/fabs.
    if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
      // Make -1 with vspltisw -1:
      SDOperand OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG);
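      // vslw only uses the low 5 bits of each count element, so shifting the
      // all-ones vector by itself shifts every lane left by 31, producing
      // 0x8000_0000 in each lane.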
      // Make the VSLW intrinsic, computing 0x8000_0000.
      SDOperand Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
                                       OnesV, DAG);

      // xor by OnesV to invert it.
      Res = DAG.getNode(ISD::XOR, MVT::v4i32, Res, OnesV);
      return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res);
    }

    // Check to see if this is a wide variety of vsplti*, binop self cases.
    unsigned SplatBitSize = SplatSize*8;
    static const char SplatCsts[] = {
      -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
      -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
    };
    for (unsigned idx = 0; idx < sizeof(SplatCsts)/sizeof(SplatCsts[0]); ++idx){
      // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
      // cases which are ambiguous (e.g. formation of 0x8000_0000).
      int i = SplatCsts[idx];

      // Figure out what shift amount will be used by altivec if shifted by i
      // in this splat size.
      unsigned TypeShiftAmt = i & (SplatBitSize-1);

      // vsplti + shl self.
      if (SextVal == (i << (int)TypeShiftAmt)) {
        Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
          Intrinsic::ppc_altivec_vslw
        };
        return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG);
      }

      // vsplti + srl self.
      if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
        Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
          Intrinsic::ppc_altivec_vsrw
        };
        return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG);
      }

      // vsplti + sra self.  (Note: arithmetic shift here, unlike the
      // logical-shift srl case above.)
      if (SextVal == (int)i >> (int)TypeShiftAmt) {
        Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
          Intrinsic::ppc_altivec_vsraw
        };
        return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG);
      }

      // vsplti + rol self.
      if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                           ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
        Op = BuildSplatI(i, SplatSize, Op.getValueType(), DAG);
        static const unsigned IIDs[] = { // Intrinsic to use for each size.
          Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
          Intrinsic::ppc_altivec_vrlw
        };
        return BuildIntrinsicOp(IIDs[SplatSize-1], Op, Op, DAG);
      }

      // t = vsplti c, result = vsldoi t, t, 1
      if (SextVal == ((i << 8) | (i >> (TypeShiftAmt-8)))) {
        SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
        return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG);
      }
      // t = vsplti c, result = vsldoi t, t, 2
      if (SextVal == ((i << 16) | (i >> (TypeShiftAmt-16)))) {
        SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
        return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG);
      }
      // t = vsplti c, result = vsldoi t, t, 3
      if (SextVal == ((i << 24) | (i >> (TypeShiftAmt-24)))) {
        SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG);
        return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG);
      }
    }

    // Three instruction sequences.

    // Odd, in range [17,31]:  (vsplti C)-(vsplti -16).
    if (SextVal >= 0 && SextVal <= 31) {
      SDOperand LHS = BuildSplatI(SextVal-16, SplatSize, Op.getValueType(),DAG);
      SDOperand RHS = BuildSplatI(-16, SplatSize, Op.getValueType(), DAG);
      return DAG.getNode(ISD::SUB, Op.getValueType(), LHS, RHS);
    }
    // Odd, in range [-31,-17]:  (vsplti C)+(vsplti -16).
    if (SextVal >= -31 && SextVal <= 0) {
      SDOperand LHS = BuildSplatI(SextVal+16, SplatSize, Op.getValueType(),DAG);
      SDOperand RHS = BuildSplatI(-16, SplatSize, Op.getValueType(), DAG);
      return DAG.getNode(ISD::ADD, Op.getValueType(), LHS, RHS);
    }
  }

  return SDOperand();
}
/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
static SDOperand GeneratePerfectShuffle(unsigned PFEntry, SDOperand LHS,
                                        SDOperand RHS, SelectionDAG &DAG) {
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
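  // A perfect-shuffle table entry packs a cost in bits 31-30, an opcode in
  // bits 29-26, and two 13-bit sub-shuffle IDs.  Each ID is the base-9
  // encoding of the four 32-bit lane selectors (0-7 pick a lane of LHS/RHS,
  // 8 means undef), so IDs index back into the same 9^4-entry table.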
  enum {
    OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
    OP_VMRGHW,
    OP_VMRGLW,
    OP_VSPLTISW0,
    OP_VSPLTISW1,
    OP_VSPLTISW2,
    OP_VSPLTISW3,
    OP_VSLDOI4,
    OP_VSLDOI8,
    OP_VSLDOI12
  };

  if (OpNum == OP_COPY) {
    if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }

  SDOperand OpLHS, OpRHS;
  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG);
  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG);

  unsigned ShufIdxs[16];
  switch (OpNum) {
  default: assert(0 && "Unknown i32 permute!");
  case OP_VMRGHW:
    ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
    ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
    ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
    ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
    break;
  case OP_VMRGLW:
    ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
    ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
    ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
    ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
    break;
  case OP_VSPLTISW0:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+0;
    break;
  case OP_VSPLTISW1:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+4;
    break;
  case OP_VSPLTISW2:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+8;
    break;
  case OP_VSPLTISW3:
    for (unsigned i = 0; i != 16; ++i)
      ShufIdxs[i] = (i&3)+12;
    break;
  case OP_VSLDOI4:
    return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG);
  case OP_VSLDOI8:
    return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG);
  case OP_VSLDOI12:
    return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG);
  }
  SDOperand Ops[16];
  for (unsigned i = 0; i != 16; ++i)
    Ops[i] = DAG.getConstant(ShufIdxs[i], MVT::i32);

  return DAG.getNode(ISD::VECTOR_SHUFFLE, OpLHS.getValueType(), OpLHS, OpRHS,
                     DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16));
}
/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
/// is a shuffle we can handle in a single instruction, return it.  Otherwise,
/// return the code it can be lowered into.  Worst case, it can always be
/// lowered into a vperm.
static SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
  SDOperand V1 = Op.getOperand(0);
  SDOperand V2 = Op.getOperand(1);
  SDOperand PermMask = Op.getOperand(2);

  // Cases that are handled by instructions that take permute immediates
  // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
  // selected by the instruction selector.
  if (V2.getOpcode() == ISD::UNDEF) {
    if (PPC::isSplatShuffleMask(PermMask.Val, 1) ||
        PPC::isSplatShuffleMask(PermMask.Val, 2) ||
        PPC::isSplatShuffleMask(PermMask.Val, 4) ||
        PPC::isVPKUWUMShuffleMask(PermMask.Val, true) ||
        PPC::isVPKUHUMShuffleMask(PermMask.Val, true) ||
        PPC::isVSLDOIShuffleMask(PermMask.Val, true) != -1 ||
        PPC::isVMRGLShuffleMask(PermMask.Val, 1, true) ||
        PPC::isVMRGLShuffleMask(PermMask.Val, 2, true) ||
        PPC::isVMRGLShuffleMask(PermMask.Val, 4, true) ||
        PPC::isVMRGHShuffleMask(PermMask.Val, 1, true) ||
        PPC::isVMRGHShuffleMask(PermMask.Val, 2, true) ||
        PPC::isVMRGHShuffleMask(PermMask.Val, 4, true)) {
      return Op;
    }
  }

  // Altivec has a variety of "shuffle immediates" that take two vector inputs
  // and produce a fixed permutation.  If any of these match, do not lower to
  // VPERM.
  if (PPC::isVPKUWUMShuffleMask(PermMask.Val, false) ||
      PPC::isVPKUHUMShuffleMask(PermMask.Val, false) ||
      PPC::isVSLDOIShuffleMask(PermMask.Val, false) != -1 ||
      PPC::isVMRGLShuffleMask(PermMask.Val, 1, false) ||
      PPC::isVMRGLShuffleMask(PermMask.Val, 2, false) ||
      PPC::isVMRGLShuffleMask(PermMask.Val, 4, false) ||
      PPC::isVMRGHShuffleMask(PermMask.Val, 1, false) ||
      PPC::isVMRGHShuffleMask(PermMask.Val, 2, false) ||
      PPC::isVMRGHShuffleMask(PermMask.Val, 4, false))
    return Op;

  // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
  // perfect shuffle table to emit an optimal matching sequence.
  unsigned PFIndexes[4];
  bool isFourElementShuffle = true;
  for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
    unsigned EltNo = 8;   // Start out undef.
    for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
      if (PermMask.getOperand(i*4+j).getOpcode() == ISD::UNDEF)
        continue;   // Undef, ignore it.

      unsigned ByteSource =
        cast<ConstantSDNode>(PermMask.getOperand(i*4+j))->getValue();
      if ((ByteSource & 3) != j) {
        isFourElementShuffle = false;
        break;
      }

      if (EltNo == 8) {
        EltNo = ByteSource/4;
      } else if (EltNo != ByteSource/4) {
        isFourElementShuffle = false;
        break;
      }
    }
    PFIndexes[i] = EltNo;
  }

  // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
  // perfect shuffle vector to determine if it is cost effective to do this as
  // discrete instructions, or whether we should use a vperm.
  if (isFourElementShuffle) {
    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];

    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost  = (PFEntry >> 30);

    // Determining when to avoid vperm is tricky.  Many things affect the cost
    // of vperm, particularly how many times the perm mask needs to be computed.
    // For example, if the perm mask can be hoisted out of a loop or is already
    // used (perhaps because there are multiple permutes with the same shuffle
    // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
    // the loop requires an extra register.
    //
    // As a compromise, we only emit discrete instructions if the shuffle can be
    // generated in 3 or fewer operations.  When we have loop information
    // available, if this block is within a loop, we should avoid using vperm
    // for 3-operation perms and use a constant pool load instead.
    if (Cost < 3)
      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG);
  }

  // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
  // vector that will get spilled to the constant pool.
  if (V2.getOpcode() == ISD::UNDEF) V2 = V1;

  // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
  // that it is in input element units, not in bytes.  Convert now.
  MVT::ValueType EltVT = MVT::getVectorBaseType(V1.getValueType());
  unsigned BytesPerElement = MVT::getSizeInBits(EltVT)/8;

  SmallVector<SDOperand, 16> ResultMask;
  for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) {
    unsigned SrcElt;
    if (PermMask.getOperand(i).getOpcode() == ISD::UNDEF)
      SrcElt = 0;
    else
      SrcElt = cast<ConstantSDNode>(PermMask.getOperand(i))->getValue();

    for (unsigned j = 0; j != BytesPerElement; ++j)
      ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
                                           MVT::i8));
  }

  SDOperand VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8,
                                    &ResultMask[0], ResultMask.size());
  return DAG.getNode(PPCISD::VPERM, V1.getValueType(), V1, V2, VPermMask);
}
/// getAltivecCompareInfo - Given an intrinsic, return false if it is not an
/// altivec comparison.  If it is, return true and fill in Opc/isDot with
/// information about the intrinsic.
static bool getAltivecCompareInfo(SDOperand Intrin, int &CompareOpc,
                                  bool &isDot) {
  unsigned IntrinsicID = cast<ConstantSDNode>(Intrin.getOperand(0))->getValue();
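  // The CompareOpc values below are the VC-form extended opcodes of the
  // corresponding AltiVec vcmp* instructions; isDot selects the record ('.')
  // form, which also sets CR6.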
  CompareOpc = -1;
  isDot = false;
  switch (IntrinsicID) {
  default: return false;
    // Comparison predicates.
  case Intrinsic::ppc_altivec_vcmpbfp_p:  CompareOpc = 966; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc =   6; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc =  70; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break;
  case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break;

    // Normal Comparisons.
  case Intrinsic::ppc_altivec_vcmpbfp:    CompareOpc = 966; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpeqfp:   CompareOpc = 198; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpequb:   CompareOpc =   6; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpequh:   CompareOpc =  70; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpequw:   CompareOpc = 134; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgefp:   CompareOpc = 454; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtfp:   CompareOpc = 710; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtsb:   CompareOpc = 774; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtsh:   CompareOpc = 838; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtsw:   CompareOpc = 902; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtub:   CompareOpc = 518; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtuh:   CompareOpc = 582; isDot = 0; break;
  case Intrinsic::ppc_altivec_vcmpgtuw:   CompareOpc = 646; isDot = 0; break;
  }
  return true;
}
/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
/// lower, do it, otherwise return null.
static SDOperand LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) {
  // If this is a lowered altivec predicate compare, CompareOpc is set to the
  // opcode number of the comparison.
  int CompareOpc;
  bool isDot;
  if (!getAltivecCompareInfo(Op, CompareOpc, isDot))
    return SDOperand();    // Don't custom lower most intrinsics.

  // If this is a non-dot comparison, make the VCMP node and we are done.
  if (!isDot) {
    SDOperand Tmp = DAG.getNode(PPCISD::VCMP, Op.getOperand(2).getValueType(),
                                Op.getOperand(1), Op.getOperand(2),
                                DAG.getConstant(CompareOpc, MVT::i32));
    return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Tmp);
  }

  // Create the PPCISD altivec 'dot' comparison node.
  SDOperand Ops[] = {
    Op.getOperand(2),  // LHS
    Op.getOperand(3),  // RHS
    DAG.getConstant(CompareOpc, MVT::i32)
  };
  std::vector<MVT::ValueType> VTs;
  VTs.push_back(Op.getOperand(2).getValueType());
  VTs.push_back(MVT::Flag);
  SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3);

  // Now that we have the comparison, emit a copy from the CR to a GPR.
  // This is flagged to the above dot comparison.
  SDOperand Flags = DAG.getNode(PPCISD::MFCR, MVT::i32,
                                DAG.getRegister(PPC::CR6, MVT::i32),
                                CompNode.getValue(1));

  // Unpack the result based on how the target uses it.
  unsigned BitNo;   // Bit # of CR6.
  bool InvertBit;   // Invert result?
  switch (cast<ConstantSDNode>(Op.getOperand(1))->getValue()) {
  default:  // Can't happen, don't crash on invalid number though.
  case 0:   // Return the value of the EQ bit of CR6.
    BitNo = 0; InvertBit = false;
    break;
  case 1:   // Return the inverted value of the EQ bit of CR6.
    BitNo = 0; InvertBit = true;
    break;
  case 2:   // Return the value of the LT bit of CR6.
    BitNo = 2; InvertBit = false;
    break;
  case 3:   // Return the inverted value of the LT bit of CR6.
    BitNo = 2; InvertBit = true;
    break;
  }
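  // After MFCR, CR6 occupies bits 7..4 of the GPR (CR7 is in bits 3..0), with
  // SO, EQ, GT, LT at bits 4..7 of that nibble.  The EQ bit of CR6 is thus
  // GPR bit 5 and the LT bit is GPR bit 7, which is what the 8-(3-BitNo)
  // shift amount below extracts.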
  // Shift the bit into the low position.
  Flags = DAG.getNode(ISD::SRL, MVT::i32, Flags,
                      DAG.getConstant(8-(3-BitNo), MVT::i32));
  // Isolate the bit.
  Flags = DAG.getNode(ISD::AND, MVT::i32, Flags,
                      DAG.getConstant(1, MVT::i32));

  // If we are supposed to, toggle the bit.
  if (InvertBit)
    Flags = DAG.getNode(ISD::XOR, MVT::i32, Flags,
                        DAG.getConstant(1, MVT::i32));
  return Flags;
}
static SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) {
  // Create a stack slot that is 16-byte aligned.
  MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(16, 16);
  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

  // Store the input value into Value#0 of the stack slot.
  SDOperand Store = DAG.getStore(DAG.getEntryNode(),
                                 Op.getOperand(0), FIdx, NULL, 0);
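  // Only element 0 of a SCALAR_TO_VECTOR result is defined, so it is fine
  // that the reload below reads whatever happens to be in the rest of the
  // 16-byte slot.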
  return DAG.getLoad(Op.getValueType(), Store, FIdx, NULL, 0);
}
static SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG) {
  if (Op.getValueType() == MVT::v4i32) {
    SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDOperand Zero  = BuildSplatI(  0, 1, MVT::v4i32, DAG);
    SDOperand Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG); // +16 as shift amt.

    SDOperand RHSSwap =   // = vrlw RHS, 16
      BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG);

    // Shrinkify inputs to v8i16.
    LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, LHS);
    RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHS);
    RHSSwap = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, RHSSwap);
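    // Per 32-bit lane, a*b = aLo*bLo + ((aLo*bHi + aHi*bLo) << 16) mod 2^32.
    // vmulouh forms the aLo*bLo products; vmsumuhm of LHS with the
    // half-swapped RHS sums aHi*bLo + aLo*bHi, which gets shifted up 16 below.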
    // Low parts multiplied together, generating 32-bit results (we ignore the
    // top parts).
    SDOperand LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
                                        LHS, RHS, DAG, MVT::v4i32);

    SDOperand HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
                                        LHS, RHSSwap, Zero, DAG, MVT::v4i32);
    // Shift the high parts up 16 bits.
    HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, Neg16, DAG);
    return DAG.getNode(ISD::ADD, MVT::v4i32, LoProd, HiProd);
  } else if (Op.getValueType() == MVT::v8i16) {
    SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    SDOperand Zero = BuildSplatI(0, 1, MVT::v8i16, DAG);

    return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
                            LHS, RHS, Zero, DAG);
  } else if (Op.getValueType() == MVT::v16i8) {
    SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);

    // Multiply the even 8-bit parts, producing 16-bit products.
    SDOperand EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
                                           LHS, RHS, DAG, MVT::v8i16);
    EvenParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, EvenParts);

    // Multiply the odd 8-bit parts, producing 16-bit products.
    SDOperand OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
                                          LHS, RHS, DAG, MVT::v8i16);
    OddParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, OddParts);
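    // In big-endian lane order the low byte of each 16-bit product is the
    // odd-numbered byte, so the merge below takes bytes 1,3,5,... from both
    // product vectors.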
    // Merge the results together.
    SDOperand Ops[16];
    for (unsigned i = 0; i != 8; ++i) {
      Ops[i*2  ] = DAG.getConstant(2*i+1, MVT::i8);
      Ops[i*2+1] = DAG.getConstant(2*i+1+16, MVT::i8);
    }
    return DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, EvenParts, OddParts,
                       DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops, 16));
  } else {
    assert(0 && "Unknown mul to lower!");
    abort();
  }
}
/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDOperand PPCTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
  switch (Op.getOpcode()) {
  default: assert(0 && "Wasn't expecting to be able to lower this!");
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
  case ISD::SETCC:              return LowerSETCC(Op, DAG);
  case ISD::VASTART:            return LowerVASTART(Op, DAG, VarArgsFrameIndex);
  case ISD::FORMAL_ARGUMENTS:
    return LowerFORMAL_ARGUMENTS(Op, DAG, VarArgsFrameIndex);
  case ISD::CALL:               return LowerCALL(Op, DAG);
  case ISD::RET:                return LowerRET(Op, DAG);

  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
  case ISD::FP_TO_SINT:         return LowerFP_TO_SINT(Op, DAG);
  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG);

  // Lower 64-bit shifts.
  case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
  case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
  case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);

  // Vector-related lowering.
  case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::MUL:                return LowerMUL(Op, DAG);
  }
  return SDOperand();
}
//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

MachineBasicBlock *
PPCTargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
                                           MachineBasicBlock *BB) {
  assert((MI->getOpcode() == PPC::SELECT_CC_I4 ||
          MI->getOpcode() == PPC::SELECT_CC_I8 ||
          MI->getOpcode() == PPC::SELECT_CC_F4 ||
          MI->getOpcode() == PPC::SELECT_CC_F8 ||
          MI->getOpcode() == PPC::SELECT_CC_VRRC) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
  // control-flow pattern.  The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  ilist<MachineBasicBlock>::iterator It = BB;
  ++It;

  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC copy1MBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
  BuildMI(BB, MI->getOperand(4).getImmedValue(), 2)
    .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
  MachineFunction *F = BB->getParent();
  F->getBasicBlockList().insert(It, copy0MBB);
  F->getBasicBlockList().insert(It, sinkMBB);
  // Update machine-CFG edges by first adding all successors of the current
  // block to the new block which will contain the Phi node for the select.
  for(MachineBasicBlock::succ_iterator i = BB->succ_begin(),
      e = BB->succ_end(); i != e; ++i)
    sinkMBB->addSuccessor(*i);
  // Next, remove all successors of the current block, and add the true
  // and fallthrough blocks as its successors.
  while(!BB->succ_empty())
    BB->removeSuccessor(BB->succ_begin());
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(BB, PPC::PHI, 4, MI->getOperand(0).getReg())
    .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  delete MI;   // The pseudo instruction is gone now.
  return BB;
}
//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  TargetMachine &TM = getTargetMachine();
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::SHL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->getValue() == 0)   // 0 << V -> 0.
        return N->getOperand(0);
    }
    break;
  case ISD::SRL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->getValue() == 0)   // 0 >>u V -> 0.
        return N->getOperand(0);
    }
    break;
  case ISD::SRA:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->getValue() == 0 ||   //  0 >>s V -> 0.
          C->isAllOnesValue())    // -1 >>s V -> -1.
        return N->getOperand(0);
    }
    break;

  case ISD::SINT_TO_FP:
    if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
      if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) {
        // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores.
        // We allow the src/dst to be either f32/f64, but the intermediate
        // type must be i64.
        if (N->getOperand(0).getValueType() == MVT::i64) {
          SDOperand Val = N->getOperand(0).getOperand(0);
          if (Val.getValueType() == MVT::f32) {
            Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
            DCI.AddToWorklist(Val.Val);
          }

          Val = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Val);
          DCI.AddToWorklist(Val.Val);
          Val = DAG.getNode(PPCISD::FCFID, MVT::f64, Val);
          DCI.AddToWorklist(Val.Val);
          if (N->getValueType(0) == MVT::f32) {
            Val = DAG.getNode(ISD::FP_ROUND, MVT::f32, Val);
            DCI.AddToWorklist(Val.Val);
          }
          return Val;
        } else if (N->getOperand(0).getValueType() == MVT::i32) {
          // If the intermediate type is i32, we can avoid the load/store here
          // too.
        }
      }
    }
    break;
  case ISD::STORE:
    // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
    if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() &&
        N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
        N->getOperand(1).getValueType() == MVT::i32) {
      SDOperand Val = N->getOperand(1).getOperand(0);
      if (Val.getValueType() == MVT::f32) {
        Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
        DCI.AddToWorklist(Val.Val);
      }
      Val = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Val);
      DCI.AddToWorklist(Val.Val);

      Val = DAG.getNode(PPCISD::STFIWX, MVT::Other, N->getOperand(0), Val,
                        N->getOperand(2), N->getOperand(3));
      DCI.AddToWorklist(Val.Val);
      return Val;
    }

    // Turn STORE (BSWAP) -> sthbrx/stwbrx.
    if (N->getOperand(1).getOpcode() == ISD::BSWAP &&
        N->getOperand(1).Val->hasOneUse() &&
        (N->getOperand(1).getValueType() == MVT::i32 ||
         N->getOperand(1).getValueType() == MVT::i16)) {
      SDOperand BSwapOp = N->getOperand(1).getOperand(0);
      // Do an any-extend to 32-bits if this is a half-word input.
      if (BSwapOp.getValueType() == MVT::i16)
        BSwapOp = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, BSwapOp);
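      // The trailing VT operand records the original store width so the
      // instruction selector can pick sthbrx vs. stwbrx.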
      return DAG.getNode(PPCISD::STBRX, MVT::Other, N->getOperand(0), BSwapOp,
                         N->getOperand(2), N->getOperand(3),
                         DAG.getValueType(N->getOperand(1).getValueType()));
    }
    break;
  case ISD::BSWAP:
    // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
    if (ISD::isNON_EXTLoad(N->getOperand(0).Val) &&
        N->getOperand(0).hasOneUse() &&
        (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16)) {
      SDOperand Load = N->getOperand(0);
      LoadSDNode *LD = cast<LoadSDNode>(Load);
      // Create the byte-swapping load.
      std::vector<MVT::ValueType> VTs;
      VTs.push_back(MVT::i32);
      VTs.push_back(MVT::Other);
      SDOperand SV = DAG.getSrcValue(LD->getSrcValue(), LD->getSrcValueOffset());
      SDOperand Ops[] = {
        LD->getChain(),    // Chain
        LD->getBasePtr(),  // Ptr
        SV,                // SrcValue
        DAG.getValueType(N->getValueType(0)) // VT
      };
      SDOperand BSLoad = DAG.getNode(PPCISD::LBRX, VTs, Ops, 4);

      // If this is an i16 load, insert the truncate.
      SDOperand ResVal = BSLoad;
      if (N->getValueType(0) == MVT::i16)
        ResVal = DAG.getNode(ISD::TRUNCATE, MVT::i16, BSLoad);

      // First, combine the bswap away.  This makes the value produced by the
      // load dead.
      DCI.CombineTo(N, ResVal);

      // Next, combine the load away, we give it a bogus result value but a real
      // chain result.  The result value is dead because the bswap is dead.
      DCI.CombineTo(Load.Val, ResVal, BSLoad.getValue(1));

      // Return N so it doesn't get rechecked!
      return SDOperand(N, 0);
    }

    break;
  case PPCISD::VCMP: {
    // If a VCMPo node already exists with exactly the same operands as this
    // node, use its result instead of this node (VCMPo computes both a CR6 and
    // a normal output).
    //
    if (!N->getOperand(0).hasOneUse() &&
        !N->getOperand(1).hasOneUse() &&
        !N->getOperand(2).hasOneUse()) {

      // Scan all of the users of the LHS, looking for VCMPo's that match.
      SDNode *VCMPoNode = 0;

      SDNode *LHSN = N->getOperand(0).Val;
      for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
           UI != E; ++UI)
        if ((*UI)->getOpcode() == PPCISD::VCMPo &&
            (*UI)->getOperand(1) == N->getOperand(1) &&
            (*UI)->getOperand(2) == N->getOperand(2) &&
            (*UI)->getOperand(0) == N->getOperand(0)) {
          VCMPoNode = *UI;
          break;
        }

      // If there is no VCMPo node, or if the flag value has a single use, don't
      // transform this.
      if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
        break;

      // Look at the (necessarily single) use of the flag value.  If it has a
      // chain, this transformation is more complex.  Note that multiple things
      // could use the value result, which we should ignore.
      SDNode *FlagUser = 0;
      for (SDNode::use_iterator UI = VCMPoNode->use_begin();
           FlagUser == 0; ++UI) {
        assert(UI != VCMPoNode->use_end() && "Didn't find user!");
        SDNode *User = *UI;
        for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
          if (User->getOperand(i) == SDOperand(VCMPoNode, 1)) {
            FlagUser = User;
            break;
          }
        }
      }

      // If the user is a MFCR instruction, we know this is safe.  Otherwise we
      // give up for right now.
      if (FlagUser->getOpcode() == PPCISD::MFCR)
        return SDOperand(VCMPoNode, 0);
    }
    break;
  }
  case ISD::BR_CC: {
    // If this is a branch on an altivec predicate comparison, lower this so
    // that we don't have to do a MFCR: instead, branch directly on CR6.  This
    // lowering is done pre-legalize, because the legalizer lowers the predicate
    // compare down to code that is difficult to reassemble.
    ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
    SDOperand LHS = N->getOperand(2), RHS = N->getOperand(3);
    int CompareOpc;
    bool isDot;

    if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
        getAltivecCompareInfo(LHS, CompareOpc, isDot)) {
      assert(isDot && "Can't compare against a vector result!");

      // If this is a comparison against something other than 0/1, then we know
      // that the condition is never/always true.
      unsigned Val = cast<ConstantSDNode>(RHS)->getValue();
      if (Val != 0 && Val != 1) {
        if (CC == ISD::SETEQ)      // Cond never true, remove branch.
          return N->getOperand(0);
        // Always !=, turn it into an unconditional branch.
        return DAG.getNode(ISD::BR, MVT::Other,
                           N->getOperand(0), N->getOperand(4));
      }

      bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
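      // The dot-form predicate intrinsic produces 0 or 1, so "== 1" and
      // "!= 0" both mean "branch when the predicate is true"; the XOR above
      // captures all four combinations.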
      // Create the PPCISD altivec 'dot' comparison node.
      std::vector<MVT::ValueType> VTs;
      SDOperand Ops[] = {
        LHS.getOperand(2),  // LHS of compare
        LHS.getOperand(3),  // RHS of compare
        DAG.getConstant(CompareOpc, MVT::i32)
      };
      VTs.push_back(LHS.getOperand(2).getValueType());
      VTs.push_back(MVT::Flag);
      SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3);

      // Unpack the result based on how the target uses it.
      unsigned CompOpc;
      switch (cast<ConstantSDNode>(LHS.getOperand(1))->getValue()) {
      default:  // Can't happen, don't crash on invalid number though.
      case 0:   // Branch on the value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::BEQ : PPC::BNE;
        break;
      case 1:   // Branch on the inverted value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::BNE : PPC::BEQ;
        break;
      case 2:   // Branch on the value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::BLT : PPC::BGE;
        break;
      case 3:   // Branch on the inverted value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::BGE : PPC::BLT;
        break;
      }

      return DAG.getNode(PPCISD::COND_BRANCH, MVT::Other, N->getOperand(0),
                         DAG.getRegister(PPC::CR6, MVT::i32),
                         DAG.getConstant(CompOpc, MVT::i32),
                         N->getOperand(4), CompNode.getValue(1));
    }
    break;
  }
  }

  return SDOperand();
}
//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                       uint64_t Mask,
                                                       uint64_t &KnownZero,
                                                       uint64_t &KnownOne,
                                                       unsigned Depth) const {
  KnownZero = 0;
  KnownOne = 0;
  switch (Op.getOpcode()) {
  default: break;
  case PPCISD::LBRX: {
    // lhbrx is known to have the top bits cleared out.
    if (cast<VTSDNode>(Op.getOperand(3))->getVT() == MVT::i16)
      KnownZero = 0xFFFF0000;
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
      KnownZero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}
/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(char ConstraintLetter) const {
  switch (ConstraintLetter) {
  default: break;
  case 'b':
  case 'r':
  case 'f':
  case 'v':
  case 'y':
    return C_RegisterClass;
  }
  return TargetLowering::getConstraintType(ConstraintLetter);
}
std::pair<unsigned, const TargetRegisterClass*>
PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
    case 'r':   // R0-R31
      if (VT == MVT::i64 && PPCSubTarget.isPPC64())
        return std::make_pair(0U, PPC::G8RCRegisterClass);
      return std::make_pair(0U, PPC::GPRCRegisterClass);
    case 'f':
      if (VT == MVT::f32)
        return std::make_pair(0U, PPC::F4RCRegisterClass);
      else if (VT == MVT::f64)
        return std::make_pair(0U, PPC::F8RCRegisterClass);
      break;
    case 'v':
      return std::make_pair(0U, PPC::VRRCRegisterClass);
    case 'y':   // crrc
      return std::make_pair(0U, PPC::CRRCRegisterClass);
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
// isOperandValidForConstraint
SDOperand PPCTargetLowering::
isOperandValidForConstraint(SDOperand Op, char Letter, SelectionDAG &DAG) {
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    if (!isa<ConstantSDNode>(Op)) return SDOperand(0,0); // Must be an immediate.
    unsigned Value = cast<ConstantSDNode>(Op)->getValue();
    switch (Letter) {
    default: assert(0 && "Unknown constraint letter!");
    case 'I':  // "I" is a signed 16-bit constant.
      if ((short)Value == (int)Value) return Op;
      break;
    case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
    case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
      if ((short)Value == 0) return Op;
      break;
    case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
      if ((Value >> 16) == 0) return Op;
      break;
    case 'M':  // "M" is a constant that is greater than 31.
      if (Value > 31) return Op;
      break;
    case 'N':  // "N" is a positive constant that is an exact power of two.
      if ((int)Value > 0 && isPowerOf2_32(Value)) return Op;
      break;
    case 'O':  // "O" is the constant zero.
      if (Value == 0) return Op;
      break;
    case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
      if ((short)-Value == (int)-Value) return Op;
      break;
    }
    break;
  }
  }

  // Handle standard constraint letters.
  return TargetLowering::isOperandValidForConstraint(Op, Letter, DAG);
}
/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode.
bool PPCTargetLowering::isLegalAddressImmediate(int64_t V) const {
  // PPC allows a sign-extended 16-bit immediate field.
  return (V > -(1 << 16) && V < (1 << 16)-1);
}

bool PPCTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
  return TargetLowering::isLegalAddressImmediate(GV);
}