1 //===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by Chris Lattner and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the PPCISelLowering class.
12 //===----------------------------------------------------------------------===//
14 #include "PPCISelLowering.h"
15 #include "PPCTargetMachine.h"
16 #include "llvm/ADT/VectorExtras.h"
17 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
18 #include "llvm/CodeGen/MachineFrameInfo.h"
19 #include "llvm/CodeGen/MachineFunction.h"
20 #include "llvm/CodeGen/MachineInstrBuilder.h"
21 #include "llvm/CodeGen/SelectionDAG.h"
22 #include "llvm/CodeGen/SSARegMap.h"
23 #include "llvm/Constants.h"
24 #include "llvm/Function.h"
25 #include "llvm/Support/MathExtras.h"
26 #include "llvm/Target/TargetOptions.h"
/// PPCTargetLowering constructor - Record, for every (opcode, type) pair the
/// SelectionDAG legalizer may ask about, how PPC handles it (Legal / Expand /
/// Promote / Custom), register the legal value types with their register
/// classes, and enable the target DAG combines — all keyed off the
/// PPCSubtarget features carried by 'TM'.
29 PPCTargetLowering::PPCTargetLowering(TargetMachine &TM)
30 : TargetLowering(TM) {
32 // Fold away setcc operations if possible.
33 setSetCCIsExpensive();
36 // Use _setjmp/_longjmp instead of setjmp/longjmp.
37 setUseUnderscoreSetJmpLongJmp(true);
39 // Set up the register classes.
40 addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
41 addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
42 addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);
// FP immediates are not directly encodable on PPC; expand ConstantFP nodes
// (the legalizer will materialize them some other way, e.g. constant pool).
44 setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
45 setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
47 // PowerPC has no intrinsics for these particular operations
48 setOperationAction(ISD::MEMMOVE, MVT::Other, Expand);
49 setOperationAction(ISD::MEMSET, MVT::Other, Expand);
50 setOperationAction(ISD::MEMCPY, MVT::Other, Expand);
52 // PowerPC has an i16 but no i8 (or i1) SEXTLOAD
53 setOperationAction(ISD::SEXTLOAD, MVT::i1, Expand);
54 setOperationAction(ISD::SEXTLOAD, MVT::i8, Expand);
56 // PowerPC has no SREM/UREM instructions
57 setOperationAction(ISD::SREM, MVT::i32, Expand);
58 setOperationAction(ISD::UREM, MVT::i32, Expand);
60 // We don't support sin/cos/sqrt/fmod
61 setOperationAction(ISD::FSIN , MVT::f64, Expand);
62 setOperationAction(ISD::FCOS , MVT::f64, Expand);
63 setOperationAction(ISD::FREM , MVT::f64, Expand);
64 setOperationAction(ISD::FSIN , MVT::f32, Expand);
65 setOperationAction(ISD::FCOS , MVT::f32, Expand);
66 setOperationAction(ISD::FREM , MVT::f32, Expand);
68 // If we're enabling GP optimizations, use hardware square root
// (i.e. FSQRT is only expanded when the subtarget *lacks* the fsqrt
// instruction; otherwise it stays legal).
69 if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) {
70 setOperationAction(ISD::FSQRT, MVT::f64, Expand);
71 setOperationAction(ISD::FSQRT, MVT::f32, Expand);
74 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
75 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
77 // PowerPC does not have BSWAP, CTPOP or CTTZ
78 setOperationAction(ISD::BSWAP, MVT::i32 , Expand);
79 setOperationAction(ISD::CTPOP, MVT::i32 , Expand);
80 setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
82 // PowerPC does not have ROTR
83 setOperationAction(ISD::ROTR, MVT::i32 , Expand);
85 // PowerPC does not have Select
86 setOperationAction(ISD::SELECT, MVT::i32, Expand);
87 setOperationAction(ISD::SELECT, MVT::f32, Expand);
88 setOperationAction(ISD::SELECT, MVT::f64, Expand);
90 // PowerPC wants to turn select_cc of FP into fsel when possible.
91 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
92 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
94 // PowerPC wants to optimize integer setcc a bit
95 setOperationAction(ISD::SETCC, MVT::i32, Custom);
97 // PowerPC does not have BRCOND which requires SetCC
98 setOperationAction(ISD::BRCOND, MVT::Other, Expand);
100 // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
101 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
103 // PowerPC does not have [U|S]INT_TO_FP
104 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
105 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
// No direct GPR<->FPR moves; bit-casts go through memory via expansion.
107 setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
108 setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
110 // PowerPC does not have truncstore for i1.
111 setOperationAction(ISD::TRUNCSTORE, MVT::i1, Promote);
113 // Support label based line numbers.
114 setOperationAction(ISD::LOCATION, MVT::Other, Expand);
115 setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
116 // FIXME - use subtarget debug flags
117 if (!TM.getSubtarget<PPCSubtarget>().isDarwin())
118 setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);
120 // We want to legalize GlobalAddress and ConstantPool nodes into the
121 // appropriate instructions to materialize the address.
122 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
123 setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
125 // RET must be custom lowered, to meet ABI requirements
126 setOperationAction(ISD::RET , MVT::Other, Custom);
128 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
129 setOperationAction(ISD::VASTART , MVT::Other, Custom);
131 // Use the default implementation.
132 setOperationAction(ISD::VAARG , MVT::Other, Expand);
133 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
134 setOperationAction(ISD::VAEND , MVT::Other, Expand);
135 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
136 setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
137 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand);
139 if (TM.getSubtarget<PPCSubtarget>().is64Bit()) {
140 // They also have instructions for converting between i64 and fp.
141 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
142 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
143 // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT
144 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
146 // PowerPC does not have FP_TO_UINT on 32-bit implementations.
// NOTE(review): this Expand reads like the 'else' arm of the is64Bit() test
// above — the intervening "} else {" is not visible here; confirm against
// the full file.
147 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
150 if (TM.getSubtarget<PPCSubtarget>().has64BitRegs()) {
151 // 64 bit PowerPC implementations can support i64 types directly
152 addRegisterClass(MVT::i64, PPC::G8RCRegisterClass);
153 // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
154 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
156 // 32 bit PowerPC wants to expand i64 shifts itself.
157 setOperationAction(ISD::SHL, MVT::i64, Custom);
158 setOperationAction(ISD::SRL, MVT::i64, Custom);
159 setOperationAction(ISD::SRA, MVT::i64, Custom);
162 // First set operation action for all vector types to expand. Then we
163 // will selectively turn on ones that can be effectively codegen'd.
164 for (unsigned VT = (unsigned)MVT::Vector + 1;
165 VT != (unsigned)MVT::LAST_VALUETYPE; VT++) {
166 setOperationAction(ISD::ADD , (MVT::ValueType)VT, Expand);
167 setOperationAction(ISD::SUB , (MVT::ValueType)VT, Expand);
168 setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
169 setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
170 setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Expand);
172 // FIXME: We don't support any BUILD_VECTOR's yet. We should custom expand
173 // the ones we do, like splat(0.0) and splat(-0.0).
174 setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Expand);
// With Altivec, re-enable the vector operations the hardware supports and
// custom-lower shuffles/scalar_to_vector (see LowerOperation below).
177 if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) {
178 addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass);
179 addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass);
181 setOperationAction(ISD::ADD , MVT::v4f32, Legal);
182 setOperationAction(ISD::SUB , MVT::v4f32, Legal);
183 setOperationAction(ISD::MUL , MVT::v4f32, Legal);
184 setOperationAction(ISD::LOAD , MVT::v4f32, Legal);
185 setOperationAction(ISD::ADD , MVT::v4i32, Legal);
186 setOperationAction(ISD::LOAD , MVT::v4i32, Legal);
187 setOperationAction(ISD::LOAD , MVT::v16i8, Legal);
189 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i32, Custom);
190 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
192 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
193 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);
196 setSetCCResultContents(ZeroOrOneSetCCResult);
197 setStackPointerRegisterToSaveRestore(PPC::R1);
199 // We have target-specific dag combine patterns for the following nodes:
200 setTargetDAGCombine(ISD::SINT_TO_FP);
201 setTargetDAGCombine(ISD::STORE);
203 computeRegisterProperties();
/// getTargetNodeName - Return a human-readable name for the given
/// PPC-specific (PPCISD) DAG opcode, used by SelectionDAG debug dumps.
/// (The enclosing switch statement's opener and default arm fall outside
/// this excerpt.)
206 const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
209 case PPCISD::FSEL: return "PPCISD::FSEL";
210 case PPCISD::FCFID: return "PPCISD::FCFID";
211 case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
212 case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
213 case PPCISD::STFIWX: return "PPCISD::STFIWX";
214 case PPCISD::VMADDFP: return "PPCISD::VMADDFP";
215 case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP";
216 case PPCISD::LVE_X: return "PPCISD::LVE_X";
217 case PPCISD::VPERM: return "PPCISD::VPERM";
218 case PPCISD::Hi: return "PPCISD::Hi";
219 case PPCISD::Lo: return "PPCISD::Lo";
220 case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
221 case PPCISD::SRL: return "PPCISD::SRL";
222 case PPCISD::SRA: return "PPCISD::SRA";
223 case PPCISD::SHL: return "PPCISD::SHL";
224 case PPCISD::CALL: return "PPCISD::CALL";
225 case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
229 /// isFloatingPointZero - Return true if this is 0.0 or -0.0.
230 static bool isFloatingPointZero(SDOperand Op) {
231 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
232 return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
// A legalized FP zero may instead appear as a load from the constant pool;
// operand 1 of an (EXT)LOAD is its address, which may be a ConstantPoolSDNode.
233 else if (Op.getOpcode() == ISD::EXTLOAD || Op.getOpcode() == ISD::LOAD) {
234 // Maybe this has already been legalized into the constant pool?
235 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
236 if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->get()))
237 return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
243 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
244 /// specifies a splat of a single element that is suitable for input to
245 /// VSPLTB/VSPLTH/VSPLTW.
246 bool PPC::isSplatShuffleMask(SDNode *N) {
// The mask operand of a VECTOR_SHUFFLE is itself a BUILD_VECTOR of constant
// indices, hence the assert below.  (Function body elided in this excerpt.)
247 assert(N->getOpcode() == ISD::BUILD_VECTOR);
251 /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
252 /// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
// Precondition: the mask must already satisfy isSplatShuffleMask.
// (Function body elided in this excerpt.)
253 unsigned PPC::getVSPLTImmediate(SDNode *N) {
254 assert(isSplatShuffleMask(N));
260 /// LowerOperation - Provide custom lowering hooks for some operations.
// Dispatches on Op's opcode and returns a replacement DAG subgraph for each
// operation the constructor marked 'Custom'.  Several case labels and closing
// braces fall outside this excerpt; the code lines themselves are unchanged.
262 SDOperand PPCTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
263 switch (Op.getOpcode()) {
264 default: assert(0 && "Wasn't expecting to be able to lower this!");
265 case ISD::FP_TO_SINT: {
266 assert(MVT::isFloatingPoint(Op.getOperand(0).getValueType()));
267 SDOperand Src = Op.getOperand(0);
// fctiwz/fctidz operate on f64, so widen an f32 source first.
268 if (Src.getValueType() == MVT::f32)
269 Src = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Src);
272 switch (Op.getValueType()) {
273 default: assert(0 && "Unhandled FP_TO_SINT type in custom expander!");
// i32 result: round-toward-zero conversion kept in an FPR.
275 Tmp = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Src);
// i64 result: same via the 64-bit fctidz form.
278 Tmp = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Src);
282 // Convert the FP value to an int value through memory.
283 SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Tmp);
284 if (Op.getValueType() == MVT::i32)
285 Bits = DAG.getNode(ISD::TRUNCATE, MVT::i32, Bits);
288 case ISD::SINT_TO_FP: {
289 assert(MVT::i64 == Op.getOperand(0).getValueType() &&
290 "Unhandled SINT_TO_FP type in custom expander!");
// Move the i64 bits into an FPR, convert with fcfid, and round down to f32
// at the end if that is the requested result type.
291 SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0));
292 SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Bits);
293 if (MVT::f32 == Op.getValueType())
294 FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP);
297 case ISD::SELECT_CC: {
298 // Turn FP only select_cc's into fsel instructions.
299 if (!MVT::isFloatingPoint(Op.getOperand(0).getValueType()) ||
300 !MVT::isFloatingPoint(Op.getOperand(2).getValueType()))
303 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
305 // Cannot handle SETEQ/SETNE.
306 if (CC == ISD::SETEQ || CC == ISD::SETNE) break;
308 MVT::ValueType ResVT = Op.getValueType();
309 MVT::ValueType CmpVT = Op.getOperand(0).getValueType();
310 SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
311 SDOperand TV = Op.getOperand(2), FV = Op.getOperand(3);
313 // If the RHS of the comparison is a 0.0, we don't need to do the
314 // subtraction at all.
315 if (isFloatingPointZero(RHS))
317 default: break; // SETUO etc aren't handled by fsel.
// fsel natively selects on "operand >= 0"; the swaps and FNEG/FSUB rewrites
// below express each supported condition code in that form.
320 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
323 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
324 LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
325 return DAG.getNode(PPCISD::FSEL, ResVT, LHS, TV, FV);
328 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
331 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
332 LHS = DAG.getNode(ISD::FP_EXTEND, MVT::f64, LHS);
333 return DAG.getNode(PPCISD::FSEL, ResVT,
334 DAG.getNode(ISD::FNEG, MVT::f64, LHS), TV, FV);
// General (non-zero RHS) case: feed the sign of (LHS-RHS) or (RHS-LHS) into
// fsel, choosing the TV/FV order appropriate to the condition code.
339 default: break; // SETUO etc aren't handled by fsel.
342 Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
343 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
344 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
345 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
348 Cmp = DAG.getNode(ISD::FSUB, CmpVT, LHS, RHS);
349 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
350 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
351 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
354 Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
355 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
356 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
357 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, FV, TV);
360 Cmp = DAG.getNode(ISD::FSUB, CmpVT, RHS, LHS);
361 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
362 Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp);
363 return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV);
// i64 SHL on a 32-bit target: build the result from 32-bit shifts.  Relies
// on PPC's defined behavior for shift amounts >= 32 (result is zero).
368 assert(Op.getValueType() == MVT::i64 &&
369 Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SHL!");
370 // The generic code does a fine job expanding shift by a constant.
371 if (isa<ConstantSDNode>(Op.getOperand(1))) break;
373 // Otherwise, expand into a bunch of logical ops. Note that these ops
374 // depend on the PPC behavior for oversized shift amounts.
375 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
376 DAG.getConstant(0, MVT::i32));
377 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
378 DAG.getConstant(1, MVT::i32));
379 SDOperand Amt = Op.getOperand(1);
381 SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
382 DAG.getConstant(32, MVT::i32), Amt);
383 SDOperand Tmp2 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Amt);
384 SDOperand Tmp3 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Tmp1);
385 SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3);
386 SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
387 DAG.getConstant(-32U, MVT::i32));
388 SDOperand Tmp6 = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Tmp5);
389 SDOperand OutHi = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6);
390 SDOperand OutLo = DAG.getNode(PPCISD::SHL, MVT::i32, Lo, Amt);
391 return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi);
// i64 SRL: mirror image of the SHL expansion above.
// NOTE(review): the assert message below says "Unexpected SHL!" — this looks
// like a copy/paste slip; it guards the SRL case (message text left as-is).
394 assert(Op.getValueType() == MVT::i64 &&
395 Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SHL!");
396 // The generic code does a fine job expanding shift by a constant.
397 if (isa<ConstantSDNode>(Op.getOperand(1))) break;
399 // Otherwise, expand into a bunch of logical ops. Note that these ops
400 // depend on the PPC behavior for oversized shift amounts.
401 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
402 DAG.getConstant(0, MVT::i32));
403 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
404 DAG.getConstant(1, MVT::i32));
405 SDOperand Amt = Op.getOperand(1);
407 SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
408 DAG.getConstant(32, MVT::i32), Amt);
409 SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt);
410 SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1);
411 SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3);
412 SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
413 DAG.getConstant(-32U, MVT::i32));
414 SDOperand Tmp6 = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Tmp5);
415 SDOperand OutLo = DAG.getNode(ISD::OR, MVT::i32, Tmp4, Tmp6);
416 SDOperand OutHi = DAG.getNode(PPCISD::SRL, MVT::i32, Hi, Amt);
417 return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi);
// i64 SRA: like SRL but the high word uses an arithmetic shift, and the low
// word needs a select_cc to pick the right source when Amt >= 32.
420 assert(Op.getValueType() == MVT::i64 &&
421 Op.getOperand(1).getValueType() == MVT::i32 && "Unexpected SRA!");
422 // The generic code does a fine job expanding shift by a constant.
423 if (isa<ConstantSDNode>(Op.getOperand(1))) break;
425 // Otherwise, expand into a bunch of logical ops, followed by a select_cc.
426 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
427 DAG.getConstant(0, MVT::i32));
428 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
429 DAG.getConstant(1, MVT::i32));
430 SDOperand Amt = Op.getOperand(1);
432 SDOperand Tmp1 = DAG.getNode(ISD::SUB, MVT::i32,
433 DAG.getConstant(32, MVT::i32), Amt);
434 SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, MVT::i32, Lo, Amt);
435 SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, MVT::i32, Hi, Tmp1);
436 SDOperand Tmp4 = DAG.getNode(ISD::OR , MVT::i32, Tmp2, Tmp3);
437 SDOperand Tmp5 = DAG.getNode(ISD::ADD, MVT::i32, Amt,
438 DAG.getConstant(-32U, MVT::i32));
439 SDOperand Tmp6 = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Tmp5);
440 SDOperand OutHi = DAG.getNode(PPCISD::SRA, MVT::i32, Hi, Amt);
441 SDOperand OutLo = DAG.getSelectCC(Tmp5, DAG.getConstant(0, MVT::i32),
442 Tmp4, Tmp6, ISD::SETLE);
443 return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OutLo, OutHi);
445 case ISD::ConstantPool: {
446 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
447 Constant *C = CP->get();
448 SDOperand CPI = DAG.getTargetConstantPool(C, MVT::i32, CP->getAlignment());
449 SDOperand Zero = DAG.getConstant(0, MVT::i32);
451 if (getTargetMachine().getRelocationModel() == Reloc::Static) {
452 // Generate non-pic code that has direct accesses to the constant pool.
453 // The address of the global is just (hi(&g)+lo(&g)).
454 SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, CPI, Zero);
455 SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, CPI, Zero);
456 return DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);
459 // Only lower ConstantPool on Darwin.
460 if (!getTargetMachine().getSubtarget<PPCSubtarget>().isDarwin()) break;
461 SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, CPI, Zero);
462 if (getTargetMachine().getRelocationModel() == Reloc::PIC) {
463 // With PIC, the first instruction is actually "GR+hi(&G)".
464 Hi = DAG.getNode(ISD::ADD, MVT::i32,
465 DAG.getNode(PPCISD::GlobalBaseReg, MVT::i32), Hi);
468 SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, CPI, Zero);
469 Lo = DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);
472 case ISD::GlobalAddress: {
473 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
474 GlobalValue *GV = GSDN->getGlobal();
475 SDOperand GA = DAG.getTargetGlobalAddress(GV, MVT::i32, GSDN->getOffset());
476 SDOperand Zero = DAG.getConstant(0, MVT::i32);
478 if (getTargetMachine().getRelocationModel() == Reloc::Static) {
479 // Generate non-pic code that has direct accesses to globals.
480 // The address of the global is just (hi(&g)+lo(&g)).
481 SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, GA, Zero);
482 SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, GA, Zero);
483 return DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);
486 // Only lower GlobalAddress on Darwin.
487 if (!getTargetMachine().getSubtarget<PPCSubtarget>().isDarwin()) break;
489 SDOperand Hi = DAG.getNode(PPCISD::Hi, MVT::i32, GA, Zero);
490 if (getTargetMachine().getRelocationModel() == Reloc::PIC) {
491 // With PIC, the first instruction is actually "GR+hi(&G)".
492 Hi = DAG.getNode(ISD::ADD, MVT::i32,
493 DAG.getNode(PPCISD::GlobalBaseReg, MVT::i32), Hi);
496 SDOperand Lo = DAG.getNode(PPCISD::Lo, MVT::i32, GA, Zero);
497 Lo = DAG.getNode(ISD::ADD, MVT::i32, Hi, Lo);
// Non-weak, non-external globals can use the hi/lo address directly.
499 if (!GV->hasWeakLinkage() && !GV->hasLinkOnceLinkage() &&
500 (!GV->isExternal() || GV->hasNotBeenReadFromBytecode()))
503 // If the global is weak or external, we have to go through the lazy
// pointer stub: load the real address out of the computed (hi+lo) slot.
505 return DAG.getLoad(MVT::i32, DAG.getEntryNode(), Lo, DAG.getSrcValue(0));
// SETCC custom lowering (case label falls outside this excerpt).
508 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
510 // If we're comparing for equality to zero, expose the fact that this is
511 // implented as a ctlz/srl pair on ppc, so that the dag combiner can
512 // fold the new nodes.
513 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
514 if (C->isNullValue() && CC == ISD::SETEQ) {
515 MVT::ValueType VT = Op.getOperand(0).getValueType();
516 SDOperand Zext = Op.getOperand(0);
519 Zext = DAG.getNode(ISD::ZERO_EXTEND, VT, Op.getOperand(0));
// (x == 0) computed as ctlz(x) >> log2(bitwidth): only x==0 has all bits
// leading-zero, making the shifted result 1.
521 unsigned Log2b = Log2_32(MVT::getSizeInBits(VT));
522 SDOperand Clz = DAG.getNode(ISD::CTLZ, VT, Zext);
523 SDOperand Scc = DAG.getNode(ISD::SRL, VT, Clz,
524 DAG.getConstant(Log2b, getShiftAmountTy()));
525 return DAG.getNode(ISD::TRUNCATE, getSetCCResultTy(), Scc);
527 // Leave comparisons against 0 and -1 alone for now, since they're usually
528 // optimized. FIXME: revisit this when we can custom lower all setcc
530 if (C->isAllOnesValue() || C->isNullValue())
534 // If we have an integer seteq/setne, turn it into a compare against zero
535 // by subtracting the rhs from the lhs, which is faster than setting a
536 // condition register, reading it back out, and masking the correct bit.
537 MVT::ValueType LHSVT = Op.getOperand(0).getValueType();
538 if (MVT::isInteger(LHSVT) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
539 MVT::ValueType VT = Op.getValueType();
540 SDOperand Sub = DAG.getNode(ISD::SUB, LHSVT, Op.getOperand(0),
542 return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC);
547 // vastart just stores the address of the VarArgsFrameIndex slot into the
548 // memory location argument.
549 // FIXME: Replace MVT::i32 with PointerTy
550 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32);
551 return DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0), FR,
552 Op.getOperand(1), Op.getOperand(2));
// RET custom lowering: copy the return value(s) into the ABI result
// registers (R3/R4 for integers, F1 for FP) and glue them to RET_FLAG.
557 switch(Op.getNumOperands()) {
559 assert(0 && "Do not know how to return this many arguments!");
562 return SDOperand(); // ret void is legal
564 MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
565 unsigned ArgReg = MVT::isInteger(ArgVT) ? PPC::R3 : PPC::F1;
566 Copy = DAG.getCopyToReg(Op.getOperand(0), ArgReg, Op.getOperand(1),
// Two-register return: one half in R3, the other in R4.
571 Copy = DAG.getCopyToReg(Op.getOperand(0), PPC::R3, Op.getOperand(2),
573 Copy = DAG.getCopyToReg(Copy, PPC::R4, Op.getOperand(1),Copy.getValue(1));
576 return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Copy, Copy.getValue(1));
578 case ISD::SCALAR_TO_VECTOR: {
579 // Create a stack slot that is 16-byte aligned.
580 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
581 int FrameIdx = FrameInfo->CreateStackObject(16, 16);
582 SDOperand FIdx = DAG.getFrameIndex(FrameIdx, MVT::i32);
584 // Store the input value into Value#0 of the stack slot.
585 unsigned InSize = MVT::getSizeInBits(Op.getOperand(0).getValueType())/8;
586 FIdx = DAG.getNode(ISD::ADD, MVT::i32, FIdx,
587 DAG.getConstant(16-InSize, MVT::i32));
588 SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, DAG.getEntryNode(),
589 Op.getOperand(0), FIdx,DAG.getSrcValue(NULL));
// Reload the stored element into a vector register with an lve*x-style load.
590 return DAG.getNode(PPCISD::LVE_X, Op.getValueType(), Store, FIdx,
591 DAG.getSrcValue(NULL));
593 case ISD::VECTOR_SHUFFLE: {
594 // FIXME: Cases that are handled by instructions that take permute
595 // immediates (such as vsplt*) shouldn't be lowered here! Also handle cases
596 // that are cheaper to do as multiple such instructions than as a constant
597 // pool load/vperm pair.
599 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
600 // vector that will get spilled to the constant pool.
601 SDOperand V1 = Op.getOperand(0);
602 SDOperand V2 = Op.getOperand(1);
603 if (V2.getOpcode() == ISD::UNDEF) V2 = V1;
604 SDOperand PermMask = Op.getOperand(2);
606 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
607 // that it is in input element units, not in bytes. Convert now.
608 MVT::ValueType EltVT = MVT::getVectorBaseType(V1.getValueType());
609 unsigned BytesPerElement = MVT::getSizeInBits(EltVT)/8;
611 std::vector<SDOperand> ResultMask;
612 for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) {
613 unsigned SrcElt =cast<ConstantSDNode>(PermMask.getOperand(i))->getValue();
// Expand each element index into its constituent byte indices.
615 for (unsigned j = 0; j != BytesPerElement; ++j)
616 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
620 SDOperand VPermMask =DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, ResultMask);
621 return DAG.getNode(PPCISD::VPERM, V1.getValueType(), V1, V2, VPermMask);
/// LowerArguments - Lower this function's incoming formal arguments: the
/// leading words are taken from R3-R10 / F1-F13 per the Darwin-style PPC ABI,
/// the rest from the caller's parameter area starting at offset 24.  Returns
/// one SDOperand per formal argument.  (Several switch labels and closing
/// braces fall outside this excerpt; the code lines are unchanged.)
627 std::vector<SDOperand>
628 PPCTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
630 // add beautiful description of PPC stack frame format, or at least some docs
632 MachineFunction &MF = DAG.getMachineFunction();
633 MachineFrameInfo *MFI = MF.getFrameInfo();
634 MachineBasicBlock& BB = MF.front();
635 SSARegMap *RegMap = MF.getSSARegMap();
636 std::vector<SDOperand> ArgValues;
// Offset 24 skips the fixed linkage area; 8 GPRs and 13 FPRs are available
// for argument passing (see the GPR[]/FPR[] tables below).
638 unsigned ArgOffset = 24;
639 unsigned GPR_remaining = 8;
640 unsigned FPR_remaining = 13;
641 unsigned GPR_idx = 0, FPR_idx = 0;
642 static const unsigned GPR[] = {
643 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
644 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
646 static const unsigned FPR[] = {
647 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
648 PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
651 // Add DAG nodes to load the arguments... On entry to a function on PPC,
652 // the arguments start at offset 24, although they are likely to be passed
654 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
655 SDOperand newroot, argt;
657 bool needsLoad = false;
658 bool ArgLive = !I->use_empty();
659 MVT::ValueType ObjectVT = getValueType(I->getType());
662 default: assert(0 && "Unhandled argument type!");
// Small integer arguments arrive widened to i32 in a GPR; assert the
// original width (sext/zext per signedness) and truncate back down.
669 if (GPR_remaining > 0) {
670 unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
671 MF.addLiveIn(GPR[GPR_idx], VReg);
672 argt = newroot = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32);
673 if (ObjectVT != MVT::i32) {
674 unsigned AssertOp = I->getType()->isSigned() ? ISD::AssertSext
676 argt = DAG.getNode(AssertOp, MVT::i32, argt,
677 DAG.getValueType(ObjectVT));
678 argt = DAG.getNode(ISD::TRUNCATE, ObjectVT, argt);
687 if (GPR_remaining > 0) {
688 SDOperand argHi, argLo;
689 unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
690 MF.addLiveIn(GPR[GPR_idx], VReg);
691 argHi = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32);
692 // If we have two or more remaining argument registers, then both halves
693 // of the i64 can be sourced from there. Otherwise, the lower half will
694 // have to come off the stack. This can happen when an i64 is preceded
695 // by 28 bytes of arguments.
696 if (GPR_remaining > 1) {
697 unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
698 MF.addLiveIn(GPR[GPR_idx+1], VReg);
699 argLo = DAG.getCopyFromReg(argHi, VReg, MVT::i32);
701 int FI = MFI->CreateFixedObject(4, ArgOffset+4);
702 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
703 argLo = DAG.getLoad(MVT::i32, DAG.getEntryNode(), FIN,
704 DAG.getSrcValue(NULL));
706 // Build the outgoing arg thingy
707 argt = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, argLo, argHi);
715 ObjSize = (ObjectVT == MVT::f64) ? 8 : 4;
717 if (FPR_remaining > 0) {
723 if (FPR_remaining > 0) {
// FP arguments arrive in F1-F13; pick the register class matching the width.
725 if (ObjectVT == MVT::f32)
726 VReg = RegMap->createVirtualRegister(&PPC::F4RCRegClass);
728 VReg = RegMap->createVirtualRegister(&PPC::F8RCRegClass);
729 MF.addLiveIn(FPR[FPR_idx], VReg);
730 argt = newroot = DAG.getCopyFromReg(DAG.getRoot(), VReg, ObjectVT);
739 // We need to load the argument to a virtual register if we determined above
740 // that we ran out of physical registers of the appropriate type
742 unsigned SubregOffset = 0;
// Sub-word arguments sit right-justified inside their 4-byte stack slot
// (big-endian layout), hence the 3- or 2-byte adjustment.
743 if (ObjectVT == MVT::i8 || ObjectVT == MVT::i1) SubregOffset = 3;
744 if (ObjectVT == MVT::i16) SubregOffset = 2;
745 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
746 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
747 FIN = DAG.getNode(ISD::ADD, MVT::i32, FIN,
748 DAG.getConstant(SubregOffset, MVT::i32));
749 argt = newroot = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
750 DAG.getSrcValue(NULL));
753 // Every 4 bytes of argument space consumes one of the GPRs available for
755 if (GPR_remaining > 0) {
756 unsigned delta = (GPR_remaining > 1 && ObjSize == 8) ? 2 : 1;
757 GPR_remaining -= delta;
760 ArgOffset += ObjSize;
762 DAG.setRoot(newroot.getValue(1));
764 ArgValues.push_back(argt);
767 // If the function takes variable number of arguments, make a frame index for
768 // the start of the first vararg value... for expansion of llvm.va_start.
770 VarArgsFrameIndex = MFI->CreateFixedObject(4, ArgOffset);
771 SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32);
772 // If this function is vararg, store any remaining integer argument regs
773 // to their spots on the stack so that they may be loaded by deferencing the
774 // result of va_next.
775 std::vector<SDOperand> MemOps;
776 for (; GPR_remaining > 0; --GPR_remaining, ++GPR_idx) {
777 unsigned VReg = RegMap->createVirtualRegister(&PPC::GPRCRegClass);
778 MF.addLiveIn(GPR[GPR_idx], VReg);
779 SDOperand Val = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32);
780 SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Val.getValue(1),
781 Val, FIN, DAG.getSrcValue(NULL));
782 MemOps.push_back(Store);
783 // Increment the address by four for the next argument to store
784 SDOperand PtrOff = DAG.getConstant(4, getPointerTy());
785 FIN = DAG.getNode(ISD::ADD, MVT::i32, FIN, PtrOff);
// Chain all spill stores (plus the current root) into one TokenFactor.
787 if (!MemOps.empty()) {
788 MemOps.push_back(DAG.getRoot());
789 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, MemOps));
793 // Finally, inform the code generator which regs we return values in.
794 switch (getValueType(F.getReturnType())) {
795 default: assert(0 && "Unknown type!");
796 case MVT::isVoid: break;
// Integer results live out in R3 (R3+R4 for the two-register case),
// FP results in F1.
801 MF.addLiveOut(PPC::R3);
804 MF.addLiveOut(PPC::R3);
805 MF.addLiveOut(PPC::R4);
809 MF.addLiveOut(PPC::F1);
/// LowerCallTo - Lower an outgoing call to a PPCISD::CALL node, marshaling
/// the arguments according to the 32-bit PowerPC convention used here: a
/// 24-byte linkage area, 8 GPRs and 13 FPRs available for argument passing,
/// with overflow arguments (and vararg FP shadows) stored to the stack.
/// Returns the <result value, output chain> pair.
/// NOTE(review): this view of the function is elided -- several original
/// lines are missing between the statements below -- so comments about
/// control flow that spans a gap are marked as assumptions.
816 std::pair<SDOperand, SDOperand>
817 PPCTargetLowering::LowerCallTo(SDOperand Chain,
818 const Type *RetTy, bool isVarArg,
819 unsigned CallingConv, bool isTailCall,
820 SDOperand Callee, ArgListTy &Args,
822 // args_to_use will accumulate outgoing args for the PPCISD::CALL case in
823 // SelectExpr to use to put the arguments in the appropriate registers.
824 std::vector<SDOperand> args_to_use;
826 // Count how many bytes are to be pushed on the stack, including the linkage
827 // area, and parameter passing area.
828 unsigned NumBytes = 24;
// Presumably the (elided) loop below adds each argument's size to NumBytes
// before CALLSEQ_START is emitted -- verify against the full file.
831 Chain = DAG.getCALLSEQ_START(Chain,
832 DAG.getConstant(NumBytes, getPointerTy()));
834 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
835 switch (getValueType(Args[i].second)) {
836 default: assert(0 && "Unknown value type!");
851 // Just to be safe, we'll always reserve the full 24 bytes of linkage area
852 // plus 32 bytes of argument space in case any called code gets funky on us.
853 // (Required by ABI to support var arg)
854 if (NumBytes < 56) NumBytes = 56;
856 // Adjust the stack pointer for the new arguments...
857 // These operations are automatically eliminated by the prolog/epilog pass
// NOTE(review): a CALLSEQ_START also appears above; elided lines likely gate
// one of the two (e.g. an early return for the no-argument case) -- confirm.
858 Chain = DAG.getCALLSEQ_START(Chain,
859 DAG.getConstant(NumBytes, getPointerTy()));
861 // Set up a copy of the stack pointer for use loading and storing any
862 // arguments that may not fit in the registers available for argument
// passing (R1 serves as the stack pointer here).
864 SDOperand StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
866 // Figure out which arguments are going to go in registers, and which in
867 // memory. Also, if this is a vararg function, floating point operations
868 // must be stored to our stack, and loaded into integer regs as well, if
869 // any integer regs are available for argument passing.
// ArgOffset starts past the 24-byte linkage area; 8 GPRs / 13 FPRs are
// available for argument passing.
870 unsigned ArgOffset = 24;
871 unsigned GPR_remaining = 8;
872 unsigned FPR_remaining = 13;
874 std::vector<SDOperand> MemOps;
875 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
876 // PtrOff will be used to store the current argument to the stack if a
877 // register cannot be found for it.
878 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
879 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
880 MVT::ValueType ArgVT = getValueType(Args[i].second);
// (Elided: the switch on ArgVT whose cases appear below.)
883 default: assert(0 && "Unexpected ValueType for argument!");
// --- small integer args (presumably the i1/i8/i16 cases, elided) ---
887 // Promote the integer to 32 bits. If the input type is signed use a
888 // sign extend, otherwise use a zero extend.
889 if (Args[i].second->isSigned())
890 Args[i].first =DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Args[i].first);
// (Elided "else": the unsigned counterpart zero-extends.)
892 Args[i].first =DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Args[i].first);
// Use a GPR if one remains; otherwise spill the value to its stack slot.
895 if (GPR_remaining > 0) {
896 args_to_use.push_back(Args[i].first);
899 MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
900 Args[i].first, PtrOff,
901 DAG.getSrcValue(NULL)));
// --- i64 args: split into two i32 halves, Hi (element 1) passed first ---
906 // If we have one free GPR left, we can place the upper half of the i64
907 // in it, and store the other half to the stack. If we have two or more
908 // free GPRs, then we can pass both halves of the i64 in registers.
909 if (GPR_remaining > 0) {
910 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
911 Args[i].first, DAG.getConstant(1, MVT::i32));
912 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
913 Args[i].first, DAG.getConstant(0, MVT::i32));
914 args_to_use.push_back(Hi);
916 if (GPR_remaining > 0) {
917 args_to_use.push_back(Lo);
// Only one GPR was free: store the low half at PtrOff+4 instead.
920 SDOperand ConstFour = DAG.getConstant(4, getPointerTy());
921 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, PtrOff, ConstFour);
922 MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
923 Lo, PtrOff, DAG.getSrcValue(NULL)));
// No GPRs left: the whole i64 goes to the stack.
926 MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
927 Args[i].first, PtrOff,
928 DAG.getSrcValue(NULL)));
// --- f32/f64 args: use an FPR when available ---
934 if (FPR_remaining > 0) {
935 args_to_use.push_back(Args[i].first);
// (Elided: presumably the isVarArg branch begins here.)
938 SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, Chain,
939 Args[i].first, PtrOff,
940 DAG.getSrcValue(NULL));
941 MemOps.push_back(Store);
942 // Float varargs are always shadowed in available integer registers
// Reload the stored FP bits as i32 so the callee's va_arg code can find
// them in GPRs; the loads are chained after the store above.
943 if (GPR_remaining > 0) {
944 SDOperand Load = DAG.getLoad(MVT::i32, Store, PtrOff,
945 DAG.getSrcValue(NULL));
946 MemOps.push_back(Load.getValue(1));
947 args_to_use.push_back(Load);
// An f64 vararg shadows two GPRs: reload the second word at +4.
950 if (GPR_remaining > 0 && MVT::f64 == ArgVT) {
951 SDOperand ConstFour = DAG.getConstant(4, getPointerTy());
952 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, PtrOff, ConstFour);
953 SDOperand Load = DAG.getLoad(MVT::i32, Store, PtrOff,
954 DAG.getSrcValue(NULL));
955 MemOps.push_back(Load.getValue(1));
956 args_to_use.push_back(Load);
// Non-vararg FP args in FPRs still consume GPR "shadow" slots: pass UNDEF
// i32 placeholders so later integer args land in the right registers.
960 // If we have any FPRs remaining, we may also have GPRs remaining.
961 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
963 if (GPR_remaining > 0) {
964 args_to_use.push_back(DAG.getNode(ISD::UNDEF, MVT::i32));
967 if (GPR_remaining > 0 && MVT::f64 == ArgVT) {
968 args_to_use.push_back(DAG.getNode(ISD::UNDEF, MVT::i32));
// No FPRs left: the FP arg goes to the stack.
973 MemOps.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
974 Args[i].first, PtrOff,
975 DAG.getSrcValue(NULL)));
// In this (FP) case the stack slot is 4 bytes for f32, 8 for f64; the
// integer cases presumably bump ArgOffset on elided lines.
977 ArgOffset += (ArgVT == MVT::f32) ? 4 : 8;
// Tie all argument stores/loads together (presumably guarded by a
// !MemOps.empty() check on an elided line).
982 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, MemOps);
// Build the value-type list for the CALL node: small integer results are
// promoted to i32, i64 results come back as two i32 halves, and the last
// result is always the output chain (MVT::Other).
985 std::vector<MVT::ValueType> RetVals;
986 MVT::ValueType RetTyVT = getValueType(RetTy);
987 MVT::ValueType ActualRetTyVT = RetTyVT;
988 if (RetTyVT >= MVT::i1 && RetTyVT <= MVT::i16)
989 ActualRetTyVT = MVT::i32; // Promote result to i32.
991 if (RetTyVT == MVT::i64) {
992 RetVals.push_back(MVT::i32);
993 RetVals.push_back(MVT::i32);
994 } else if (RetTyVT != MVT::isVoid) {
995 RetVals.push_back(ActualRetTyVT);
997 RetVals.push_back(MVT::Other);
999 // If the callee is a GlobalAddress node (quite common, every direct call is)
1000 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1001 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1002 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i32);
// Emit the call: operands are the chain, the callee, then the register args.
1004 std::vector<SDOperand> Ops;
1005 Ops.push_back(Chain);
1006 Ops.push_back(Callee);
1007 Ops.insert(Ops.end(), args_to_use.begin(), args_to_use.end());
1008 SDOperand TheCall = DAG.getNode(PPCISD::CALL, RetVals, Ops);
// The output chain is always the last value produced by the CALL node.
1009 Chain = TheCall.getValue(TheCall.Val->getNumValues()-1);
1010 Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain,
1011 DAG.getConstant(NumBytes, getPointerTy()));
1012 SDOperand RetVal = TheCall;
1014 // If the result is a small value, add a note so that we keep track of the
1015 // information about whether it is sign or zero extended.
1016 if (RetTyVT != ActualRetTyVT) {
1017 RetVal = DAG.getNode(RetTy->isSigned() ? ISD::AssertSext : ISD::AssertZext,
1018 MVT::i32, RetVal, DAG.getValueType(RetTyVT));
1019 RetVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, RetVal);
1020 } else if (RetTyVT == MVT::i64) {
// Reassemble the two i32 halves of an i64 result into one value.
1021 RetVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, RetVal, RetVal.getValue(1));
1024 return std::make_pair(RetVal, Chain);
/// InsertAtEndOfBasicBlock - Expand a SELECT_CC pseudo instruction into a
/// diamond of machine basic blocks.  Operand layout (from the uses below):
///   0: destination vreg, 1: condition register to branch on, 2: true value,
///   3: false value, 4: branch opcode (carried as an immediate).
/// (The method's return type/value lines are elided from this view.)
1028 PPCTargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
1029 MachineBasicBlock *BB) {
1030 assert((MI->getOpcode() == PPC::SELECT_CC_Int ||
1031 MI->getOpcode() == PPC::SELECT_CC_F4 ||
1032 MI->getOpcode() == PPC::SELECT_CC_F8) &&
1033 "Unexpected instr type to insert");
1035 // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
1036 // control-flow pattern. The incoming instruction knows the destination vreg
1037 // to set, the condition code register to branch on, the true/false values to
1038 // select between, and a branch opcode to use.
1039 const BasicBlock *LLVM_BB = BB->getBasicBlock();
// Iterator used below to splice the new blocks in right after this one.
1040 ilist<MachineBasicBlock>::iterator It = BB;
1046 // cmpTY ccX, r1, r2
1048 // fallthrough --> copy0MBB
1049 MachineBasicBlock *thisMBB = BB;
1050 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
1051 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
// Conditional branch to sinkMBB using the opcode from operand 4 and the
// condition register from operand 1; fallthrough reaches copy0MBB.
1052 BuildMI(BB, MI->getOperand(4).getImmedValue(), 2)
1053 .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
1054 MachineFunction *F = BB->getParent();
1055 F->getBasicBlockList().insert(It, copy0MBB);
1056 F->getBasicBlockList().insert(It, sinkMBB);
1057 // Update machine-CFG edges
1058 BB->addSuccessor(copy0MBB);
1059 BB->addSuccessor(sinkMBB);
// NOTE(review): elided lines before this point presumably reassign BB to
// copy0MBB (the false-value block) -- confirm against the full file.
1062 // %FalseValue = ...
1063 // # fallthrough to sinkMBB
1066 // Update machine-CFG edges
1067 BB->addSuccessor(sinkMBB);
// NOTE(review): BB presumably points at sinkMBB here (reassigned on elided
// lines); the PHI below merges the two incoming values.
1070 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
1073 BuildMI(BB, PPC::PHI, 4, MI->getOperand(0).getReg())
1074 .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
1075 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
1077 delete MI; // The pseudo instruction is gone now.
/// PerformDAGCombine - Target-specific DAG combines invoked by the generic
/// DAG combiner.  Returns a replacement node for N when a combine fires;
/// the fall-through return is on elided lines.
1081 SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N,
1082 DAGCombinerInfo &DCI) const {
1083 TargetMachine &TM = getTargetMachine();
1084 SelectionDAG &DAG = DCI.DAG;
1085 switch (N->getOpcode()) {
1087 case ISD::SINT_TO_FP:
// Only 64-bit-capable subtargets have the fctidz/fcfid path.
1088 if (TM.getSubtarget<PPCSubtarget>().is64Bit()) {
1089 // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores.
1090 // We allow the src/dst to be either f32/f64, but force the intermediate
// conversion through f64/i64 (the comment continues on an elided line).
1092 if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT &&
1093 N->getOperand(0).getValueType() == MVT::i64) {
1095 SDOperand Val = N->getOperand(0).getOperand(0);
// Widen an f32 source to f64 before the i64 round trip.
1096 if (Val.getValueType() == MVT::f32) {
1097 Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
1098 DCI.AddToWorklist(Val.Val);
// FCTIDZ truncates f64 -> i64 (result typed f64, i.e. the bits stay in an
// FP register); FCFID converts those i64 bits back to f64.
1101 Val = DAG.getNode(PPCISD::FCTIDZ, MVT::f64, Val);
1102 DCI.AddToWorklist(Val.Val);
1103 Val = DAG.getNode(PPCISD::FCFID, MVT::f64, Val);
1104 DCI.AddToWorklist(Val.Val);
// Round back down if the original sint_to_fp produced f32.
1105 if (N->getValueType(0) == MVT::f32) {
1106 Val = DAG.getNode(ISD::FP_ROUND, MVT::f32, Val);
1107 DCI.AddToWorklist(Val.Val);
// (Presumably under an elided "case ISD::STORE:" label -- confirm.)
1114 // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
1115 if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() &&
1116 N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
1117 N->getOperand(1).getValueType() == MVT::i32) {
1118 SDOperand Val = N->getOperand(1).getOperand(0);
1119 if (Val.getValueType() == MVT::f32) {
1120 Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
1121 DCI.AddToWorklist(Val.Val);
// FCTIWZ leaves the i32 result in an FP register; STFIWX stores it
// directly, avoiding a round trip through a GPR.
1123 Val = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Val);
1124 DCI.AddToWorklist(Val.Val);
// STFIWX operands: store's chain, the value, and the store's remaining
// (pointer / srcvalue) operands.
1126 Val = DAG.getNode(PPCISD::STFIWX, MVT::Other, N->getOperand(0), Val,
1127 N->getOperand(2), N->getOperand(3));
1128 DCI.AddToWorklist(Val.Val);
1137 /// getConstraintType - Given a constraint letter, return the type of
1138 /// constraint it is for this target.
1139 PPCTargetLowering::ConstraintType
1140 PPCTargetLowering::getConstraintType(char ConstraintLetter) const {
1141 switch (ConstraintLetter) {
// (Elided case labels -- presumably the register-class letters handled by
// getRegClassForInlineAsmConstraint below -- all classify as register class.)
1148 return C_RegisterClass;
// Any unhandled letter defers to the target-independent classification.
1150 return TargetLowering::getConstraintType(ConstraintLetter);
/// getRegClassForInlineAsmConstraint - Map a single-letter GCC RS6000 inline
/// asm constraint to the set of PPC registers it may allocate.  The case
/// labels for each list are on elided lines; judging by their contents the
/// lists are: GPRs excluding R0, all GPRs, FPRs, Altivec vector registers,
/// and condition registers -- verify the letters against the full file.
1154 std::vector<unsigned> PPCTargetLowering::
1155 getRegClassForInlineAsmConstraint(const std::string &Constraint,
1156 MVT::ValueType VT) const {
1157 if (Constraint.size() == 1) {
1158 switch (Constraint[0]) { // GCC RS6000 Constraint Letters
1159 default: break; // Unknown constraint letter
// GPRs without R0 (presumably because r0 reads as literal zero in some
// addressing contexts -- confirm; likely the 'b' base-register constraint).
1161 return make_vector<unsigned>(/*no R0*/ PPC::R1 , PPC::R2 , PPC::R3 ,
1162 PPC::R4 , PPC::R5 , PPC::R6 , PPC::R7 ,
1163 PPC::R8 , PPC::R9 , PPC::R10, PPC::R11,
1164 PPC::R12, PPC::R13, PPC::R14, PPC::R15,
1165 PPC::R16, PPC::R17, PPC::R18, PPC::R19,
1166 PPC::R20, PPC::R21, PPC::R22, PPC::R23,
1167 PPC::R24, PPC::R25, PPC::R26, PPC::R27,
1168 PPC::R28, PPC::R29, PPC::R30, PPC::R31,
// The full GPR set.
1171 return make_vector<unsigned>(PPC::R0 , PPC::R1 , PPC::R2 , PPC::R3 ,
1172 PPC::R4 , PPC::R5 , PPC::R6 , PPC::R7 ,
1173 PPC::R8 , PPC::R9 , PPC::R10, PPC::R11,
1174 PPC::R12, PPC::R13, PPC::R14, PPC::R15,
1175 PPC::R16, PPC::R17, PPC::R18, PPC::R19,
1176 PPC::R20, PPC::R21, PPC::R22, PPC::R23,
1177 PPC::R24, PPC::R25, PPC::R26, PPC::R27,
1178 PPC::R28, PPC::R29, PPC::R30, PPC::R31,
// Floating-point registers.
1181 return make_vector<unsigned>(PPC::F0 , PPC::F1 , PPC::F2 , PPC::F3 ,
1182 PPC::F4 , PPC::F5 , PPC::F6 , PPC::F7 ,
1183 PPC::F8 , PPC::F9 , PPC::F10, PPC::F11,
1184 PPC::F12, PPC::F13, PPC::F14, PPC::F15,
1185 PPC::F16, PPC::F17, PPC::F18, PPC::F19,
1186 PPC::F20, PPC::F21, PPC::F22, PPC::F23,
1187 PPC::F24, PPC::F25, PPC::F26, PPC::F27,
1188 PPC::F28, PPC::F29, PPC::F30, PPC::F31,
// Altivec vector registers.
1191 return make_vector<unsigned>(PPC::V0 , PPC::V1 , PPC::V2 , PPC::V3 ,
1192 PPC::V4 , PPC::V5 , PPC::V6 , PPC::V7 ,
1193 PPC::V8 , PPC::V9 , PPC::V10, PPC::V11,
1194 PPC::V12, PPC::V13, PPC::V14, PPC::V15,
1195 PPC::V16, PPC::V17, PPC::V18, PPC::V19,
1196 PPC::V20, PPC::V21, PPC::V22, PPC::V23,
1197 PPC::V24, PPC::V25, PPC::V26, PPC::V27,
1198 PPC::V28, PPC::V29, PPC::V30, PPC::V31,
// Condition registers.
1201 return make_vector<unsigned>(PPC::CR0, PPC::CR1, PPC::CR2, PPC::CR3,
1202 PPC::CR4, PPC::CR5, PPC::CR6, PPC::CR7,
// No match: an empty set tells the caller this constraint names no class.
1207 return std::vector<unsigned>();
1210 // isOperandValidForConstraint
/// Returns true if the constant operand Op satisfies the GCC immediate
/// constraint Letter.  (The opening of the switch on Letter, and the return
/// statements for the 'M' and 'O' cases, are on elided lines.)
1211 bool PPCTargetLowering::
1212 isOperandValidForConstraint(SDOperand Op, char Letter) {
1223 if (!isa<ConstantSDNode>(Op)) return false; // Must be an immediate.
1224 unsigned Value = cast<ConstantSDNode>(Op)->getValue();
1226 default: assert(0 && "Unknown constraint letter!");
1227 case 'I': // "I" is a signed 16-bit constant.
1228 return (short)Value == (int)Value;
1229 case 'J': // "J" is a constant with only the high-order 16 bits nonzero.
1230 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits.
// Both reduce to: the low-order 16 bits are all zero.
1231 return (short)Value == 0;
1232 case 'K': // "K" is a constant with only the low-order 16 bits nonzero.
1233 return (Value >> 16) == 0;
1234 case 'M': // "M" is a constant that is greater than 31.
1236 case 'N': // "N" is a positive constant that is an exact power of two.
1237 return (int)Value > 0 && isPowerOf2_32(Value);
1238 case 'O': // "O" is the constant zero.
1240 case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
1241 return (short)-Value == (int)-Value;
1247 // Handle standard constraint letters.
1248 return TargetLowering::isOperandValidForConstraint(Op, Letter);
1251 /// isLegalAddressImmediate - Return true if the integer value can be used
1252 /// as the offset of the target addressing mode.
1253 bool PPCTargetLowering::isLegalAddressImmediate(int64_t V) const {
1254 // PPC allows a sign-extended 16-bit immediate field.
// NOTE(review): a sign-extended 16-bit D-field holds [-32768, 32767], but
// this check accepts roughly [-65535, 65534] -- looks like it should be
// `(short)V == V`; confirm against the ISA before tightening.
1255 return (V > -(1 << 16) && V < (1 << 16)-1);