1 //===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by Chris Lattner and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines the interfaces that X86 uses to lower LLVM code into a
13 //===----------------------------------------------------------------------===//
16 #include "X86ISelLowering.h"
17 #include "X86TargetMachine.h"
18 #include "llvm/CallingConv.h"
19 #include "llvm/Function.h"
20 #include "llvm/CodeGen/MachineFrameInfo.h"
21 #include "llvm/CodeGen/MachineFunction.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/SelectionDAG.h"
24 #include "llvm/CodeGen/SSARegMap.h"
25 #include "llvm/Target/TargetOptions.h"
29 #include "llvm/Support/CommandLine.h"
// Hidden command-line flag gating the experimental X86 fastcc lowering paths
// (LowerFastCCArguments / LowerFastCCCallTo below). When off, fastcc calls
// fall back to the standard C calling convention lowering.
30 static cl::opt<bool> EnableFastCC("enable-x86-fastcc", cl::Hidden,
31 cl::desc("Enable fastcc on X86"));
// X86TargetLowering constructor - Tells the SelectionDAG framework how X86
// handles each generic operation: which value types live in which register
// classes, and which ISD operations are legal, Promoted, Expanded, or
// Custom-lowered for each type. Also records legal FP immediates and the
// memset/memcpy store-expansion thresholds.
//
// NOTE(review): the embedded original line numbering in this listing is
// non-contiguous, so some structure (e.g. the conditional that selects the
// SSE vs. x87 FP register-class setup below) has been elided here; reconcile
// against the full source before editing.
33 X86TargetLowering::X86TargetLowering(TargetMachine &TM)
34 : TargetLowering(TM) {
35 // Set up the TargetLowering object.
37 // X86 is weird, it always uses i8 for shift amounts and setcc results.
38 setShiftAmountType(MVT::i8);
39 setSetCCResultType(MVT::i8);
40 setSetCCResultContents(ZeroOrOneSetCCResult);
41 setShiftAmountFlavor(Mask); // shl X, 32 == shl X, 0
42 setStackPointerRegisterToSaveRestore(X86::ESP);
44 // Set up the register classes.
45 addRegisterClass(MVT::i8, X86::R8RegisterClass);
46 addRegisterClass(MVT::i16, X86::R16RegisterClass);
47 addRegisterClass(MVT::i32, X86::R32RegisterClass);
49 // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
51 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
52 setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
53 setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);
54 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
56 // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
58 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
59 setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);
62 // We can handle SINT_TO_FP and FP_TO_SINT from/to i64 even though i64
64 setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
65 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
66 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
67 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom);
70 // Handle FP_TO_UINT by promoting the destination to a larger signed
72 setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote);
73 setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote);
74 setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote);
77 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
79 // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have
81 setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
82 setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);
83 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
85 setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
86 setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
89 setOperationAction(ISD::BRCOND , MVT::Other, Custom);
91 setOperationAction(ISD::BRCONDTWOWAY , MVT::Other, Expand);
92 setOperationAction(ISD::BRTWOWAY_CC , MVT::Other, Expand);
93 setOperationAction(ISD::MEMMOVE , MVT::Other, Expand);
94 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Expand);
95 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
96 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
97 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
98 setOperationAction(ISD::SEXTLOAD , MVT::i1 , Expand);
99 setOperationAction(ISD::FREM , MVT::f64 , Expand);
// X86 has no native bit-count instructions for these types in this era,
// so let the legalizer expand them.
100 setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
101 setOperationAction(ISD::CTTZ , MVT::i8 , Expand);
102 setOperationAction(ISD::CTLZ , MVT::i8 , Expand);
103 setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
104 setOperationAction(ISD::CTTZ , MVT::i16 , Expand);
105 setOperationAction(ISD::CTLZ , MVT::i16 , Expand);
106 setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
107 setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
108 setOperationAction(ISD::CTLZ , MVT::i32 , Expand);
109 setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);
112 setOperationAction(ISD::BSWAP , MVT::i32 , Expand);
113 setOperationAction(ISD::ROTL , MVT::i8 , Expand);
114 setOperationAction(ISD::ROTR , MVT::i8 , Expand);
115 setOperationAction(ISD::ROTL , MVT::i16 , Expand);
116 setOperationAction(ISD::ROTR , MVT::i16 , Expand);
117 setOperationAction(ISD::ROTL , MVT::i32 , Expand);
118 setOperationAction(ISD::ROTR , MVT::i32 , Expand);
120 setOperationAction(ISD::BSWAP , MVT::i16 , Expand);
122 setOperationAction(ISD::READIO , MVT::i1 , Expand);
123 setOperationAction(ISD::READIO , MVT::i8 , Expand);
124 setOperationAction(ISD::READIO , MVT::i16 , Expand);
125 setOperationAction(ISD::READIO , MVT::i32 , Expand);
126 setOperationAction(ISD::WRITEIO , MVT::i1 , Expand);
127 setOperationAction(ISD::WRITEIO , MVT::i8 , Expand);
128 setOperationAction(ISD::WRITEIO , MVT::i16 , Expand);
129 setOperationAction(ISD::WRITEIO , MVT::i32 , Expand);
131 // These should be promoted to a larger select which is supported.
132 setOperationAction(ISD::SELECT , MVT::i1 , Promote);
133 setOperationAction(ISD::SELECT , MVT::i8 , Promote);
135 // X86 wants to expand cmov itself.
136 setOperationAction(ISD::SELECT , MVT::i16 , Custom);
137 setOperationAction(ISD::SELECT , MVT::i32 , Custom);
138 setOperationAction(ISD::SELECT , MVT::f32 , Custom);
139 setOperationAction(ISD::SELECT , MVT::f64 , Custom);
140 setOperationAction(ISD::SETCC , MVT::i8 , Custom);
141 setOperationAction(ISD::SETCC , MVT::i16 , Custom);
142 setOperationAction(ISD::SETCC , MVT::i32 , Custom);
143 setOperationAction(ISD::SETCC , MVT::f32 , Custom);
144 setOperationAction(ISD::SETCC , MVT::f64 , Custom);
145 // X86 ret instruction may pop stack.
146 setOperationAction(ISD::RET , MVT::Other, Custom);
148 setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom);
149 // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
150 setOperationAction(ISD::ADD_PARTS , MVT::i32 , Custom);
151 setOperationAction(ISD::SUB_PARTS , MVT::i32 , Custom);
152 setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom);
153 setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom);
154 setOperationAction(ISD::SRL_PARTS , MVT::i32 , Custom);
155 // X86 wants to expand memset / memcpy itself.
156 setOperationAction(ISD::MEMSET , MVT::Other, Custom);
157 setOperationAction(ISD::MEMCPY , MVT::Other, Custom);
160 // We don't have line number support yet.
161 setOperationAction(ISD::LOCATION, MVT::Other, Expand);
162 setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
163 setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);
165 // Expand to the default code.
166 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
167 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
168 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand);
// SSE scalar-FP path: f32/f64 live in the XMM-based FR32/FR64 classes.
// NOTE(review): presumably guarded by an X86ScalarSSE-style check in the
// full source — the guard line is elided from this listing; confirm.
171 // Set up the FP register classes.
172 addRegisterClass(MVT::f32, X86::FR32RegisterClass);
173 addRegisterClass(MVT::f64, X86::FR64RegisterClass);
175 // SSE has no load+extend ops
176 setOperationAction(ISD::EXTLOAD, MVT::f32, Expand);
177 setOperationAction(ISD::ZEXTLOAD, MVT::f32, Expand);
179 // SSE has no i16 to fp conversion, only i32
180 setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
181 setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
183 // Expand FP_TO_UINT into a select.
184 // FIXME: We would like to use a Custom expander here eventually to do
185 // the optimal thing for SSE vs. the default expansion in the legalizer.
186 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand);
188 // We don't support sin/cos/sqrt/fmod
189 setOperationAction(ISD::FSIN , MVT::f64, Expand);
190 setOperationAction(ISD::FCOS , MVT::f64, Expand);
191 setOperationAction(ISD::FABS , MVT::f64, Expand);
192 setOperationAction(ISD::FNEG , MVT::f64, Expand);
193 setOperationAction(ISD::FREM , MVT::f64, Expand);
194 setOperationAction(ISD::FSIN , MVT::f32, Expand);
195 setOperationAction(ISD::FCOS , MVT::f32, Expand);
196 setOperationAction(ISD::FABS , MVT::f32, Expand);
197 setOperationAction(ISD::FNEG , MVT::f32, Expand);
198 setOperationAction(ISD::FREM , MVT::f32, Expand);
200 addLegalFPImmediate(+0.0); // xorps / xorpd
// x87 path: f64 lives in the stack-based RFP class; FLD0/FLD1 (optionally
// with FCHS) materialize the four legal immediates below.
202 // Set up the FP register classes.
203 addRegisterClass(MVT::f64, X86::RFPRegisterClass);
206 setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
207 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
211 setOperationAction(ISD::FSIN , MVT::f64 , Expand);
212 setOperationAction(ISD::FCOS , MVT::f64 , Expand);
215 addLegalFPImmediate(+0.0); // FLD0
216 addLegalFPImmediate(+1.0); // FLD1
217 addLegalFPImmediate(-0.0); // FLD0/FCHS
218 addLegalFPImmediate(-1.0); // FLD1/FCHS
220 computeRegisterProperties();
222 maxStoresPerMemSet = 8; // For %llvm.memset -> sequence of stores
223 maxStoresPerMemCpy = 8; // For %llvm.memcpy -> sequence of stores
224 maxStoresPerMemMove = 8; // For %llvm.memmove -> sequence of stores
225 allowUnalignedMemoryAccesses = true; // x86 supports it!
// LowerArguments - Entry point for lowering a function's formal arguments.
// Dispatches to the fastcc lowering when the function uses the fast calling
// convention AND -enable-x86-fastcc is set; otherwise uses the standard C
// calling convention lowering. Returns one SDOperand per formal argument.
228 std::vector<SDOperand>
229 X86TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
230 if (F.getCallingConv() == CallingConv::Fast && EnableFastCC)
231 return LowerFastCCArguments(F, DAG);
232 return LowerCCCArguments(F, DAG);
// LowerCallTo - Common entry point for lowering an outgoing call. Asserts
// that only the C convention takes varargs, rewrites a direct callee
// (GlobalAddress / ExternalSymbol) into its Target* node form so the
// legalizer leaves it alone, then dispatches to the fastcc or C calling
// convention implementation. Returns the (result value, chain) pair.
235 std::pair<SDOperand, SDOperand>
236 X86TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy,
237 bool isVarArg, unsigned CallingConv,
239 SDOperand Callee, ArgListTy &Args,
241 assert((!isVarArg || CallingConv == CallingConv::C) &&
242 "Only C takes varargs!");
244 // If the callee is a GlobalAddress node (quite common, every direct call is)
245 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
246 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
247 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
248 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
249 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
251 if (CallingConv == CallingConv::Fast && EnableFastCC)
252 return LowerFastCCCallTo(Chain, RetTy, isTailCall, Callee, Args, DAG);
253 return LowerCCCCallTo(Chain, RetTy, isVarArg, isTailCall, Callee, Args, DAG);
// LowerReturnTo - Lower a 'ret <value>' into target form: integer results
// are copied into EAX (plus EDX for the high half of an i64), FP results are
// placed on the x87 stack top via X86ISD::FP_SET_RESULT (spilling through a
// stack slot first when the value is not already in an x87 register), and
// the whole thing ends in an X86ISD::RET_FLAG carrying the callee-pop byte
// count. NOTE(review): the switch header on OpVT and several case labels /
// braces are elided from this listing (original numbering is
// non-contiguous); reconcile against the full source before editing.
256 SDOperand X86TargetLowering::LowerReturnTo(SDOperand Chain, SDOperand Op,
259 return DAG.getNode(ISD::RET, MVT::Other, Chain, Op);
262 MVT::ValueType OpVT = Op.getValueType();
264 default: assert(0 && "Unknown type to return!");
266 Copy = DAG.getCopyToReg(Chain, X86::EAX, Op, SDOperand());
// i64 return: split into halves; high half goes in EDX, low half in EAX,
// glued together so the scheduler keeps them adjacent.
269 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op,
270 DAG.getConstant(1, MVT::i32));
271 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op,
272 DAG.getConstant(0, MVT::i32));
273 Copy = DAG.getCopyToReg(Chain, X86::EDX, Hi, SDOperand());
274 Copy = DAG.getCopyToReg(Copy, X86::EAX, Lo, Copy.getValue(1));
// FP return: extend f32 to f64 first, then hand the value to the x87
// result-setting pseudo-op (produces chain + flag).
280 if (OpVT == MVT::f32)
281 Op = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Op);
282 std::vector<MVT::ValueType> Tys;
283 Tys.push_back(MVT::Other);
284 Tys.push_back(MVT::Flag);
285 std::vector<SDOperand> Ops;
286 Ops.push_back(Chain);
288 Copy = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops);
290 // Spill the value to memory and reload it into top of stack.
291 unsigned Size = MVT::getSizeInBits(OpVT)/8;
292 MachineFunction &MF = DAG.getMachineFunction();
293 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
294 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
295 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Op,
296 StackSlot, DAG.getSrcValue(NULL));
297 std::vector<MVT::ValueType> Tys;
298 Tys.push_back(MVT::f64);
299 Tys.push_back(MVT::Other);
300 std::vector<SDOperand> Ops;
301 Ops.push_back(Chain);
302 Ops.push_back(StackSlot);
303 Ops.push_back(DAG.getValueType(OpVT));
304 Copy = DAG.getNode(X86ISD::FLD, Tys, Ops);
306 Tys.push_back(MVT::Other);
307 Tys.push_back(MVT::Flag);
309 Ops.push_back(Copy.getValue(1));
311 Copy = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops);
// RET_FLAG carries the number of bytes the callee pops (0 for ccc).
316 return DAG.getNode(X86ISD::RET_FLAG, MVT::Other,
317 Copy, DAG.getConstant(getBytesToPopOnReturn(), MVT::i16),
321 //===----------------------------------------------------------------------===//
322 // C Calling Convention implementation
323 //===----------------------------------------------------------------------===//
// LowerCCCArguments - Lower the formal arguments of a C-calling-convention
// function: every argument lives on the caller's stack, so create one fixed
// frame object per argument at its ABI offset and load it. Dead arguments
// get a constant-zero placeholder instead of a load. Also records the
// vararg frame index and the callee-pop/caller-reserve byte counts (ccc:
// callee pops nothing), and marks the return-value registers live-out.
// NOTE(review): the switch header, some case labels, and closing braces are
// elided from this listing (non-contiguous original numbering).
325 std::vector<SDOperand>
326 X86TargetLowering::LowerCCCArguments(Function &F, SelectionDAG &DAG) {
327 std::vector<SDOperand> ArgValues;
329 MachineFunction &MF = DAG.getMachineFunction();
330 MachineFrameInfo *MFI = MF.getFrameInfo();
332 // Add DAG nodes to load the arguments... On entry to a function on the X86,
333 // the stack frame looks like this:
335 // [ESP] -- return address
336 // [ESP + 4] -- first argument (leftmost lexically)
337 // [ESP + 8] -- second argument, if first argument is four bytes in size
340 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
341 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
342 MVT::ValueType ObjectVT = getValueType(I->getType());
343 unsigned ArgIncrement = 4;
346 default: assert(0 && "Unhandled argument type!");
348 case MVT::i8: ObjSize = 1; break;
349 case MVT::i16: ObjSize = 2; break;
350 case MVT::i32: ObjSize = 4; break;
351 case MVT::i64: ObjSize = ArgIncrement = 8; break;
352 case MVT::f32: ObjSize = 4; break;
353 case MVT::f64: ObjSize = ArgIncrement = 8; break;
355 // Create the frame index object for this incoming parameter...
356 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
358 // Create the SelectionDAG nodes corresponding to a load from this parameter
359 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
361 // Don't codegen dead arguments. FIXME: remove this check when we can nuke
365 ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
366 DAG.getSrcValue(NULL));
// Dead argument: substitute a zero of the right type instead of loading.
368 if (MVT::isInteger(ObjectVT))
369 ArgValue = DAG.getConstant(0, ObjectVT);
371 ArgValue = DAG.getConstantFP(0, ObjectVT);
373 ArgValues.push_back(ArgValue);
375 ArgOffset += ArgIncrement; // Move on to the next argument...
378 // If the function takes variable number of arguments, make a frame index for
379 // the start of the first vararg value... for expansion of llvm.va_start.
381 VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
382 ReturnAddrIndex = 0; // No return address slot generated yet.
383 BytesToPopOnReturn = 0; // Callee pops nothing.
384 BytesCallerReserves = ArgOffset;
386 // Finally, inform the code generator which regs we return values in.
387 switch (getValueType(F.getReturnType())) {
388 default: assert(0 && "Unknown type!");
389 case MVT::isVoid: break;
394 MF.addLiveOut(X86::EAX);
397 MF.addLiveOut(X86::EAX);
398 MF.addLiveOut(X86::EDX);
402 MF.addLiveOut(X86::ST0);
// LowerCCCCallTo - Lower an outgoing call under the C calling convention:
// count the argument bytes, open a CALLSEQ_START, store each (promoted)
// argument to its [ESP+offset] slot, emit the X86ISD::CALL, copy the result
// out of the ABI return registers (AL/AX/EAX, EAX:EDX for i64, x87 ST0 via
// FP_GET_RESULT for FP — spilled through a stack slot when the caller wants
// an SSE value), and close with CALLSEQ_END (ccc: caller pops NumBytes).
// NOTE(review): switch headers, several case labels, and braces are elided
// from this listing (non-contiguous original numbering); reconcile against
// the full source before editing.
408 std::pair<SDOperand, SDOperand>
409 X86TargetLowering::LowerCCCCallTo(SDOperand Chain, const Type *RetTy,
410 bool isVarArg, bool isTailCall,
411 SDOperand Callee, ArgListTy &Args,
413 // Count how many bytes are to be pushed on the stack.
414 unsigned NumBytes = 0;
418 Chain = DAG.getNode(ISD::CALLSEQ_START, MVT::Other, Chain,
419 DAG.getConstant(0, getPointerTy()));
421 for (unsigned i = 0, e = Args.size(); i != e; ++i)
422 switch (getValueType(Args[i].second)) {
423 default: assert(0 && "Unknown value type!");
437 Chain = DAG.getNode(ISD::CALLSEQ_START, MVT::Other, Chain,
438 DAG.getConstant(NumBytes, getPointerTy()));
440 // Arguments go on the stack in reverse order, as specified by the ABI.
441 unsigned ArgOffset = 0;
442 SDOperand StackPtr = DAG.getRegister(X86::ESP, MVT::i32);
443 std::vector<SDOperand> Stores;
445 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
446 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
447 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
449 switch (getValueType(Args[i].second)) {
450 default: assert(0 && "Unexpected ValueType for argument!");
454 // Promote the integer to 32 bits. If the input type is signed use a
455 // sign extend, otherwise use a zero extend.
456 if (Args[i].second->isSigned())
457 Args[i].first =DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Args[i].first);
459 Args[i].first =DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Args[i].first);
464 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
465 Args[i].first, PtrOff,
466 DAG.getSrcValue(NULL)));
471 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
472 Args[i].first, PtrOff,
473 DAG.getSrcValue(NULL)));
// All argument stores are independent; join them with a TokenFactor.
478 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);
481 std::vector<MVT::ValueType> RetVals;
482 MVT::ValueType RetTyVT = getValueType(RetTy);
483 RetVals.push_back(MVT::Other);
485 // The result values produced have to be legal. Promote the result.
487 case MVT::isVoid: break;
489 RetVals.push_back(RetTyVT);
494 RetVals.push_back(MVT::i32);
498 RetVals.push_back(MVT::f32);
500 RetVals.push_back(MVT::f64);
// i64 result comes back as two i32 halves.
503 RetVals.push_back(MVT::i32);
504 RetVals.push_back(MVT::i32);
509 std::vector<MVT::ValueType> NodeTys;
510 NodeTys.push_back(MVT::Other); // Returns a chain
511 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
512 std::vector<SDOperand> Ops;
513 Ops.push_back(Chain);
514 Ops.push_back(Callee);
516 // FIXME: Do not generate X86ISD::TAILCALL for now.
517 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops);
518 SDOperand InFlag = Chain.getValue(1);
521 if (RetTyVT != MVT::isVoid) {
523 default: assert(0 && "Unknown value type to return!");
526 RetVal = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag);
527 Chain = RetVal.getValue(1);
530 RetVal = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag);
531 Chain = RetVal.getValue(1);
534 RetVal = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
535 Chain = RetVal.getValue(1);
538 SDOperand Lo = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
539 SDOperand Hi = DAG.getCopyFromReg(Lo.getValue(1), X86::EDX, MVT::i32,
541 RetVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Lo, Hi);
542 Chain = Hi.getValue(1);
// FP result: pull it off the x87 stack top with FP_GET_RESULT.
547 std::vector<MVT::ValueType> Tys;
548 Tys.push_back(MVT::f64);
549 Tys.push_back(MVT::Other);
550 std::vector<SDOperand> Ops;
551 Ops.push_back(Chain);
552 Ops.push_back(InFlag);
553 RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, Ops);
554 Chain = RetVal.getValue(1);
// Round-trip through a stack slot (FST then load) to convert the x87
// value into the type the caller expects.
556 unsigned Size = MVT::getSizeInBits(MVT::f64)/8;
557 MachineFunction &MF = DAG.getMachineFunction();
558 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
559 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
561 Tys.push_back(MVT::Other);
563 Ops.push_back(Chain);
564 Ops.push_back(RetVal);
565 Ops.push_back(StackSlot);
566 Ops.push_back(DAG.getValueType(RetTyVT));
567 Chain = DAG.getNode(X86ISD::FST, Tys, Ops);
568 RetVal = DAG.getLoad(RetTyVT, Chain, StackSlot,
569 DAG.getSrcValue(NULL));
570 Chain = RetVal.getValue(1);
571 } else if (RetTyVT == MVT::f32)
572 RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
// ccc: callee pops 0 bytes; the caller cleans up NumBytes.
578 Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain,
579 DAG.getConstant(NumBytes, getPointerTy()),
580 DAG.getConstant(0, getPointerTy()));
581 return std::make_pair(RetVal, Chain);
// Alternate (older) call-emission path using a multi-result call node.
583 std::vector<SDOperand> Ops;
584 Ops.push_back(Chain);
585 Ops.push_back(Callee);
586 Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
587 Ops.push_back(DAG.getConstant(0, getPointerTy()));
589 SDOperand TheCall = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
594 case MVT::isVoid: break;
596 ResultVal = TheCall.getValue(1);
601 ResultVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, TheCall.getValue(1));
604 // FIXME: we would really like to remember that this FP_ROUND operation is
605 // okay to eliminate if we allow excess FP precision.
606 ResultVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, TheCall.getValue(1));
609 ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, TheCall.getValue(1),
610 TheCall.getValue(2));
614 Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, TheCall);
615 return std::make_pair(ResultVal, Chain);
// LowerVAStart - Lower llvm.va_start: the va_list on X86 is just a pointer,
// so store the address of the VarArgsFrameIndex slot (recorded by
// LowerCCCArguments) through the va_list pointer. Returns the store chain.
620 X86TargetLowering::LowerVAStart(SDOperand Chain, SDOperand VAListP,
621 Value *VAListV, SelectionDAG &DAG) {
622 // vastart just stores the address of the VarArgsFrameIndex slot.
623 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32);
624 return DAG.getNode(ISD::STORE, MVT::Other, Chain, FR, VAListP,
625 DAG.getSrcValue(VAListV));
// LowerVAArg - Lower llvm.va_arg: load the current argument pointer from the
// va_list, load the argument value through it, bump the pointer by the
// argument size (the branch computing Amt for i32 vs. i64/f64 is elided
// from this listing), and store the updated pointer back into the va_list.
// Returns (loaded value, updated chain). Only i32, i64, and f64 are
// expected here; smaller types were promoted for varargs.
629 std::pair<SDOperand,SDOperand>
630 X86TargetLowering::LowerVAArg(SDOperand Chain, SDOperand VAListP,
631 Value *VAListV, const Type *ArgTy,
633 MVT::ValueType ArgVT = getValueType(ArgTy);
634 SDOperand Val = DAG.getLoad(MVT::i32, Chain,
635 VAListP, DAG.getSrcValue(VAListV));
636 SDOperand Result = DAG.getLoad(ArgVT, Chain, Val,
637 DAG.getSrcValue(NULL));
639 if (ArgVT == MVT::i32)
642 assert((ArgVT == MVT::i64 || ArgVT == MVT::f64) &&
643 "Other types should have been promoted for varargs!");
// Advance the va_list pointer past the consumed argument.
646 Val = DAG.getNode(ISD::ADD, Val.getValueType(), Val,
647 DAG.getConstant(Amt, Val.getValueType()));
648 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain,
649 Val, VAListP, DAG.getSrcValue(VAListV));
650 return std::make_pair(Result, Chain);
653 //===----------------------------------------------------------------------===//
654 // Fast Calling Convention implementation
655 //===----------------------------------------------------------------------===//
657 // The X86 'fast' calling convention passes up to two integer arguments in
658 // registers (an appropriate portion of EAX/EDX), passes arguments in C order,
659 // and requires that the callee pop its arguments off the stack (allowing proper
660 // tail calls), and has the same return value conventions as C calling convs.
662 // This calling convention always arranges for the callee pop value to be 8n+4
663 // bytes, which is needed for tail recursion elimination and stack alignment
666 // Note that this can be enhanced in the future to pass fp vals in registers
667 // (when we have a global fp allocator) and do other tricks.
670 /// AddLiveIn - This helper function adds the specified physical register to the
671 /// AddLiveIn - Adds the specified physical register to the
// (continuation of doc comment below; the 'return VReg;' line is elided
// from this listing — the function returns the created virtual register.)
673 static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
674 TargetRegisterClass *RC) {
675 assert(RC->contains(PReg) && "Not the correct regclass!");
676 unsigned VReg = MF.getSSARegMap()->createVirtualRegister(RC);
677 MF.addLiveIn(PReg, VReg);
// LowerFastCCArguments - Lower formal arguments under the X86 'fast' calling
// convention: the first two integer arguments arrive in (portions of)
// EAX/EDX — AL/DL for i8, AX/DX for i16, EAX/EDX for i32, and the EAX:EDX
// pair (or EDX + one stack word when only one register remains) for i64 —
// while everything else is loaded from fixed stack slots. Records
// BytesToPopOnReturn = ArgOffset (callee pops its stack args), forbids
// varargs, rounds the pop amount to 8n+4 for stack alignment, and marks the
// return-value registers live-out. NOTE(review): the switch header, case
// labels, NumIntRegs increments, and several closing braces are elided from
// this listing (non-contiguous original numbering); reconcile against the
// full source before editing.
682 std::vector<SDOperand>
683 X86TargetLowering::LowerFastCCArguments(Function &F, SelectionDAG &DAG) {
684 std::vector<SDOperand> ArgValues;
686 MachineFunction &MF = DAG.getMachineFunction();
687 MachineFrameInfo *MFI = MF.getFrameInfo();
689 // Add DAG nodes to load the arguments... On entry to a function the stack
690 // frame looks like this:
692 // [ESP] -- return address
693 // [ESP + 4] -- first nonreg argument (leftmost lexically)
694 // [ESP + 8] -- second nonreg argument, if first argument is 4 bytes in size
696 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
698 // Keep track of the number of integer regs passed so far. This can be either
699 // 0 (neither EAX or EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
701 unsigned NumIntRegs = 0;
703 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
704 MVT::ValueType ObjectVT = getValueType(I->getType());
705 unsigned ArgIncrement = 4;
706 unsigned ObjSize = 0;
710 default: assert(0 && "Unhandled argument type!");
// i1/i8 in AL or DL, depending on which integer register slot is next.
713 if (NumIntRegs < 2) {
714 if (!I->use_empty()) {
715 unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::DL : X86::AL,
716 X86::R8RegisterClass);
717 ArgValue = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i8);
718 DAG.setRoot(ArgValue.getValue(1));
719 if (ObjectVT == MVT::i1)
720 // FIXME: Should insert a assertzext here.
721 ArgValue = DAG.getNode(ISD::TRUNCATE, MVT::i1, ArgValue);
// i16 in AX or DX.
730 if (NumIntRegs < 2) {
731 if (!I->use_empty()) {
732 unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::DX : X86::AX,
733 X86::R16RegisterClass);
734 ArgValue = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i16);
735 DAG.setRoot(ArgValue.getValue(1));
// i32 in EAX or EDX.
743 if (NumIntRegs < 2) {
744 if (!I->use_empty()) {
745 unsigned VReg = AddLiveIn(MF,NumIntRegs ? X86::EDX : X86::EAX,
746 X86::R32RegisterClass);
747 ArgValue = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32);
748 DAG.setRoot(ArgValue.getValue(1));
// i64 with both registers free: low half in EAX, high half in EDX.
756 if (NumIntRegs == 0) {
757 if (!I->use_empty()) {
758 unsigned BotReg = AddLiveIn(MF, X86::EAX, X86::R32RegisterClass);
759 unsigned TopReg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);
761 SDOperand Low = DAG.getCopyFromReg(DAG.getRoot(), BotReg, MVT::i32);
762 SDOperand Hi = DAG.getCopyFromReg(Low.getValue(1), TopReg, MVT::i32);
763 DAG.setRoot(Hi.getValue(1));
765 ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Low, Hi);
// i64 with only one register left: low half in EDX, high half on stack.
769 } else if (NumIntRegs == 1) {
770 if (!I->use_empty()) {
771 unsigned BotReg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);
772 SDOperand Low = DAG.getCopyFromReg(DAG.getRoot(), BotReg, MVT::i32);
773 DAG.setRoot(Low.getValue(1));
775 // Load the high part from memory.
776 // Create the frame index object for this incoming parameter...
777 int FI = MFI->CreateFixedObject(4, ArgOffset);
778 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
779 SDOperand Hi = DAG.getLoad(MVT::i32, DAG.getEntryNode(), FIN,
780 DAG.getSrcValue(NULL));
781 ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Low, Hi);
787 ObjSize = ArgIncrement = 8;
789 case MVT::f32: ObjSize = 4; break;
790 case MVT::f64: ObjSize = ArgIncrement = 8; break;
793 // Don't codegen dead arguments. FIXME: remove this check when we can nuke
795 if (ObjSize && !I->use_empty()) {
796 // Create the frame index object for this incoming parameter...
797 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
799 // Create the SelectionDAG nodes corresponding to a load from this
801 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
803 ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
804 DAG.getSrcValue(NULL));
805 } else if (ArgValue.Val == 0) {
806 if (MVT::isInteger(ObjectVT))
807 ArgValue = DAG.getConstant(0, ObjectVT);
809 ArgValue = DAG.getConstantFP(0, ObjectVT);
811 ArgValues.push_back(ArgValue);
814 ArgOffset += ArgIncrement; // Move on to the next argument.
817 // Make sure the instruction takes 8n+4 bytes to make sure the start of the
818 // arguments and the arguments after the retaddr has been pushed are aligned.
819 if ((ArgOffset & 7) == 0)
822 VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs.
823 ReturnAddrIndex = 0; // No return address slot generated yet.
824 BytesToPopOnReturn = ArgOffset; // Callee pops all stack arguments.
825 BytesCallerReserves = 0;
827 // Finally, inform the code generator which regs we return values in.
828 switch (getValueType(F.getReturnType())) {
829 default: assert(0 && "Unknown type!");
830 case MVT::isVoid: break;
835 MF.addLiveOut(X86::EAX);
838 MF.addLiveOut(X86::EAX);
839 MF.addLiveOut(X86::EDX);
843 MF.addLiveOut(X86::ST0);
849 std::pair<SDOperand, SDOperand>
850 X86TargetLowering::LowerFastCCCallTo(SDOperand Chain, const Type *RetTy,
851 bool isTailCall, SDOperand Callee,
852 ArgListTy &Args, SelectionDAG &DAG) {
853 // Count how many bytes are to be pushed on the stack.
854 unsigned NumBytes = 0;
856 // Keep track of the number of integer regs passed so far. This can be either
857 // 0 (neither EAX or EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
859 unsigned NumIntRegs = 0;
861 for (unsigned i = 0, e = Args.size(); i != e; ++i)
862 switch (getValueType(Args[i].second)) {
863 default: assert(0 && "Unknown value type!");
868 if (NumIntRegs < 2) {
877 if (NumIntRegs == 0) {
880 } else if (NumIntRegs == 1) {
892 // Make sure the instruction takes 8n+4 bytes to make sure the start of the
893 // arguments and the arguments after the retaddr has been pushed are aligned.
894 if ((NumBytes & 7) == 0)
897 Chain = DAG.getNode(ISD::CALLSEQ_START, MVT::Other, Chain,
898 DAG.getConstant(NumBytes, getPointerTy()));
900 // Arguments go on the stack in reverse order, as specified by the ABI.
901 unsigned ArgOffset = 0;
902 SDOperand StackPtr = DAG.getCopyFromReg(DAG.getEntryNode(),
905 std::vector<SDOperand> Stores;
906 std::vector<SDOperand> RegValuesToPass;
907 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
908 switch (getValueType(Args[i].second)) {
909 default: assert(0 && "Unexpected ValueType for argument!");
911 Args[i].first = DAG.getNode(ISD::ANY_EXTEND, MVT::i8, Args[i].first);
916 if (NumIntRegs < 2) {
917 RegValuesToPass.push_back(Args[i].first);
923 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
924 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
925 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
926 Args[i].first, PtrOff,
927 DAG.getSrcValue(NULL)));
932 if (NumIntRegs < 2) { // Can pass part of it in regs?
933 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
934 Args[i].first, DAG.getConstant(1, MVT::i32));
935 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
936 Args[i].first, DAG.getConstant(0, MVT::i32));
937 RegValuesToPass.push_back(Lo);
939 if (NumIntRegs < 2) { // Pass both parts in regs?
940 RegValuesToPass.push_back(Hi);
943 // Pass the high part in memory.
944 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
945 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
946 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
947 Hi, PtrOff, DAG.getSrcValue(NULL)));
954 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
955 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
956 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
957 Args[i].first, PtrOff,
958 DAG.getSrcValue(NULL)));
964 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);
966 // Make sure the instruction takes 8n+4 bytes to make sure the start of the
967 // arguments and the arguments after the retaddr has been pushed are aligned.
968 if ((ArgOffset & 7) == 0)
971 std::vector<MVT::ValueType> RetVals;
972 MVT::ValueType RetTyVT = getValueType(RetTy);
974 RetVals.push_back(MVT::Other);
976 // The result values produced have to be legal. Promote the result.
978 case MVT::isVoid: break;
980 RetVals.push_back(RetTyVT);
985 RetVals.push_back(MVT::i32);
989 RetVals.push_back(MVT::f32);
991 RetVals.push_back(MVT::f64);
994 RetVals.push_back(MVT::i32);
995 RetVals.push_back(MVT::i32);
1000 // Build a sequence of copy-to-reg nodes chained together with token chain
1001 // and flag operands which copy the outgoing args into registers.
1003 for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) {
1005 SDOperand RegToPass = RegValuesToPass[i];
1006 switch (RegToPass.getValueType()) {
1007 default: assert(0 && "Bad thing to pass in regs");
1009 CCReg = (i == 0) ? X86::AL : X86::DL;
1012 CCReg = (i == 0) ? X86::AX : X86::DX;
1015 CCReg = (i == 0) ? X86::EAX : X86::EDX;
1019 Chain = DAG.getCopyToReg(Chain, CCReg, RegToPass, InFlag);
1020 InFlag = Chain.getValue(1);
1023 std::vector<MVT::ValueType> NodeTys;
1024 NodeTys.push_back(MVT::Other); // Returns a chain
1025 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
1026 std::vector<SDOperand> Ops;
1027 Ops.push_back(Chain);
1028 Ops.push_back(Callee);
1030 Ops.push_back(InFlag);
1032 // FIXME: Do not generate X86ISD::TAILCALL for now.
1033 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops);
1034 InFlag = Chain.getValue(1);
1037 if (RetTyVT != MVT::isVoid) {
1039 default: assert(0 && "Unknown value type to return!");
1042 RetVal = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag);
1043 Chain = RetVal.getValue(1);
1046 RetVal = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag);
1047 Chain = RetVal.getValue(1);
1050 RetVal = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
1051 Chain = RetVal.getValue(1);
1054 SDOperand Lo = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
1055 SDOperand Hi = DAG.getCopyFromReg(Lo.getValue(1), X86::EDX, MVT::i32,
1057 RetVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Lo, Hi);
1058 Chain = Hi.getValue(1);
1063 std::vector<MVT::ValueType> Tys;
1064 Tys.push_back(MVT::f64);
1065 Tys.push_back(MVT::Other);
1066 std::vector<SDOperand> Ops;
1067 Ops.push_back(Chain);
1068 Ops.push_back(InFlag);
1069 RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, Ops);
1070 Chain = RetVal.getValue(1);
1072 unsigned Size = MVT::getSizeInBits(MVT::f64)/8;
1073 MachineFunction &MF = DAG.getMachineFunction();
1074 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
1075 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
1077 Tys.push_back(MVT::Other);
1079 Ops.push_back(Chain);
1080 Ops.push_back(RetVal);
1081 Ops.push_back(StackSlot);
1082 Ops.push_back(DAG.getValueType(RetTyVT));
1083 Chain = DAG.getNode(X86ISD::FST, Tys, Ops);
1084 RetVal = DAG.getLoad(RetTyVT, Chain, StackSlot,
1085 DAG.getSrcValue(NULL));
1086 Chain = RetVal.getValue(1);
1087 } else if (RetTyVT == MVT::f32)
1088 RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
1094 Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain,
1095 DAG.getConstant(ArgOffset, getPointerTy()),
1096 DAG.getConstant(ArgOffset, getPointerTy()));
1097 return std::make_pair(RetVal, Chain);
1099 std::vector<SDOperand> Ops;
1100 Ops.push_back(Chain);
1101 Ops.push_back(Callee);
1102 Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));
1103 // Callee pops all arg values on the stack.
1104 Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));
1106 // Pass register arguments as needed.
1107 Ops.insert(Ops.end(), RegValuesToPass.begin(), RegValuesToPass.end());
1109 SDOperand TheCall = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
1111 Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, TheCall);
1113 SDOperand ResultVal;
1115 case MVT::isVoid: break;
1117 ResultVal = TheCall.getValue(1);
1122 ResultVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, TheCall.getValue(1));
1125 // FIXME: we would really like to remember that this FP_ROUND operation is
1126 // okay to eliminate if we allow excess FP precision.
1127 ResultVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, TheCall.getValue(1));
1130 ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, TheCall.getValue(1),
1131 TheCall.getValue(2));
1135 return std::make_pair(ResultVal, Chain);
/// getReturnAddressFrameIndex - Return a FrameIndex node for the fixed stack
/// object that overlays the return-address slot, creating the object lazily
/// (once per function) on first use.
SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
  if (ReturnAddrIndex == 0) {
    // Set up a frame object for the return address.
    MachineFunction &MF = DAG.getMachineFunction();
    // 4-byte fixed object at offset -4: the caller-pushed return address
    // sits immediately below the incoming stack pointer on x86-32.
    ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
  return DAG.getFrameIndex(ReturnAddrIndex, MVT::i32);
/// LowerFrameReturnAddress - Lower frame-address / return-address queries.
/// Only Depth == 0 is handled; deeper frames produce a constant 0.
/// Returns the (result, chain) pair.
std::pair<SDOperand, SDOperand> X86TargetLowering::
LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
                        SelectionDAG &DAG) {
  if (Depth)  // Depths > 0 not supported yet!
    Result = DAG.getConstant(0, getPointerTy());
    SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
    if (!isFrameAddress)
      // Just load the return address
      Result = DAG.getLoad(MVT::i32, DAG.getEntryNode(), RetAddrFI,
                           DAG.getSrcValue(NULL));
      // Frame address: 4 bytes below the return-address slot (the saved
      // frame pointer location).
      Result = DAG.getNode(ISD::SUB, MVT::i32, RetAddrFI,
                           DAG.getConstant(4, MVT::i32));
  return std::make_pair(Result, Chain);
/// getCondBrOpcodeForX86CC - Returns the X86 conditional branch opcode
/// which corresponds to the condition code.
static unsigned getCondBrOpcodeForX86CC(unsigned X86CC) {
  // Straight one-to-one table from X86ISD::COND_* to the matching JCC
  // machine opcode.
  default: assert(0 && "Unknown X86 conditional code!");
  case X86ISD::COND_A:  return X86::JA;
  case X86ISD::COND_AE: return X86::JAE;
  case X86ISD::COND_B:  return X86::JB;
  case X86ISD::COND_BE: return X86::JBE;
  case X86ISD::COND_E:  return X86::JE;
  case X86ISD::COND_G:  return X86::JG;
  case X86ISD::COND_GE: return X86::JGE;
  case X86ISD::COND_L:  return X86::JL;
  case X86ISD::COND_LE: return X86::JLE;
  case X86ISD::COND_NE: return X86::JNE;
  case X86ISD::COND_NO: return X86::JNO;
  case X86ISD::COND_NP: return X86::JNP;
  case X86ISD::COND_NS: return X86::JNS;
  case X86ISD::COND_O:  return X86::JO;
  case X86ISD::COND_P:  return X86::JP;
  case X86ISD::COND_S:  return X86::JS;
/// getX86CC - do a one to one translation of a ISD::CondCode to the X86
/// specific condition code. It returns a X86ISD::COND_INVALID if it cannot
/// do a direct translation.
static unsigned getX86CC(SDOperand CC, bool isFP) {
  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  unsigned X86CC = X86ISD::COND_INVALID;
  // Integer compares: signed predicates map onto the G/L conditions,
  // unsigned predicates onto the carry-based A/B conditions.
  switch (SetCCOpcode) {
  case ISD::SETEQ:  X86CC = X86ISD::COND_E;  break;
  case ISD::SETGT:  X86CC = X86ISD::COND_G;  break;
  case ISD::SETGE:  X86CC = X86ISD::COND_GE; break;
  case ISD::SETLT:  X86CC = X86ISD::COND_L;  break;
  case ISD::SETLE:  X86CC = X86ISD::COND_LE; break;
  case ISD::SETNE:  X86CC = X86ISD::COND_NE; break;
  case ISD::SETULT: X86CC = X86ISD::COND_B;  break;
  case ISD::SETUGT: X86CC = X86ISD::COND_A;  break;
  case ISD::SETULE: X86CC = X86ISD::COND_BE; break;
  case ISD::SETUGE: X86CC = X86ISD::COND_AE; break;
  // On a floating point condition, the flags are set as follows:
  //  ZF  PF  CF   op
  //   0 | 0 | 0 | X > Y
  //   0 | 0 | 1 | X < Y
  //   1 | 0 | 0 | X == Y
  //   1 | 1 | 1 | unordered
  // so FP compares reuse the unsigned (A/B) conditions.
  switch (SetCCOpcode) {
  case ISD::SETEQ:  X86CC = X86ISD::COND_E;  break;
  case ISD::SETGT:  X86CC = X86ISD::COND_A;  break;
  case ISD::SETGE:  X86CC = X86ISD::COND_AE; break;
  case ISD::SETLT:  X86CC = X86ISD::COND_B;  break;
  case ISD::SETLE:  X86CC = X86ISD::COND_BE; break;
  case ISD::SETNE:  X86CC = X86ISD::COND_NE; break;
  case ISD::SETUO:  X86CC = X86ISD::COND_P;  break;  // unordered -> PF set
  case ISD::SETO:   X86CC = X86ISD::COND_NP; break;  // ordered -> PF clear
/// hasFPCMov - is there a floating point cmov for the specific X86 condition
/// code. Current x86 isa includes the following FP cmov instructions:
/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
static bool hasFPCMov(unsigned X86CC) {
  // These are exactly the eight conditions covered by the FCMOVcc family
  // listed above.
  case X86ISD::COND_B:
  case X86ISD::COND_BE:
  case X86ISD::COND_E:
  case X86ISD::COND_P:
  case X86ISD::COND_A:
  case X86ISD::COND_AE:
  case X86ISD::COND_NE:
  case X86ISD::COND_NP:
/// InsertAtEndOfBasicBlock - Expand a CMOV_FR32/CMOV_FR64 pseudo instruction
/// by building an explicit branch diamond (this-block -> copy0MBB -> sinkMBB)
/// whose result is merged with a PHI in the sink block.
X86TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
                                           MachineBasicBlock *BB) {
  assert((MI->getOpcode() == X86::CMOV_FR32 ||
          MI->getOpcode() == X86::CMOV_FR64) &&
         "Unexpected instr type to insert");
  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
  // control-flow pattern. The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  ilist<MachineBasicBlock>::iterator It = BB;
  //   cmpTY ccX, r1, r2
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
  // Operand 3 carries the X86 condition code; when it holds, branch straight
  // to the sink block, otherwise fall through into copy0MBB.
  unsigned Opc = getCondBrOpcodeForX86CC(MI->getOperand(3).getImmedValue());
  BuildMI(BB, Opc, 1).addMBB(sinkMBB);
  MachineFunction *F = BB->getParent();
  F->getBasicBlockList().insert(It, copy0MBB);
  F->getBasicBlockList().insert(It, sinkMBB);
  // Update machine-CFG edges
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  // Operand 0 is the destination vreg; operands 1/2 are the false/true
  // source vregs respectively.
  BuildMI(BB, X86::PHI, 4, MI->getOperand(0).getReg())
    .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
  delete MI;   // The pseudo instruction is gone now.
1315 //===----------------------------------------------------------------------===//
1316 // X86 Custom Lowering Hooks
1317 //===----------------------------------------------------------------------===//
1319 /// LowerOperation - Provide custom lowering hooks for some operations.
SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
  switch (Op.getOpcode()) {
  default: assert(0 && "Should not custom lower this!");
  case ISD::ADD_PARTS:
  case ISD::SUB_PARTS: {
    // i64 add/sub split into 32-bit halves: ADD_FLAG/SUB_FLAG produce the
    // low result plus a flag, which ADC/SBB consume as the carry/borrow for
    // the high half.
    assert(Op.getNumOperands() == 4 && Op.getValueType() == MVT::i32 &&
           "Not an i64 add/sub!");
    bool isAdd = Op.getOpcode() == ISD::ADD_PARTS;
    std::vector<MVT::ValueType> Tys;
    Tys.push_back(MVT::i32);
    Tys.push_back(MVT::Flag);
    std::vector<SDOperand> Ops;
    Ops.push_back(Op.getOperand(0));
    Ops.push_back(Op.getOperand(2));
    SDOperand Lo = DAG.getNode(isAdd ? X86ISD::ADD_FLAG : X86ISD::SUB_FLAG,
    SDOperand Hi = DAG.getNode(isAdd ? X86ISD::ADC : X86ISD::SBB, MVT::i32,
                               Op.getOperand(1), Op.getOperand(3),
    Tys.push_back(MVT::i32);
    Tys.push_back(MVT::i32);
    // Hand both halves back as one multi-value node.
    return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops);
  case ISD::SHL_PARTS:
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS: {
    // i64 shifts: build the "amount < 32" result with SHLD/SHRD plus a plain
    // shift, then CMOV between that and the "amount >= 32" result based on
    // TEST(ShAmt, 32).
    assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 &&
           "Not an i64 shift!");
    bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
    SDOperand ShOpLo = Op.getOperand(0);
    SDOperand ShOpHi = Op.getOperand(1);
    SDOperand ShAmt  = Op.getOperand(2);
    // For arithmetic right shifts the >=32 fill value is the sign-spread of
    // the high word; otherwise it is zero.
    SDOperand Tmp1 = isSRA ? DAG.getNode(ISD::SRA, MVT::i32, ShOpHi,
                                         DAG.getConstant(31, MVT::i32))
                           : DAG.getConstant(0, MVT::i32);
    SDOperand Tmp2, Tmp3;
    if (Op.getOpcode() == ISD::SHL_PARTS) {
      Tmp2 = DAG.getNode(X86ISD::SHLD, MVT::i32, ShOpHi, ShOpLo, ShAmt);
      Tmp3 = DAG.getNode(ISD::SHL, MVT::i32, ShOpLo, ShAmt);
      Tmp2 = DAG.getNode(X86ISD::SHRD, MVT::i32, ShOpLo, ShOpHi, ShAmt);
      // NOTE(review): for SRL_PARTS the non-SRA opcode chosen here is
      // ISD::SHL; a logical right shift (ISD::SRL) of the high word looks
      // intended -- verify against upstream.
      Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SHL, MVT::i32, ShOpHi, ShAmt);
    // Bit 5 of the shift amount distinguishes amounts >= 32.
    SDOperand InFlag = DAG.getNode(X86ISD::TEST, MVT::Flag,
                                   ShAmt, DAG.getConstant(32, MVT::i8));
    SDOperand CC = DAG.getConstant(X86ISD::COND_NE, MVT::i8);
    std::vector<MVT::ValueType> Tys;
    Tys.push_back(MVT::i32);
    Tys.push_back(MVT::Flag);
    std::vector<SDOperand> Ops;
    if (Op.getOpcode() == ISD::SHL_PARTS) {
      // Shift left: select Hi from {SHLD result, plain-shift lo}, then Lo
      // from {plain-shift lo, fill value}, chaining the flag between CMOVs.
      Ops.push_back(Tmp2);
      Ops.push_back(Tmp3);
      Ops.push_back(InFlag);
      Hi = DAG.getNode(X86ISD::CMOV, Tys, Ops);
      InFlag = Hi.getValue(1);
      Ops.push_back(Tmp3);
      Ops.push_back(Tmp1);
      Ops.push_back(InFlag);
      Lo = DAG.getNode(X86ISD::CMOV, Tys, Ops);
      // Shift right: mirrored selection for Lo then Hi.
      Ops.push_back(Tmp2);
      Ops.push_back(Tmp3);
      Ops.push_back(InFlag);
      Lo = DAG.getNode(X86ISD::CMOV, Tys, Ops);
      InFlag = Lo.getValue(1);
      Ops.push_back(Tmp3);
      Ops.push_back(Tmp1);
      Ops.push_back(InFlag);
      Hi = DAG.getNode(X86ISD::CMOV, Tys, Ops);
    Tys.push_back(MVT::i32);
    Tys.push_back(MVT::i32);
    return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops);
  case ISD::SINT_TO_FP: {
    assert(Op.getValueType() == MVT::f64 &&
           Op.getOperand(0).getValueType() <= MVT::i64 &&
           Op.getOperand(0).getValueType() >= MVT::i16 &&
           "Unknown SINT_TO_FP to lower!");
    // Spill the integer to a stack slot sized/aligned to the source type,
    // then FILD it into an FP register.
    MVT::ValueType SrcVT = Op.getOperand(0).getValueType();
    unsigned Size = MVT::getSizeInBits(SrcVT)/8;
    MachineFunction &MF = DAG.getMachineFunction();
    int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
    SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
    SDOperand Chain = DAG.getNode(ISD::STORE, MVT::Other,
                                  DAG.getEntryNode(), Op.getOperand(0),
                                  StackSlot, DAG.getSrcValue(NULL));
    std::vector<MVT::ValueType> Tys;
    Tys.push_back(MVT::f64);
    Tys.push_back(MVT::Flag);
    std::vector<SDOperand> Ops;
    Ops.push_back(Chain);
    Ops.push_back(StackSlot);
    // Carry the source width as a VALUETYPE operand so instruction selection
    // can pick the matching FILD variant.
    Ops.push_back(DAG.getValueType(SrcVT));
    Result = DAG.getNode(X86ISD::FILD, Tys, Ops);
  case ISD::FP_TO_SINT: {
    assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 &&
           Op.getOperand(0).getValueType() == MVT::f64 &&
           "Unknown FP_TO_SINT to lower!");
    // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary
    MachineFunction &MF = DAG.getMachineFunction();
    unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8;
    int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
    SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
    // Pick the FP_TO_INT*_IN_MEM node matching the destination width.
    switch (Op.getValueType()) {
    default: assert(0 && "Invalid FP_TO_SINT to lower!");
    case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
    case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
    case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
    // Build the FP_TO_INT*_IN_MEM
    std::vector<SDOperand> Ops;
    Ops.push_back(DAG.getEntryNode());
    Ops.push_back(Op.getOperand(0));
    Ops.push_back(StackSlot);
    SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops);
    // Reload the converted integer from the temporary slot.
    return DAG.getLoad(Op.getValueType(), FIST, StackSlot,
                       DAG.getSrcValue(NULL));
  case ISD::READCYCLECOUNTER: {
    // RDTSC leaves the timestamp in EDX:EAX; copy both registers out and
    // merge them (plus the chain) into one node.
    std::vector<MVT::ValueType> Tys;
    Tys.push_back(MVT::Other);
    Tys.push_back(MVT::Flag);
    std::vector<SDOperand> Ops;
    Ops.push_back(Op.getOperand(0));
    SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, Ops);
    Ops.push_back(DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)));
    Ops.push_back(DAG.getCopyFromReg(Ops[0].getValue(1), X86::EDX,
                                     MVT::i32, Ops[0].getValue(2)));
    Ops.push_back(Ops[1].getValue(1));
    Tys[0] = Tys[1] = MVT::i32;
    Tys.push_back(MVT::Other);
    return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops);
    assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
    SDOperand CC = Op.getOperand(2);
    SDOperand Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
                                 Op.getOperand(0), Op.getOperand(1));
    ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
    bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType());
    // Directly-translatable conditions lower to a single SETCC node.
    unsigned X86CC = getX86CC(CC, isFP);
    if (X86CC != X86ISD::COND_INVALID) {
      return DAG.getNode(X86ISD::SETCC, MVT::i8,
                         DAG.getConstant(X86CC, MVT::i8), Cond);
    assert(isFP && "Illegal integer SetCC!");
    // Remaining FP conditions need two flag tests combined with AND/OR;
    // PF distinguishes the unordered case.
    std::vector<MVT::ValueType> Tys;
    std::vector<SDOperand> Ops;
    switch (SetCCOpcode) {
    default: assert(false && "Illegal floating point SetCC!");
    case ISD::SETOEQ: {  // !PF & ZF
      Tys.push_back(MVT::i8);
      Tys.push_back(MVT::Flag);
      Ops.push_back(DAG.getConstant(X86ISD::COND_NP, MVT::i8));
      Ops.push_back(Cond);
      SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
      SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
                                   DAG.getConstant(X86ISD::COND_E, MVT::i8),
      return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2);
    case ISD::SETOLT: {  // !PF & CF
      Tys.push_back(MVT::i8);
      Tys.push_back(MVT::Flag);
      Ops.push_back(DAG.getConstant(X86ISD::COND_NP, MVT::i8));
      Ops.push_back(Cond);
      SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
      SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
                                   DAG.getConstant(X86ISD::COND_B, MVT::i8),
      return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2);
    case ISD::SETOLE: {  // !PF & (CF || ZF)
      Tys.push_back(MVT::i8);
      Tys.push_back(MVT::Flag);
      Ops.push_back(DAG.getConstant(X86ISD::COND_NP, MVT::i8));
      Ops.push_back(Cond);
      SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
      SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
                                   DAG.getConstant(X86ISD::COND_BE, MVT::i8),
      return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2);
    case ISD::SETUGT: {  // PF | (!ZF & !CF)
      Tys.push_back(MVT::i8);
      Tys.push_back(MVT::Flag);
      Ops.push_back(DAG.getConstant(X86ISD::COND_P, MVT::i8));
      Ops.push_back(Cond);
      SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
      SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
                                   DAG.getConstant(X86ISD::COND_A, MVT::i8),
      return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2);
    case ISD::SETUGE: {  // PF | !CF
      Tys.push_back(MVT::i8);
      Tys.push_back(MVT::Flag);
      Ops.push_back(DAG.getConstant(X86ISD::COND_P, MVT::i8));
      Ops.push_back(Cond);
      SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
      SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
                                   DAG.getConstant(X86ISD::COND_AE, MVT::i8),
      return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2);
    case ISD::SETUNE: {  // PF | !ZF
      Tys.push_back(MVT::i8);
      Tys.push_back(MVT::Flag);
      Ops.push_back(DAG.getConstant(X86ISD::COND_P, MVT::i8));
      Ops.push_back(Cond);
      SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
      SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
                                   DAG.getConstant(X86ISD::COND_NE, MVT::i8),
      return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2);
    // (select lowering)
    MVT::ValueType VT = Op.getValueType();
    bool isFP = MVT::isFloatingPoint(VT);
    bool isFPStack = isFP && (X86Vector < SSE2);
    bool isFPSSE = isFP && (X86Vector >= SSE2);
    bool addTest = false;
    SDOperand Op0 = Op.getOperand(0);
    if (Op0.getOpcode() == X86ISD::SETCC) {
      // If condition flag is set by a X86ISD::CMP, then make a copy of it
      // (since flag operand cannot be shared). If the X86ISD::SETCC does not
      // have another use it will be eliminated.
      // If the X86ISD::SETCC has more than one use, then it's probably better
      // to use a test instead of duplicating the X86ISD::CMP (for register
      // pressure reason).
      if (Op0.hasOneUse() && Op0.getOperand(1).getOpcode() == X86ISD::CMP) {
        CC = Op0.getOperand(0);
        Cond = Op0.getOperand(1);
        // x87 CMOV only exists for a subset of conditions; fall back to a
        // test when the condition has no FCMOVcc form.
          isFPStack && !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
    } else if (Op0.getOpcode() == ISD::SETCC) {
      CC = Op0.getOperand(2);
      bool isFP = MVT::isFloatingPoint(Op0.getOperand(1).getValueType());
      unsigned X86CC = getX86CC(CC, isFP);
      CC = DAG.getConstant(X86CC, MVT::i8);
      Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
                         Op0.getOperand(0), Op0.getOperand(1));
      // Condition is a plain boolean: compare it against zero.
      CC = DAG.getConstant(X86ISD::COND_NE, MVT::i8);
      Cond = DAG.getNode(X86ISD::TEST, MVT::Flag, Op0, Op0);
    std::vector<MVT::ValueType> Tys;
    Tys.push_back(Op.getValueType());
    Tys.push_back(MVT::Flag);
    std::vector<SDOperand> Ops;
    // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
    // condition is true.
    Ops.push_back(Op.getOperand(2));
    Ops.push_back(Op.getOperand(1));
    Ops.push_back(Cond);
    return DAG.getNode(X86ISD::CMOV, Tys, Ops);
    // (conditional branch lowering)
    bool addTest = false;
    SDOperand Cond = Op.getOperand(1);
    SDOperand Dest = Op.getOperand(2);
    if (Cond.getOpcode() == X86ISD::SETCC) {
      // If condition flag is set by a X86ISD::CMP, then make a copy of it
      // (since flag operand cannot be shared). If the X86ISD::SETCC does not
      // have another use it will be eliminated.
      // If the X86ISD::SETCC has more than one use, then it's probably better
      // to use a test instead of duplicating the X86ISD::CMP (for register
      // pressure reason).
      if (Cond.hasOneUse() && Cond.getOperand(1).getOpcode() == X86ISD::CMP) {
        CC = Cond.getOperand(0);
        Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
                           Cond.getOperand(1).getOperand(0),
                           Cond.getOperand(1).getOperand(1));
    } else if (Cond.getOpcode() == ISD::SETCC) {
      CC = Cond.getOperand(2);
      bool isFP = MVT::isFloatingPoint(Cond.getOperand(1).getValueType());
      unsigned X86CC = getX86CC(CC, isFP);
      CC = DAG.getConstant(X86CC, MVT::i8);
      Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
                         Cond.getOperand(0), Cond.getOperand(1));
      // Condition is a plain boolean: compare it against zero.
      CC = DAG.getConstant(X86ISD::COND_NE, MVT::i8);
      Cond = DAG.getNode(X86ISD::TEST, MVT::Flag, Cond, Cond);
    return DAG.getNode(X86ISD::BRCOND, Op.getValueType(),
                       Op.getOperand(0), Op.getOperand(2), CC, Cond);
    // Can only be return void.
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Op.getOperand(0),
                       DAG.getConstant(getBytesToPopOnReturn(), MVT::i16));
    // (memset lowering -> single REP STOS)
    SDOperand Chain = Op.getOperand(0);
      (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
    if (Align == 0) Align = 1;
    if (ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2))) {
      unsigned Val = ValC->getValue() & 255;
      // If the value is a constant, then we can potentially use larger sets.
      switch (Align & 3) {
      case 2:   // WORD aligned
        // Store 16 bits at a time: halve the count and replicate the byte.
        if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
          Count = DAG.getConstant(I->getValue() / 2, MVT::i32);
          Count = DAG.getNode(ISD::SRL, MVT::i32, Op.getOperand(3),
                              DAG.getConstant(1, MVT::i8));
        Val = (Val << 8) | Val;
      case 0:   // DWORD aligned
        // Store 32 bits at a time: quarter the count and splat the byte
        // across all four lanes.
        if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
          Count = DAG.getConstant(I->getValue() / 4, MVT::i32);
          Count = DAG.getNode(ISD::SRL, MVT::i32, Op.getOperand(3),
                              DAG.getConstant(2, MVT::i8));
        Val = (Val << 8) | Val;
        Val = (Val << 16) | Val;
      default:  // Byte aligned
        Count = Op.getOperand(3);
      // REP STOS takes the fill value in AL/AX/EAX.
      Chain = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT),
      InFlag = Chain.getValue(1);
      // Non-constant fill value: byte-at-a-time through AL.
      Count  = Op.getOperand(3);
      Chain  = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag);
      InFlag = Chain.getValue(1);
    // REP STOS implicit inputs: count in ECX, destination in EDI.
    Chain  = DAG.getCopyToReg(Chain, X86::ECX, Count, InFlag);
    InFlag = Chain.getValue(1);
    Chain  = DAG.getCopyToReg(Chain, X86::EDI, Op.getOperand(1), InFlag);
    InFlag = Chain.getValue(1);
    return DAG.getNode(X86ISD::REP_STOS, MVT::Other, Chain,
                       DAG.getValueType(AVT), InFlag);
    // (memcpy lowering -> single REP MOVS)
    SDOperand Chain = Op.getOperand(0);
      (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
    if (Align == 0) Align = 1;
    // Pick the widest element the alignment allows and scale the count.
    switch (Align & 3) {
    case 2:   // WORD aligned
      if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
        Count = DAG.getConstant(I->getValue() / 2, MVT::i32);
        Count = DAG.getNode(ISD::SRL, MVT::i32, Op.getOperand(3),
                            DAG.getConstant(1, MVT::i8));
    case 0:   // DWORD aligned
      if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
        Count = DAG.getConstant(I->getValue() / 4, MVT::i32);
        Count = DAG.getNode(ISD::SRL, MVT::i32, Op.getOperand(3),
                            DAG.getConstant(2, MVT::i8));
    default:  // Byte aligned
      Count = Op.getOperand(3);
    // REP MOVS implicit inputs: count in ECX, destination in EDI, source
    // in ESI.
    Chain  = DAG.getCopyToReg(Chain, X86::ECX, Count, InFlag);
    InFlag = Chain.getValue(1);
    Chain  = DAG.getCopyToReg(Chain, X86::EDI, Op.getOperand(1), InFlag);
    InFlag = Chain.getValue(1);
    Chain  = DAG.getCopyToReg(Chain, X86::ESI, Op.getOperand(2), InFlag);
    InFlag = Chain.getValue(1);
    return DAG.getNode(X86ISD::REP_MOVS, MVT::Other, Chain,
                       DAG.getValueType(AVT), InFlag);
  case ISD::GlobalAddress: {
    GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
    // For Darwin, external and weak symbols are indirect, so we want to load
    // the value at address GV, not the value of GV itself. This means that
    // the GlobalAddress must be in the base or index register of the address,
    // not the GV offset field.
    if (getTargetMachine().
        getSubtarget<X86Subtarget>().getIndirectExternAndWeakGlobals() &&
        (GV->hasWeakLinkage() || GV->isExternal()))
      Result = DAG.getLoad(MVT::i32, DAG.getEntryNode(),
                           DAG.getTargetGlobalAddress(GV, getPointerTy()),
                           DAG.getSrcValue(NULL));
1790 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
1792 default: return NULL;
1793 case X86ISD::ADD_FLAG: return "X86ISD::ADD_FLAG";
1794 case X86ISD::SUB_FLAG: return "X86ISD::SUB_FLAG";
1795 case X86ISD::ADC: return "X86ISD::ADC";
1796 case X86ISD::SBB: return "X86ISD::SBB";
1797 case X86ISD::SHLD: return "X86ISD::SHLD";
1798 case X86ISD::SHRD: return "X86ISD::SHRD";
1799 case X86ISD::FILD: return "X86ISD::FILD";
1800 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
1801 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
1802 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
1803 case X86ISD::FLD: return "X86ISD::FLD";
1804 case X86ISD::FST: return "X86ISD::FST";
1805 case X86ISD::FP_GET_RESULT: return "X86ISD::FP_GET_RESULT";
1806 case X86ISD::FP_SET_RESULT: return "X86ISD::FP_SET_RESULT";
1807 case X86ISD::CALL: return "X86ISD::CALL";
1808 case X86ISD::TAILCALL: return "X86ISD::TAILCALL";
1809 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
1810 case X86ISD::CMP: return "X86ISD::CMP";
1811 case X86ISD::TEST: return "X86ISD::TEST";
1812 case X86ISD::SETCC: return "X86ISD::SETCC";
1813 case X86ISD::CMOV: return "X86ISD::CMOV";
1814 case X86ISD::BRCOND: return "X86ISD::BRCOND";
1815 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
1816 case X86ISD::REP_STOS: return "X86ISD::RET_STOS";
1817 case X86ISD::REP_MOVS: return "X86ISD::RET_MOVS";
1821 bool X86TargetLowering::isMaskedValueZeroForTargetNode(const SDOperand &Op,
1822 uint64_t Mask) const {
1824 unsigned Opc = Op.getOpcode();
1828 assert(Opc >= ISD::BUILTIN_OP_END && "Expected a target specific node");
1830 case X86ISD::SETCC: return (Mask & 1) == 0;