1 //===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by Chris Lattner and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines the interfaces that X86 uses to lower LLVM code into a
13 //===----------------------------------------------------------------------===//
16 #include "X86InstrBuilder.h"
17 #include "X86ISelLowering.h"
18 #include "X86TargetMachine.h"
19 #include "llvm/CallingConv.h"
20 #include "llvm/Function.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/MachineFunction.h"
23 #include "llvm/CodeGen/MachineInstrBuilder.h"
24 #include "llvm/CodeGen/SelectionDAG.h"
25 #include "llvm/CodeGen/SSARegMap.h"
26 #include "llvm/Target/TargetOptions.h"
30 #include "llvm/Support/CommandLine.h"
// Command-line flag gating the X86 'fast' calling convention lowering
// paths (LowerFastCCArguments / LowerFastCCCallTo below). Hidden because
// fastcc support was experimental at this point.
31 static cl::opt<bool> EnableFastCC("enable-x86-fastcc", cl::Hidden,
32 cl::desc("Enable fastcc on X86"));
// X86TargetLowering constructor - Configure how generic SelectionDAG
// operations are legalized for X86: which MVTs live in which register
// classes, and which ISD opcodes are Legal/Promote/Expand/Custom.
//
// NOTE(review): this excerpt has elided lines (the embedded original line
// numbers are non-contiguous). Several operations below are registered
// twice with different actions (e.g. i32 UINT_TO_FP Expand then Promote);
// presumably those were the branches of X86ScalarSSE if/else guards that
// are not visible here -- confirm against the full file before editing.
34 X86TargetLowering::X86TargetLowering(TargetMachine &TM)
35 : TargetLowering(TM) {
// Cache the subtarget; X86ScalarSSE selects scalar FP lowering in SSE
// (xmm) registers instead of the x87 FP stack. Requires SSE2.
36 Subtarget = &TM.getSubtarget<X86Subtarget>();
37 X86ScalarSSE = Subtarget->hasSSE2();
39 // Set up the TargetLowering object.
41 // X86 is weird, it always uses i8 for shift amounts and setcc results.
42 setShiftAmountType(MVT::i8);
43 setSetCCResultType(MVT::i8);
44 setSetCCResultContents(ZeroOrOneSetCCResult);
45 setSchedulingPreference(SchedulingForRegPressure);
46 setShiftAmountFlavor(Mask); // shl X, 32 == shl X, 0
47 setStackPointerRegisterToSaveRestore(X86::ESP);
49 // Set up the integer register classes.
50 addRegisterClass(MVT::i8, X86::R8RegisterClass);
51 addRegisterClass(MVT::i16, X86::R16RegisterClass);
52 addRegisterClass(MVT::i32, X86::R32RegisterClass);
54 // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
// operation natively on the narrow integer types.
56 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
57 setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
58 setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);
61 // No SSE i64 SINT_TO_FP, so expand i32 UINT_TO_FP instead.
62 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Expand);
// NOTE(review): alternative (non-SSE?) action for the same VT -- the
// guarding condition is elided from this excerpt.
64 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
66 // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
// them on the narrow types.
68 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
69 setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);
71 // SSE has no i16 to fp conversion, only i32
72 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
73 else if (!X86PatIsel) {
74 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom);
75 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
78 // We can handle SINT_TO_FP and FP_TO_SINT from/to i64 even though i64
// is not a legal type -- both are marked Custom below.
80 setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
81 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
83 // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have
// them on the narrow types.
85 setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
86 setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);
89 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
91 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom);
92 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
95 // Handle FP_TO_UINT by promoting the destination to a larger signed
// conversion (a wider FP_TO_SINT).
97 setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote);
98 setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote);
99 setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote);
102 // Expand FP_TO_UINT into a select.
103 // FIXME: We would like to use a Custom expander here eventually to do
104 // the optimal thing for SSE vs. the default expansion in the legalizer.
105 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand);
// NOTE(review): alternative action for the same VT; guard elided here.
107 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
// Bit casts between f32 and i32 go through memory / integer ops.
109 setOperationAction(ISD::BIT_CONVERT , MVT::f32 , Expand);
110 setOperationAction(ISD::BIT_CONVERT , MVT::i32 , Expand);
// Branches, two-way branches, and various ops X86 has no instruction for.
113 setOperationAction(ISD::BRCOND , MVT::Other, Custom);
115 setOperationAction(ISD::BRCONDTWOWAY , MVT::Other, Expand);
116 setOperationAction(ISD::BRTWOWAY_CC , MVT::Other, Expand);
117 setOperationAction(ISD::MEMMOVE , MVT::Other, Expand);
118 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Expand);
119 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
120 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
121 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
122 setOperationAction(ISD::SEXTLOAD , MVT::i1 , Expand);
123 setOperationAction(ISD::FREM , MVT::f64 , Expand);
// No native popcount / count-trailing / count-leading on any int width.
124 setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
125 setOperationAction(ISD::CTTZ , MVT::i8 , Expand);
126 setOperationAction(ISD::CTLZ , MVT::i8 , Expand);
127 setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
128 setOperationAction(ISD::CTTZ , MVT::i16 , Expand);
129 setOperationAction(ISD::CTLZ , MVT::i16 , Expand);
130 setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
131 setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
132 setOperationAction(ISD::CTLZ , MVT::i32 , Expand);
133 setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);
136 setOperationAction(ISD::BSWAP , MVT::i32 , Expand);
// Rotates are expanded (presumably guarded by a pattern-isel flag whose
// condition is elided from this excerpt).
137 setOperationAction(ISD::ROTL , MVT::i8 , Expand);
138 setOperationAction(ISD::ROTR , MVT::i8 , Expand);
139 setOperationAction(ISD::ROTL , MVT::i16 , Expand);
140 setOperationAction(ISD::ROTR , MVT::i16 , Expand);
141 setOperationAction(ISD::ROTL , MVT::i32 , Expand);
142 setOperationAction(ISD::ROTR , MVT::i32 , Expand);
144 setOperationAction(ISD::BSWAP , MVT::i16 , Expand);
// X86 has no memory-mapped-I/O instructions for these.
146 setOperationAction(ISD::READIO , MVT::i1 , Expand);
147 setOperationAction(ISD::READIO , MVT::i8 , Expand);
148 setOperationAction(ISD::READIO , MVT::i16 , Expand);
149 setOperationAction(ISD::READIO , MVT::i32 , Expand);
150 setOperationAction(ISD::WRITEIO , MVT::i1 , Expand);
151 setOperationAction(ISD::WRITEIO , MVT::i8 , Expand);
152 setOperationAction(ISD::WRITEIO , MVT::i16 , Expand);
153 setOperationAction(ISD::WRITEIO , MVT::i32 , Expand);
155 // These should be promoted to a larger select which is supported.
156 setOperationAction(ISD::SELECT , MVT::i1 , Promote);
157 setOperationAction(ISD::SELECT , MVT::i8 , Promote);
159 // X86 wants to expand cmov itself.
160 setOperationAction(ISD::SELECT , MVT::i16 , Custom);
161 setOperationAction(ISD::SELECT , MVT::i32 , Custom);
162 setOperationAction(ISD::SELECT , MVT::f32 , Custom);
163 setOperationAction(ISD::SELECT , MVT::f64 , Custom);
164 setOperationAction(ISD::SETCC , MVT::i8 , Custom);
165 setOperationAction(ISD::SETCC , MVT::i16 , Custom);
166 setOperationAction(ISD::SETCC , MVT::i32 , Custom);
167 setOperationAction(ISD::SETCC , MVT::f32 , Custom);
168 setOperationAction(ISD::SETCC , MVT::f64 , Custom);
169 // X86 ret instruction may pop stack.
170 setOperationAction(ISD::RET , MVT::Other, Custom);
172 setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom);
173 // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
174 setOperationAction(ISD::ADD_PARTS , MVT::i32 , Custom);
175 setOperationAction(ISD::SUB_PARTS , MVT::i32 , Custom);
176 setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom);
177 setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom);
178 setOperationAction(ISD::SRL_PARTS , MVT::i32 , Custom);
179 // X86 wants to expand memset / memcpy itself.
180 setOperationAction(ISD::MEMSET , MVT::Other, Custom);
181 setOperationAction(ISD::MEMCPY , MVT::Other, Custom);
184 // We don't have line number support yet.
185 setOperationAction(ISD::LOCATION, MVT::Other, Expand);
186 setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
187 setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);
189 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
// computed in LowerCCCArguments.
190 setOperationAction(ISD::VASTART , MVT::Other, Custom);
192 // Use the default implementation.
193 setOperationAction(ISD::VAARG , MVT::Other, Expand);
194 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
195 setOperationAction(ISD::VAEND , MVT::Other, Expand);
196 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
197 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
198 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand);
// SSE scalar-FP path: f32/f64 live in xmm registers (FR32/FR64).
// NOTE(review): the X86ScalarSSE guard for this section is elided here.
201 // Set up the FP register classes.
202 addRegisterClass(MVT::f32, X86::FR32RegisterClass);
203 addRegisterClass(MVT::f64, X86::FR64RegisterClass);
205 // SSE has no load+extend ops
206 setOperationAction(ISD::EXTLOAD, MVT::f32, Expand);
207 setOperationAction(ISD::ZEXTLOAD, MVT::f32, Expand);
209 // We don't support sin/cos/sqrt/fmod
210 setOperationAction(ISD::FSIN , MVT::f64, Expand);
211 setOperationAction(ISD::FCOS , MVT::f64, Expand);
212 setOperationAction(ISD::FABS , MVT::f64, Expand);
213 setOperationAction(ISD::FNEG , MVT::f64, Expand);
214 setOperationAction(ISD::FREM , MVT::f64, Expand);
215 setOperationAction(ISD::FSIN , MVT::f32, Expand);
216 setOperationAction(ISD::FCOS , MVT::f32, Expand);
217 setOperationAction(ISD::FABS , MVT::f32, Expand);
218 setOperationAction(ISD::FNEG , MVT::f32, Expand);
219 setOperationAction(ISD::FREM , MVT::f32, Expand);
221 // Expand FP immediates into loads from the stack, except for the special
// cases registered with addLegalFPImmediate below.
223 setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
224 setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
225 addLegalFPImmediate(+0.0); // xorps / xorpd
// x87 FP-stack path: f64 lives in the RFP register class.
227 // Set up the FP register classes.
228 addRegisterClass(MVT::f64, X86::RFPRegisterClass);
230 setOperationAction(ISD::UNDEF, MVT::f64, Expand);
233 setOperationAction(ISD::FSIN , MVT::f64 , Expand);
234 setOperationAction(ISD::FCOS , MVT::f64 , Expand);
// FP constants the x87 unit can materialize directly.
237 setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
238 addLegalFPImmediate(+0.0); // FLD0
239 addLegalFPImmediate(+1.0); // FLD1
240 addLegalFPImmediate(-0.0); // FLD0/FCHS
241 addLegalFPImmediate(-1.0); // FLD1/FCHS
243 computeRegisterProperties();
// Thresholds for expanding small memset/memcpy/memmove into stores.
245 maxStoresPerMemSet = 8; // For %llvm.memset -> sequence of stores
246 maxStoresPerMemCpy = 8; // For %llvm.memcpy -> sequence of stores
247 maxStoresPerMemMove = 8; // For %llvm.memmove -> sequence of stores
248 allowUnalignedMemoryAccesses = true; // x86 supports it!
/// LowerArguments - Dispatch formal-argument lowering to the fastcc
/// implementation when the function uses the Fast calling convention and
/// -enable-x86-fastcc is set; otherwise use the C calling convention path.
251 std::vector<SDOperand>
252 X86TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
253 if (F.getCallingConv() == CallingConv::Fast && EnableFastCC)
254 return LowerFastCCArguments(F, DAG);
255 return LowerCCCArguments(F, DAG);
/// LowerCallTo - Lower an outgoing call. Canonicalizes a direct callee to a
/// Target* node so legalize leaves it alone, then dispatches to the fastcc
/// or C-calling-convention call lowering. Returns (result value, chain).
258 std::pair<SDOperand, SDOperand>
259 X86TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy,
260 bool isVarArg, unsigned CallingConv,
262 SDOperand Callee, ArgListTy &Args,
264 assert((!isVarArg || CallingConv == CallingConv::C) &&
265 "Only C takes varargs!");
267 // If the callee is a GlobalAddress node (quite common, every direct call is)
268 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
269 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
270 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
271 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
272 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
274 if (CallingConv == CallingConv::Fast && EnableFastCC)
275 return LowerFastCCCallTo(Chain, RetTy, isTailCall, Callee, Args, DAG);
276 return LowerCCCCallTo(Chain, RetTy, isVarArg, isTailCall, Callee, Args, DAG);
279 //===----------------------------------------------------------------------===//
280 // C Calling Convention implementation
281 //===----------------------------------------------------------------------===//
/// LowerCCCArguments - Lower incoming formal arguments under the C calling
/// convention: every argument lives on the stack, so each one gets a fixed
/// frame object and a load from it. Also records vararg/ret-addr bookkeeping
/// and marks the physical return registers live-out.
///
/// NOTE(review): this excerpt elides several lines (switch headers, some
/// case labels, closing braces) -- the embedded original line numbers skip.
283 std::vector<SDOperand>
284 X86TargetLowering::LowerCCCArguments(Function &F, SelectionDAG &DAG) {
285 std::vector<SDOperand> ArgValues;
287 MachineFunction &MF = DAG.getMachineFunction();
288 MachineFrameInfo *MFI = MF.getFrameInfo();
290 // Add DAG nodes to load the arguments... On entry to a function on the X86,
291 // the stack frame looks like this:
293 // [ESP] -- return address
294 // [ESP + 4] -- first argument (leftmost lexically)
295 // [ESP + 8] -- second argument, if first argument is four bytes in size
298 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
299 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
300 MVT::ValueType ObjectVT = getValueType(I->getType());
301 unsigned ArgIncrement = 4;
// Compute this argument's size and stack-slot increment by value type.
304 default: assert(0 && "Unhandled argument type!");
306 case MVT::i8: ObjSize = 1; break;
307 case MVT::i16: ObjSize = 2; break;
308 case MVT::i32: ObjSize = 4; break;
309 case MVT::i64: ObjSize = ArgIncrement = 8; break;
310 case MVT::f32: ObjSize = 4; break;
311 case MVT::f64: ObjSize = ArgIncrement = 8; break;
313 // Create the frame index object for this incoming parameter...
314 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
316 // Create the SelectionDAG nodes corresponding to a load from this parameter
317 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
319 // Don't codegen dead arguments. FIXME: remove this check when we can nuke
// dead loads; used arguments become loads from the fixed stack object,
// dead ones become a zero constant of the right type.
323 ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
324 DAG.getSrcValue(NULL));
326 if (MVT::isInteger(ObjectVT))
327 ArgValue = DAG.getConstant(0, ObjectVT);
329 ArgValue = DAG.getConstantFP(0, ObjectVT);
331 ArgValues.push_back(ArgValue);
333 ArgOffset += ArgIncrement; // Move on to the next argument...
336 // If the function takes variable number of arguments, make a frame index for
337 // the start of the first vararg value... for expansion of llvm.va_start.
339 VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
340 ReturnAddrIndex = 0; // No return address slot generated yet.
341 BytesToPopOnReturn = 0; // Callee pops nothing.
342 BytesCallerReserves = ArgOffset;
344 // Finally, inform the code generator which regs we return values in.
345 switch (getValueType(F.getReturnType())) {
346 default: assert(0 && "Unknown type!");
347 case MVT::isVoid: break;
// Integer results in EAX (and EDX for 64-bit); FP results on ST0.
352 MF.addLiveOut(X86::EAX);
355 MF.addLiveOut(X86::EAX);
356 MF.addLiveOut(X86::EDX);
360 MF.addLiveOut(X86::ST0);
/// LowerCCCCallTo - Lower an outgoing call under the C calling convention:
/// size the outgoing-argument area, store each argument to the stack below
/// ESP, emit the call wrapped in CALLSEQ_START/CALLSEQ_END, and copy the
/// result out of the convention's return registers (AL/AX/EAX, EAX:EDX, or
/// ST0 via an FST through a stack temporary).
///
/// NOTE(review): this excerpt elides many lines (case labels, if/else
/// guards, closing braces) -- the embedded original line numbers skip.
366 std::pair<SDOperand, SDOperand>
367 X86TargetLowering::LowerCCCCallTo(SDOperand Chain, const Type *RetTy,
368 bool isVarArg, bool isTailCall,
369 SDOperand Callee, ArgListTy &Args,
371 // Count how many bytes are to be pushed on the stack.
372 unsigned NumBytes = 0;
376 Chain = DAG.getNode(ISD::CALLSEQ_START, MVT::Other, Chain,
377 DAG.getConstant(0, getPointerTy()));
// Accumulate NumBytes: 4 bytes per word-sized argument, 8 for i64/f64
// (accumulation cases elided from this excerpt).
379 for (unsigned i = 0, e = Args.size(); i != e; ++i)
380 switch (getValueType(Args[i].second)) {
381 default: assert(0 && "Unknown value type!");
395 Chain = DAG.getNode(ISD::CALLSEQ_START, MVT::Other, Chain,
396 DAG.getConstant(NumBytes, getPointerTy()));
398 // Arguments go on the stack in reverse order, as specified by the ABI.
399 unsigned ArgOffset = 0;
400 SDOperand StackPtr = DAG.getRegister(X86::ESP, MVT::i32);
401 std::vector<SDOperand> Stores;
403 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
// Address of this argument's slot: ESP + ArgOffset.
404 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
405 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
407 switch (getValueType(Args[i].second)) {
408 default: assert(0 && "Unexpected ValueType for argument!");
412 // Promote the integer to 32 bits. If the input type is signed use a
413 // sign extend, otherwise use a zero extend.
414 if (Args[i].second->isSigned())
415 Args[i].first =DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Args[i].first);
417 Args[i].first =DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Args[i].first);
// Store the (possibly promoted) argument value to its stack slot.
422 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
423 Args[i].first, PtrOff,
424 DAG.getSrcValue(NULL)));
429 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
430 Args[i].first, PtrOff,
431 DAG.getSrcValue(NULL)));
// Glue all argument stores together; they are independent of each other.
436 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);
439 std::vector<MVT::ValueType> RetVals;
440 MVT::ValueType RetTyVT = getValueType(RetTy);
441 RetVals.push_back(MVT::Other);
443 // The result values produced have to be legal. Promote the result.
445 case MVT::isVoid: break;
447 RetVals.push_back(RetTyVT);
452 RetVals.push_back(MVT::i32);
456 RetVals.push_back(MVT::f32);
458 RetVals.push_back(MVT::f64);
// i64 results come back as two i32 halves.
461 RetVals.push_back(MVT::i32);
462 RetVals.push_back(MVT::i32);
// Build the call node itself: produces a chain and a flag that the
// return-value copies below consume.
467 std::vector<MVT::ValueType> NodeTys;
468 NodeTys.push_back(MVT::Other); // Returns a chain
469 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
470 std::vector<SDOperand> Ops;
471 Ops.push_back(Chain);
472 Ops.push_back(Callee);
474 // FIXME: Do not generate X86ISD::TAILCALL for now.
475 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops);
476 SDOperand InFlag = Chain.getValue(1);
// End the call sequence. Caller pops NumBytes; callee pops 0.
479 NodeTys.push_back(MVT::Other); // Returns a chain
480 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
482 Ops.push_back(Chain);
483 Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
484 Ops.push_back(DAG.getConstant(0, getPointerTy()));
485 Ops.push_back(InFlag);
486 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, Ops);
487 InFlag = Chain.getValue(1);
// Copy the result out of the ABI return register(s).
490 if (RetTyVT != MVT::isVoid) {
492 default: assert(0 && "Unknown value type to return!");
495 RetVal = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag);
496 Chain = RetVal.getValue(1);
497 if (RetTyVT == MVT::i1)
498 RetVal = DAG.getNode(ISD::TRUNCATE, MVT::i1, RetVal);
501 RetVal = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag);
502 Chain = RetVal.getValue(1);
505 RetVal = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
506 Chain = RetVal.getValue(1);
// i64: low half in EAX, high half in EDX, repacked as a BUILD_PAIR.
509 SDOperand Lo = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
510 SDOperand Hi = DAG.getCopyFromReg(Lo.getValue(1), X86::EDX, MVT::i32,
512 RetVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Lo, Hi);
513 Chain = Hi.getValue(1);
// FP result: fetch from ST0 via FP_GET_RESULT, then store/reload through
// a stack temporary so the value leaves the x87 stack.
518 std::vector<MVT::ValueType> Tys;
519 Tys.push_back(MVT::f64);
520 Tys.push_back(MVT::Other);
521 Tys.push_back(MVT::Flag);
522 std::vector<SDOperand> Ops;
523 Ops.push_back(Chain);
524 Ops.push_back(InFlag);
525 RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, Ops);
526 Chain = RetVal.getValue(1);
527 InFlag = RetVal.getValue(2);
529 // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
530 // shouldn't be necessary except that RFP cannot be live across
531 // multiple blocks. When stackifier is fixed, they can be uncoupled.
532 MachineFunction &MF = DAG.getMachineFunction();
533 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
534 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
536 Tys.push_back(MVT::Other);
538 Ops.push_back(Chain);
539 Ops.push_back(RetVal);
540 Ops.push_back(StackSlot);
541 Ops.push_back(DAG.getValueType(RetTyVT));
542 Ops.push_back(InFlag);
543 Chain = DAG.getNode(X86ISD::FST, Tys, Ops);
544 RetVal = DAG.getLoad(RetTyVT, Chain, StackSlot,
545 DAG.getSrcValue(NULL));
546 Chain = RetVal.getValue(1);
549 if (RetTyVT == MVT::f32 && !X86ScalarSSE)
550 // FIXME: we would really like to remember that this FP_ROUND
551 // operation is okay to eliminate if we allow excess FP precision.
552 RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
558 return std::make_pair(RetVal, Chain);
// Alternate (older) call-emission path: encode everything on the call
// node itself. NOTE(review): the guard selecting this path is elided.
560 std::vector<SDOperand> Ops;
561 Ops.push_back(Chain);
562 Ops.push_back(Callee);
563 Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
564 Ops.push_back(DAG.getConstant(0, getPointerTy()));
566 SDOperand TheCall = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
571 case MVT::isVoid: break;
573 ResultVal = TheCall.getValue(1);
578 ResultVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, TheCall.getValue(1));
581 // FIXME: we would really like to remember that this FP_ROUND operation is
582 // okay to eliminate if we allow excess FP precision.
583 ResultVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, TheCall.getValue(1));
586 ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, TheCall.getValue(1),
587 TheCall.getValue(2));
591 Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, TheCall);
592 return std::make_pair(ResultVal, Chain);
596 //===----------------------------------------------------------------------===//
597 // Fast Calling Convention implementation
598 //===----------------------------------------------------------------------===//
600 // The X86 'fast' calling convention passes up to two integer arguments in
601 // registers (an appropriate portion of EAX/EDX), passes arguments in C order,
602 // and requires that the callee pop its arguments off the stack (allowing proper
603 // tail calls), and has the same return value conventions as C calling convs.
605 // This calling convention always arranges for the callee pop value to be 8n+4
606 // bytes, which is needed for tail recursion elimination and stack alignment
609 // Note that this can be enhanced in the future to pass fp vals in registers
610 // (when we have a global fp allocator) and do other tricks.
613 /// AddLiveIn - This helper function adds the specified physical register to the
614 /// MachineFunction as a live in value. It also creates a corresponding virtual
/// register for it and returns that virtual register number.
616 static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
617 TargetRegisterClass *RC) {
618 assert(RC->contains(PReg) && "Not the correct regclass!");
619 unsigned VReg = MF.getSSARegMap()->createVirtualRegister(RC);
620 MF.addLiveIn(PReg, VReg);
/// LowerFastCCArguments - Lower incoming formal arguments under the X86
/// 'fast' calling convention: the first one or two integer arguments arrive
/// in EAX/EDX (sub-registers for i8/i16), remaining arguments on the stack.
/// Records the 8n+4-byte callee-pop amount for tail-call compatibility.
///
/// NOTE(review): this excerpt elides lines (case labels, else branches,
/// closing braces) -- the embedded original line numbers skip.
625 std::vector<SDOperand>
626 X86TargetLowering::LowerFastCCArguments(Function &F, SelectionDAG &DAG) {
627 std::vector<SDOperand> ArgValues;
629 MachineFunction &MF = DAG.getMachineFunction();
630 MachineFrameInfo *MFI = MF.getFrameInfo();
632 // Add DAG nodes to load the arguments... On entry to a function the stack
633 // frame looks like this:
635 // [ESP] -- return address
636 // [ESP + 4] -- first nonreg argument (leftmost lexically)
637 // [ESP + 8] -- second nonreg argument, if first argument is 4 bytes in size
639 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
641 // Keep track of the number of integer regs passed so far. This can be either
642 // 0 (neither EAX or EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
// used).
644 unsigned NumIntRegs = 0;
646 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
647 MVT::ValueType ObjectVT = getValueType(I->getType());
648 unsigned ArgIncrement = 4;
649 unsigned ObjSize = 0;
653 default: assert(0 && "Unhandled argument type!");
// i8 (and i1, truncated) arrives in AL/DL when a register is free.
656 if (NumIntRegs < 2) {
657 if (!I->use_empty()) {
658 unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::DL : X86::AL,
659 X86::R8RegisterClass);
660 ArgValue = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i8);
661 DAG.setRoot(ArgValue.getValue(1));
662 if (ObjectVT == MVT::i1)
663 // FIXME: Should insert a assertzext here.
664 ArgValue = DAG.getNode(ISD::TRUNCATE, MVT::i1, ArgValue);
// i16 arrives in AX/DX when a register is free.
673 if (NumIntRegs < 2) {
674 if (!I->use_empty()) {
675 unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::DX : X86::AX,
676 X86::R16RegisterClass);
677 ArgValue = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i16);
678 DAG.setRoot(ArgValue.getValue(1));
// i32 arrives in EAX/EDX when a register is free.
686 if (NumIntRegs < 2) {
687 if (!I->use_empty()) {
688 unsigned VReg = AddLiveIn(MF,NumIntRegs ? X86::EDX : X86::EAX,
689 X86::R32RegisterClass);
690 ArgValue = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32);
691 DAG.setRoot(ArgValue.getValue(1));
// i64: both halves in EAX:EDX if both registers are free...
699 if (NumIntRegs == 0) {
700 if (!I->use_empty()) {
701 unsigned BotReg = AddLiveIn(MF, X86::EAX, X86::R32RegisterClass);
702 unsigned TopReg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);
704 SDOperand Low = DAG.getCopyFromReg(DAG.getRoot(), BotReg, MVT::i32);
705 SDOperand Hi = DAG.getCopyFromReg(Low.getValue(1), TopReg, MVT::i32);
706 DAG.setRoot(Hi.getValue(1));
708 ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Low, Hi);
// ...or split: low half in EDX, high half loaded from the stack.
712 } else if (NumIntRegs == 1) {
713 if (!I->use_empty()) {
714 unsigned BotReg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);
715 SDOperand Low = DAG.getCopyFromReg(DAG.getRoot(), BotReg, MVT::i32);
716 DAG.setRoot(Low.getValue(1));
718 // Load the high part from memory.
719 // Create the frame index object for this incoming parameter...
720 int FI = MFI->CreateFixedObject(4, ArgOffset);
721 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
722 SDOperand Hi = DAG.getLoad(MVT::i32, DAG.getEntryNode(), FIN,
723 DAG.getSrcValue(NULL));
724 ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Low, Hi);
730 ObjSize = ArgIncrement = 8;
// FP arguments always come on the stack in this convention.
732 case MVT::f32: ObjSize = 4; break;
733 case MVT::f64: ObjSize = ArgIncrement = 8; break;
736 // Don't codegen dead arguments. FIXME: remove this check when we can nuke
// dead loads; ObjSize != 0 means the argument was not fully register-passed.
738 if (ObjSize && !I->use_empty()) {
739 // Create the frame index object for this incoming parameter...
740 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
742 // Create the SelectionDAG nodes corresponding to a load from this
// parameter's stack slot.
744 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
746 ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
747 DAG.getSrcValue(NULL));
748 } else if (ArgValue.Val == 0) {
// Dead argument: substitute a zero of the right type.
749 if (MVT::isInteger(ObjectVT))
750 ArgValue = DAG.getConstant(0, ObjectVT);
752 ArgValue = DAG.getConstantFP(0, ObjectVT);
754 ArgValues.push_back(ArgValue);
757 ArgOffset += ArgIncrement; // Move on to the next argument.
760 // Make sure the instruction takes 8n+4 bytes to make sure the start of the
761 // arguments and the arguments after the retaddr has been pushed are aligned.
762 if ((ArgOffset & 7) == 0)
765 VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs.
766 ReturnAddrIndex = 0; // No return address slot generated yet.
767 BytesToPopOnReturn = ArgOffset; // Callee pops all stack arguments.
768 BytesCallerReserves = 0;
770 // Finally, inform the code generator which regs we return values in.
771 switch (getValueType(F.getReturnType())) {
772 default: assert(0 && "Unknown type!");
773 case MVT::isVoid: break;
// Same return-register convention as C: EAX (+EDX for i64), ST0 for FP.
778 MF.addLiveOut(X86::EAX);
781 MF.addLiveOut(X86::EAX);
782 MF.addLiveOut(X86::EDX);
786 MF.addLiveOut(X86::ST0);
/// LowerFastCCCallTo - Lower an outgoing call under the X86 'fast' calling
/// convention: the first one or two integer arguments travel in EAX/EDX
/// (appropriate sub-registers for i8/i16), the rest on the stack; the stack
/// amount is padded to 8n+4 bytes and the callee pops it. Return values use
/// the same registers as the C convention.
///
/// NOTE(review): this excerpt elides many lines (byte-counting cases,
/// if/else branches, closing braces) -- the embedded line numbers skip.
792 std::pair<SDOperand, SDOperand>
793 X86TargetLowering::LowerFastCCCallTo(SDOperand Chain, const Type *RetTy,
794 bool isTailCall, SDOperand Callee,
795 ArgListTy &Args, SelectionDAG &DAG) {
796 // Count how many bytes are to be pushed on the stack.
797 unsigned NumBytes = 0;
799 // Keep track of the number of integer regs passed so far. This can be either
800 // 0 (neither EAX or EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
// used).
802 unsigned NumIntRegs = 0;
// First pass: compute NumBytes, skipping arguments that fit in registers
// (the per-type accounting cases are elided from this excerpt).
804 for (unsigned i = 0, e = Args.size(); i != e; ++i)
805 switch (getValueType(Args[i].second)) {
806 default: assert(0 && "Unknown value type!");
811 if (NumIntRegs < 2) {
820 if (NumIntRegs == 0) {
823 } else if (NumIntRegs == 1) {
835 // Make sure the instruction takes 8n+4 bytes to make sure the start of the
836 // arguments and the arguments after the retaddr has been pushed are aligned.
837 if ((NumBytes & 7) == 0)
840 Chain = DAG.getNode(ISD::CALLSEQ_START, MVT::Other, Chain,
841 DAG.getConstant(NumBytes, getPointerTy()));
843 // Arguments go on the stack in reverse order, as specified by the ABI.
844 unsigned ArgOffset = 0;
845 SDOperand StackPtr = DAG.getRegister(X86::ESP, MVT::i32);
847 std::vector<SDOperand> Stores;
848 std::vector<SDOperand> RegValuesToPass;
// Second pass: route each argument either into RegValuesToPass (register
// candidates, NumIntRegs permitting) or into a stack store.
849 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
850 switch (getValueType(Args[i].second)) {
851 default: assert(0 && "Unexpected ValueType for argument!");
853 Args[i].first = DAG.getNode(ISD::ANY_EXTEND, MVT::i8, Args[i].first);
858 if (NumIntRegs < 2) {
859 RegValuesToPass.push_back(Args[i].first);
// Register budget exhausted: spill this argument to its stack slot.
865 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
866 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
867 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
868 Args[i].first, PtrOff,
869 DAG.getSrcValue(NULL)));
// i64 arguments may be split: low half (and maybe high half) in regs,
// remainder in memory.
874 if (NumIntRegs < 2) { // Can pass part of it in regs?
875 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
876 Args[i].first, DAG.getConstant(1, MVT::i32));
877 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
878 Args[i].first, DAG.getConstant(0, MVT::i32));
879 RegValuesToPass.push_back(Lo);
881 if (NumIntRegs < 2) { // Pass both parts in regs?
882 RegValuesToPass.push_back(Hi);
885 // Pass the high part in memory.
886 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
887 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
888 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
889 Hi, PtrOff, DAG.getSrcValue(NULL)));
// FP arguments always go to the stack in this convention.
896 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
897 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
898 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
899 Args[i].first, PtrOff,
900 DAG.getSrcValue(NULL)));
906 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);
908 // Make sure the instruction takes 8n+4 bytes to make sure the start of the
909 // arguments and the arguments after the retaddr has been pushed are aligned.
910 if ((ArgOffset & 7) == 0)
913 std::vector<MVT::ValueType> RetVals;
914 MVT::ValueType RetTyVT = getValueType(RetTy);
916 RetVals.push_back(MVT::Other);
918 // The result values produced have to be legal. Promote the result.
920 case MVT::isVoid: break;
922 RetVals.push_back(RetTyVT);
927 RetVals.push_back(MVT::i32);
931 RetVals.push_back(MVT::f32);
933 RetVals.push_back(MVT::f64);
// i64 results come back as two i32 halves.
936 RetVals.push_back(MVT::i32);
937 RetVals.push_back(MVT::i32);
942 // Build a sequence of copy-to-reg nodes chained together with token chain
943 // and flag operands which copy the outgoing args into registers.
945 for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) {
947 SDOperand RegToPass = RegValuesToPass[i];
948 switch (RegToPass.getValueType()) {
949 default: assert(0 && "Bad thing to pass in regs");
951 CCReg = (i == 0) ? X86::AL : X86::DL;
954 CCReg = (i == 0) ? X86::AX : X86::DX;
957 CCReg = (i == 0) ? X86::EAX : X86::EDX;
961 Chain = DAG.getCopyToReg(Chain, CCReg, RegToPass, InFlag);
962 InFlag = Chain.getValue(1);
// Emit the call: chain + flag results, glued to the register copies.
965 std::vector<MVT::ValueType> NodeTys;
966 NodeTys.push_back(MVT::Other); // Returns a chain
967 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
968 std::vector<SDOperand> Ops;
969 Ops.push_back(Chain);
970 Ops.push_back(Callee);
972 Ops.push_back(InFlag);
974 // FIXME: Do not generate X86ISD::TAILCALL for now.
975 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops);
976 InFlag = Chain.getValue(1);
// End the call sequence. Both operands are ArgOffset: the callee pops the
// full stack-argument area in this convention.
979 NodeTys.push_back(MVT::Other); // Returns a chain
980 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
982 Ops.push_back(Chain);
983 Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));
984 Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));
985 Ops.push_back(InFlag);
986 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, Ops);
987 InFlag = Chain.getValue(1);
// Copy the result out of the ABI return register(s), same as the C path.
990 if (RetTyVT != MVT::isVoid) {
992 default: assert(0 && "Unknown value type to return!");
995 RetVal = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag);
996 Chain = RetVal.getValue(1);
997 if (RetTyVT == MVT::i1)
998 RetVal = DAG.getNode(ISD::TRUNCATE, MVT::i1, RetVal);
1001 RetVal = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag);
1002 Chain = RetVal.getValue(1);
1005 RetVal = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
1006 Chain = RetVal.getValue(1);
1009 SDOperand Lo = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
1010 SDOperand Hi = DAG.getCopyFromReg(Lo.getValue(1), X86::EDX, MVT::i32,
1012 RetVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Lo, Hi);
1013 Chain = Hi.getValue(1);
// FP result: fetch from ST0, then round-trip through a stack temporary.
1018 std::vector<MVT::ValueType> Tys;
1019 Tys.push_back(MVT::f64);
1020 Tys.push_back(MVT::Other);
1021 Tys.push_back(MVT::Flag);
1022 std::vector<SDOperand> Ops;
1023 Ops.push_back(Chain);
1024 Ops.push_back(InFlag);
1025 RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, Ops);
1026 Chain = RetVal.getValue(1);
1027 InFlag = RetVal.getValue(2);
1029 // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
1030 // shouldn't be necessary except that RFP cannot be live across
1031 // multiple blocks. When stackifier is fixed, they can be uncoupled.
1032 MachineFunction &MF = DAG.getMachineFunction();
1033 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
1034 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
1036 Tys.push_back(MVT::Other);
1038 Ops.push_back(Chain);
1039 Ops.push_back(RetVal);
1040 Ops.push_back(StackSlot);
1041 Ops.push_back(DAG.getValueType(RetTyVT));
1042 Ops.push_back(InFlag);
1043 Chain = DAG.getNode(X86ISD::FST, Tys, Ops);
1044 RetVal = DAG.getLoad(RetTyVT, Chain, StackSlot,
1045 DAG.getSrcValue(NULL));
1046 Chain = RetVal.getValue(1);
1049 if (RetTyVT == MVT::f32 && !X86ScalarSSE)
1050 // FIXME: we would really like to remember that this FP_ROUND
1051 // operation is okay to eliminate if we allow excess FP precision.
1052 RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
1058 return std::make_pair(RetVal, Chain);
// Alternate (older) call-emission path: encode everything on the call
// node itself. NOTE(review): the guard selecting this path is elided.
1060 std::vector<SDOperand> Ops;
1061 Ops.push_back(Chain);
1062 Ops.push_back(Callee);
1063 Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));
1064 // Callee pops all arg values on the stack.
1065 Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));
1067 // Pass register arguments as needed.
1068 Ops.insert(Ops.end(), RegValuesToPass.begin(), RegValuesToPass.end());
1070 SDOperand TheCall = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
1072 Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, TheCall);
1074 SDOperand ResultVal;
1076 case MVT::isVoid: break;
1078 ResultVal = TheCall.getValue(1);
1083 ResultVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, TheCall.getValue(1));
1086 // FIXME: we would really like to remember that this FP_ROUND operation is
1087 // okay to eliminate if we allow excess FP precision.
1088 ResultVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, TheCall.getValue(1));
1091 ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, TheCall.getValue(1),
1092 TheCall.getValue(2));
1096 return std::make_pair(ResultVal, Chain);
1100 SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
1101 if (ReturnAddrIndex == 0) {
1102 // Set up a frame object for the return address.
1103 MachineFunction &MF = DAG.getMachineFunction();
1104 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
1107 return DAG.getFrameIndex(ReturnAddrIndex, MVT::i32);
1112 std::pair<SDOperand, SDOperand> X86TargetLowering::
1113 LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
1114 SelectionDAG &DAG) {
1116 if (Depth) // Depths > 0 not supported yet!
1117 Result = DAG.getConstant(0, getPointerTy());
1119 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
1120 if (!isFrameAddress)
1121 // Just load the return address
1122 Result = DAG.getLoad(MVT::i32, DAG.getEntryNode(), RetAddrFI,
1123 DAG.getSrcValue(NULL));
1125 Result = DAG.getNode(ISD::SUB, MVT::i32, RetAddrFI,
1126 DAG.getConstant(4, MVT::i32));
1128 return std::make_pair(Result, Chain);
1131 /// getCondBrOpcodeForX86CC - Returns the X86 conditional branch opcode
1132 /// which corresponds to the condition code.
1133 static unsigned getCondBrOpcodeForX86CC(unsigned X86CC) {
1135 default: assert(0 && "Unknown X86 conditional code!");
1136 case X86ISD::COND_A: return X86::JA;
1137 case X86ISD::COND_AE: return X86::JAE;
1138 case X86ISD::COND_B: return X86::JB;
1139 case X86ISD::COND_BE: return X86::JBE;
1140 case X86ISD::COND_E: return X86::JE;
1141 case X86ISD::COND_G: return X86::JG;
1142 case X86ISD::COND_GE: return X86::JGE;
1143 case X86ISD::COND_L: return X86::JL;
1144 case X86ISD::COND_LE: return X86::JLE;
1145 case X86ISD::COND_NE: return X86::JNE;
1146 case X86ISD::COND_NO: return X86::JNO;
1147 case X86ISD::COND_NP: return X86::JNP;
1148 case X86ISD::COND_NS: return X86::JNS;
1149 case X86ISD::COND_O: return X86::JO;
1150 case X86ISD::COND_P: return X86::JP;
1151 case X86ISD::COND_S: return X86::JS;
1155 /// translateX86CC - do a one to one translation of a ISD::CondCode to the X86
1156 /// specific condition code. It returns a false if it cannot do a direct
1157 /// translation. X86CC is the translated CondCode. Flip is set to true if the
1158 /// the order of comparison operands should be flipped.
1159 static bool translateX86CC(SDOperand CC, bool isFP, unsigned &X86CC, bool &Flip) {
1160 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
1162 X86CC = X86ISD::COND_INVALID;
1164 switch (SetCCOpcode) {
1166 case ISD::SETEQ: X86CC = X86ISD::COND_E; break;
1167 case ISD::SETGT: X86CC = X86ISD::COND_G; break;
1168 case ISD::SETGE: X86CC = X86ISD::COND_GE; break;
1169 case ISD::SETLT: X86CC = X86ISD::COND_L; break;
1170 case ISD::SETLE: X86CC = X86ISD::COND_LE; break;
1171 case ISD::SETNE: X86CC = X86ISD::COND_NE; break;
1172 case ISD::SETULT: X86CC = X86ISD::COND_B; break;
1173 case ISD::SETUGT: X86CC = X86ISD::COND_A; break;
1174 case ISD::SETULE: X86CC = X86ISD::COND_BE; break;
1175 case ISD::SETUGE: X86CC = X86ISD::COND_AE; break;
1178 // On a floating point condition, the flags are set as follows:
1180 // 0 | 0 | 0 | X > Y
1181 // 0 | 0 | 1 | X < Y
1182 // 1 | 0 | 0 | X == Y
1183 // 1 | 1 | 1 | unordered
1184 switch (SetCCOpcode) {
1187 case ISD::SETEQ: X86CC = X86ISD::COND_E; break;
1188 case ISD::SETOLE: Flip = true; // Fallthrough
1190 case ISD::SETGT: X86CC = X86ISD::COND_A; break;
1191 case ISD::SETOLT: Flip = true; // Fallthrough
1193 case ISD::SETGE: X86CC = X86ISD::COND_AE; break;
1194 case ISD::SETUGE: Flip = true; // Fallthrough
1196 case ISD::SETLT: X86CC = X86ISD::COND_B; break;
1197 case ISD::SETUGT: Flip = true; // Fallthrough
1199 case ISD::SETLE: X86CC = X86ISD::COND_BE; break;
1201 case ISD::SETNE: X86CC = X86ISD::COND_NE; break;
1202 case ISD::SETUO: X86CC = X86ISD::COND_P; break;
1203 case ISD::SETO: X86CC = X86ISD::COND_NP; break;
1207 return X86CC != X86ISD::COND_INVALID;
1210 /// hasFPCMov - is there a floating point cmov for the specific X86 condition
1211 /// code. Current x86 isa includes the following FP cmov instructions:
1212 /// fcmovb, fcomvbe, fcomve, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
1213 static bool hasFPCMov(unsigned X86CC) {
1217 case X86ISD::COND_B:
1218 case X86ISD::COND_BE:
1219 case X86ISD::COND_E:
1220 case X86ISD::COND_P:
1221 case X86ISD::COND_A:
1222 case X86ISD::COND_AE:
1223 case X86ISD::COND_NE:
1224 case X86ISD::COND_NP:
1230 X86TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
1231 MachineBasicBlock *BB) {
1232 switch (MI->getOpcode()) {
1233 default: assert(false && "Unexpected instr type to insert");
1234 case X86::CMOV_FR32:
1235 case X86::CMOV_FR64: {
1236 // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
1237 // control-flow pattern. The incoming instruction knows the destination vreg
1238 // to set, the condition code register to branch on, the true/false values to
1239 // select between, and a branch opcode to use.
1240 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1241 ilist<MachineBasicBlock>::iterator It = BB;
1247 // cmpTY ccX, r1, r2
1249 // fallthrough --> copy0MBB
1250 MachineBasicBlock *thisMBB = BB;
1251 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
1252 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
1253 unsigned Opc = getCondBrOpcodeForX86CC(MI->getOperand(3).getImmedValue());
1254 BuildMI(BB, Opc, 1).addMBB(sinkMBB);
1255 MachineFunction *F = BB->getParent();
1256 F->getBasicBlockList().insert(It, copy0MBB);
1257 F->getBasicBlockList().insert(It, sinkMBB);
1258 // Update machine-CFG edges
1259 BB->addSuccessor(copy0MBB);
1260 BB->addSuccessor(sinkMBB);
1263 // %FalseValue = ...
1264 // # fallthrough to sinkMBB
1267 // Update machine-CFG edges
1268 BB->addSuccessor(sinkMBB);
1271 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
1274 BuildMI(BB, X86::PHI, 4, MI->getOperand(0).getReg())
1275 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
1276 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
1278 delete MI; // The pseudo instruction is gone now.
1282 case X86::FP_TO_INT16_IN_MEM:
1283 case X86::FP_TO_INT32_IN_MEM:
1284 case X86::FP_TO_INT64_IN_MEM: {
1285 // Change the floating point control register to use "round towards zero"
1286 // mode when truncating to an integer value.
1287 MachineFunction *F = BB->getParent();
1288 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
1289 addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);
1291 // Load the old value of the high byte of the control word...
1293 F->getSSARegMap()->createVirtualRegister(X86::R16RegisterClass);
1294 addFrameReference(BuildMI(BB, X86::MOV16rm, 4, OldCW), CWFrameIdx);
1296 // Set the high part to be round to zero...
1297 addFrameReference(BuildMI(BB, X86::MOV16mi, 5), CWFrameIdx).addImm(0xC7F);
1299 // Reload the modified control word now...
1300 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
1302 // Restore the memory image of control word to original value
1303 addFrameReference(BuildMI(BB, X86::MOV16mr, 5), CWFrameIdx).addReg(OldCW);
1305 // Get the X86 opcode to use.
1307 switch (MI->getOpcode()) {
1308 default: assert(0 && "illegal opcode!");
1309 case X86::FP_TO_INT16_IN_MEM: Opc = X86::FpIST16m; break;
1310 case X86::FP_TO_INT32_IN_MEM: Opc = X86::FpIST32m; break;
1311 case X86::FP_TO_INT64_IN_MEM: Opc = X86::FpIST64m; break;
1315 MachineOperand &Op = MI->getOperand(0);
1316 if (Op.isRegister()) {
1317 AM.BaseType = X86AddressMode::RegBase;
1318 AM.Base.Reg = Op.getReg();
1320 AM.BaseType = X86AddressMode::FrameIndexBase;
1321 AM.Base.FrameIndex = Op.getFrameIndex();
1323 Op = MI->getOperand(1);
1324 if (Op.isImmediate())
1325 AM.Scale = Op.getImmedValue();
1326 Op = MI->getOperand(2);
1327 if (Op.isImmediate())
1328 AM.IndexReg = Op.getImmedValue();
1329 Op = MI->getOperand(3);
1330 if (Op.isGlobalAddress()) {
1331 AM.GV = Op.getGlobal();
1333 AM.Disp = Op.getImmedValue();
1335 addFullAddress(BuildMI(BB, Opc, 5), AM).addReg(MI->getOperand(4).getReg());
1337 // Reload the original control word now.
1338 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
1340 delete MI; // The pseudo instruction is gone now.
1347 //===----------------------------------------------------------------------===//
1348 // X86 Custom Lowering Hooks
1349 //===----------------------------------------------------------------------===//
1351 /// LowerOperation - Provide custom lowering hooks for some operations.
// NOTE(review): this function appears to have lost lines in an extraction or
// merge -- several case labels, else-branches, declarations, and closing
// braces are missing.  The comments below annotate only what is visibly
// present; recover the authoritative text from version control before
// building.
1353 SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
1354 switch (Op.getOpcode()) {
1355 default: assert(0 && "Should not custom lower this!");
// --- i64 add/sub split into two i32 halves: the low half uses
// ADD_FLAG/SUB_FLAG to produce the carry, the high half consumes it via
// ADC/SBB; both halves come back as a two-result MERGE_VALUES.
1356 case ISD::ADD_PARTS:
1357 case ISD::SUB_PARTS: {
1358 assert(Op.getNumOperands() == 4 && Op.getValueType() == MVT::i32 &&
1359 "Not an i64 add/sub!");
1360 bool isAdd = Op.getOpcode() == ISD::ADD_PARTS;
1361 std::vector<MVT::ValueType> Tys;
1362 Tys.push_back(MVT::i32);
1363 Tys.push_back(MVT::Flag);
1364 std::vector<SDOperand> Ops;
1365 Ops.push_back(Op.getOperand(0));
1366 Ops.push_back(Op.getOperand(2));
1367 SDOperand Lo = DAG.getNode(isAdd ? X86ISD::ADD_FLAG : X86ISD::SUB_FLAG,
1369 SDOperand Hi = DAG.getNode(isAdd ? X86ISD::ADC : X86ISD::SBB, MVT::i32,
1370 Op.getOperand(1), Op.getOperand(3),
1373 Tys.push_back(MVT::i32);
1374 Tys.push_back(MVT::i32);
1378 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops);
// --- i64 shifts: SHLD/SHRD compute the cross-part bits, then a TEST of the
// shift amount against 32 drives CMOVs that select between the small-shift
// and large-shift results for each half.
1380 case ISD::SHL_PARTS:
1381 case ISD::SRA_PARTS:
1382 case ISD::SRL_PARTS: {
1383 assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 &&
1384 "Not an i64 shift!");
1385 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
1386 SDOperand ShOpLo = Op.getOperand(0);
1387 SDOperand ShOpHi = Op.getOperand(1);
1388 SDOperand ShAmt = Op.getOperand(2);
// Tmp1 is the "overflow" fill value: sign bits for SRA, zero otherwise.
1389 SDOperand Tmp1 = isSRA ? DAG.getNode(ISD::SRA, MVT::i32, ShOpHi,
1390 DAG.getConstant(31, MVT::i8))
1391 : DAG.getConstant(0, MVT::i32);
1393 SDOperand Tmp2, Tmp3;
1394 if (Op.getOpcode() == ISD::SHL_PARTS) {
1395 Tmp2 = DAG.getNode(X86ISD::SHLD, MVT::i32, ShOpHi, ShOpLo, ShAmt);
1396 Tmp3 = DAG.getNode(ISD::SHL, MVT::i32, ShOpLo, ShAmt);
1398 Tmp2 = DAG.getNode(X86ISD::SHRD, MVT::i32, ShOpLo, ShOpHi, ShAmt);
1399 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, MVT::i32, ShOpHi, ShAmt);
// Test whether the shift amount is >= 32 (bit 5 set).
1402 SDOperand InFlag = DAG.getNode(X86ISD::TEST, MVT::Flag,
1403 ShAmt, DAG.getConstant(32, MVT::i8));
1406 SDOperand CC = DAG.getConstant(X86ISD::COND_NE, MVT::i8);
1408 std::vector<MVT::ValueType> Tys;
1409 Tys.push_back(MVT::i32);
1410 Tys.push_back(MVT::Flag);
1411 std::vector<SDOperand> Ops;
1412 if (Op.getOpcode() == ISD::SHL_PARTS) {
1413 Ops.push_back(Tmp2);
1414 Ops.push_back(Tmp3);
1416 Ops.push_back(InFlag);
1417 Hi = DAG.getNode(X86ISD::CMOV, Tys, Ops);
1418 InFlag = Hi.getValue(1);
1421 Ops.push_back(Tmp3);
1422 Ops.push_back(Tmp1);
1424 Ops.push_back(InFlag);
1425 Lo = DAG.getNode(X86ISD::CMOV, Tys, Ops);
1427 Ops.push_back(Tmp2);
1428 Ops.push_back(Tmp3);
1430 Ops.push_back(InFlag);
1431 Lo = DAG.getNode(X86ISD::CMOV, Tys, Ops);
1432 InFlag = Lo.getValue(1);
1435 Ops.push_back(Tmp3);
1436 Ops.push_back(Tmp1);
1438 Ops.push_back(InFlag);
1439 Hi = DAG.getNode(X86ISD::CMOV, Tys, Ops);
1443 Tys.push_back(MVT::i32);
1444 Tys.push_back(MVT::i32);
1448 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops);
// --- SINT_TO_FP: spill the integer to a stack slot and FILD it; under
// scalar SSE the x87 result is FST'd back to memory and reloaded in the
// destination type (see the FIXME about the stackifier coupling).
1450 case ISD::SINT_TO_FP: {
1451 assert(Op.getOperand(0).getValueType() <= MVT::i64 &&
1452 Op.getOperand(0).getValueType() >= MVT::i16 &&
1453 "Unknown SINT_TO_FP to lower!");
1456 MVT::ValueType SrcVT = Op.getOperand(0).getValueType();
1457 unsigned Size = MVT::getSizeInBits(SrcVT)/8;
1458 MachineFunction &MF = DAG.getMachineFunction();
1459 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
1460 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
1461 SDOperand Chain = DAG.getNode(ISD::STORE, MVT::Other,
1462 DAG.getEntryNode(), Op.getOperand(0),
1463 StackSlot, DAG.getSrcValue(NULL));
1466 std::vector<MVT::ValueType> Tys;
1467 Tys.push_back(MVT::f64);
1468 Tys.push_back(MVT::Other);
1469 Tys.push_back(MVT::Flag);
1470 std::vector<SDOperand> Ops;
1471 Ops.push_back(Chain);
1472 Ops.push_back(StackSlot);
1473 Ops.push_back(DAG.getValueType(SrcVT));
1474 Result = DAG.getNode(X86ISD::FILD, Tys, Ops);
1477 Chain = Result.getValue(1);
1478 SDOperand InFlag = Result.getValue(2);
1480 // FIXME: Currently the FST is flagged to the FILD. This
1481 // shouldn't be necessary except that RFP cannot be live across
1482 // multiple blocks. When stackifier is fixed, they can be uncoupled.
1483 MachineFunction &MF = DAG.getMachineFunction();
1484 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
1485 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
1486 std::vector<MVT::ValueType> Tys;
1487 Tys.push_back(MVT::Other);
1488 std::vector<SDOperand> Ops;
1489 Ops.push_back(Chain);
1490 Ops.push_back(Result);
1491 Ops.push_back(StackSlot);
1492 Ops.push_back(DAG.getValueType(Op.getValueType()));
1493 Ops.push_back(InFlag);
1494 Chain = DAG.getNode(X86ISD::FST, Tys, Ops);
1495 Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot,
1496 DAG.getSrcValue(NULL));
// --- FP_TO_SINT: lowered to a FP_TO_INT*_IN_MEM pseudo that stores the
// truncated value into a stack slot, followed by an integer load; an SSE
// input is first stored and re-FLD'd onto the x87 stack.
1501 case ISD::FP_TO_SINT: {
1502 assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 &&
1503 "Unknown FP_TO_SINT to lower!");
1504 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary
1506 MachineFunction &MF = DAG.getMachineFunction();
1507 unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8;
1508 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
1509 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
1512 switch (Op.getValueType()) {
1513 default: assert(0 && "Invalid FP_TO_SINT to lower!");
1514 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
1515 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
1516 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
1519 SDOperand Chain = DAG.getEntryNode();
1520 SDOperand Value = Op.getOperand(0);
1522 assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!");
1523 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value, StackSlot,
1524 DAG.getSrcValue(0));
1525 std::vector<MVT::ValueType> Tys;
1526 Tys.push_back(MVT::f64);
1527 Tys.push_back(MVT::Other);
1528 std::vector<SDOperand> Ops;
1529 Ops.push_back(Chain);
1530 Ops.push_back(StackSlot);
1531 Ops.push_back(DAG.getValueType(Op.getOperand(0).getValueType()));
1532 Value = DAG.getNode(X86ISD::FLD, Tys, Ops);
1533 Chain = Value.getValue(1);
1534 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
1535 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
1538 // Build the FP_TO_INT*_IN_MEM
1539 std::vector<SDOperand> Ops;
1540 Ops.push_back(Chain);
1541 Ops.push_back(Value);
1542 Ops.push_back(StackSlot);
1543 SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops);
1546 return DAG.getLoad(Op.getValueType(), FIST, StackSlot,
1547 DAG.getSrcValue(NULL));
// --- READCYCLECOUNTER: RDTSC leaves the counter in EDX:EAX; both halves
// are copied out of the physical registers and merged with the chain.
1549 case ISD::READCYCLECOUNTER: {
1550 std::vector<MVT::ValueType> Tys;
1551 Tys.push_back(MVT::Other);
1552 Tys.push_back(MVT::Flag);
1553 std::vector<SDOperand> Ops;
1554 Ops.push_back(Op.getOperand(0));
1555 SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, Ops);
1557 Ops.push_back(DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)));
1558 Ops.push_back(DAG.getCopyFromReg(Ops[0].getValue(1), X86::EDX,
1559 MVT::i32, Ops[0].getValue(2)));
1560 Ops.push_back(Ops[1].getValue(1));
1561 Tys[0] = Tys[1] = MVT::i32;
1562 Tys.push_back(MVT::Other);
1563 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops);
// --- presumably the ISD::SETCC case (its label was lost in the garbling):
// translate the condition and emit X86ISD::CMP + SETCC; ordered-EQ and
// unordered-NE have no single flag test and need two SETCCs combined with
// AND/OR on the parity and zero flags.
1566 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
1568 SDOperand CC = Op.getOperand(2);
1569 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
1570 bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType());
1573 if (translateX86CC(CC, isFP, X86CC, Flip)) {
1575 Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
1576 Op.getOperand(1), Op.getOperand(0));
1578 Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
1579 Op.getOperand(0), Op.getOperand(1));
1580 return DAG.getNode(X86ISD::SETCC, MVT::i8,
1581 DAG.getConstant(X86CC, MVT::i8), Cond);
1583 assert(isFP && "Illegal integer SetCC!");
1585 Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
1586 Op.getOperand(0), Op.getOperand(1));
1587 std::vector<MVT::ValueType> Tys;
1588 std::vector<SDOperand> Ops;
1589 switch (SetCCOpcode) {
1590 default: assert(false && "Illegal floating point SetCC!");
1591 case ISD::SETOEQ: { // !PF & ZF
1592 Tys.push_back(MVT::i8);
1593 Tys.push_back(MVT::Flag);
1594 Ops.push_back(DAG.getConstant(X86ISD::COND_NP, MVT::i8));
1595 Ops.push_back(Cond);
1596 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
1597 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
1598 DAG.getConstant(X86ISD::COND_E, MVT::i8),
1600 return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2);
1602 case ISD::SETUNE: { // PF | !ZF
1603 Tys.push_back(MVT::i8);
1604 Tys.push_back(MVT::Flag);
1605 Ops.push_back(DAG.getConstant(X86ISD::COND_P, MVT::i8));
1606 Ops.push_back(Cond);
1607 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
1608 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
1609 DAG.getConstant(X86ISD::COND_NE, MVT::i8),
1611 return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2);
// --- presumably the ISD::SELECT case: lower to X86ISD::CMOV, duplicating
// the flag-producing CMP when the SETCC has multiple uses (a Flag result
// cannot be shared between consumers).
1617 MVT::ValueType VT = Op.getValueType();
1618 bool isFP = MVT::isFloatingPoint(VT);
1619 bool isFPStack = isFP && !X86ScalarSSE;
1620 bool isFPSSE = isFP && X86ScalarSSE;
1621 bool addTest = false;
1622 SDOperand Op0 = Op.getOperand(0);
1624 if (Op0.getOpcode() == ISD::SETCC)
1625 Op0 = LowerOperation(Op0, DAG);
1627 if (Op0.getOpcode() == X86ISD::SETCC) {
1628 // If condition flag is set by a X86ISD::CMP, then make a copy of it
1629 // (since flag operand cannot be shared). If the X86ISD::SETCC does not
1630 // have another use it will be eliminated.
1631 // If the X86ISD::SETCC has more than one use, then it's probably better
1632 // to use a test instead of duplicating the X86ISD::CMP (for register
1633 // pressure reason).
1634 if (Op0.getOperand(1).getOpcode() == X86ISD::CMP) {
1635 if (!Op0.hasOneUse()) {
1636 std::vector<MVT::ValueType> Tys;
1637 for (unsigned i = 0; i < Op0.Val->getNumValues(); ++i)
1638 Tys.push_back(Op0.Val->getValueType(i));
1639 std::vector<SDOperand> Ops;
1640 for (unsigned i = 0; i < Op0.getNumOperands(); ++i)
1641 Ops.push_back(Op0.getOperand(i));
1642 Op0 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
1645 CC = Op0.getOperand(0);
1646 Cond = Op0.getOperand(1);
1647 // Make a copy as flag result cannot be used by more than one.
1648 Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
1649 Cond.getOperand(0), Cond.getOperand(1));
// x87-stack selects can only use conditions that fcmov supports.
1651 isFPStack && !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
1658 CC = DAG.getConstant(X86ISD::COND_NE, MVT::i8);
1659 Cond = DAG.getNode(X86ISD::TEST, MVT::Flag, Op0, Op0);
1662 std::vector<MVT::ValueType> Tys;
1663 Tys.push_back(Op.getValueType());
1664 Tys.push_back(MVT::Flag);
1665 std::vector<SDOperand> Ops;
1666 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
1667 // condition is true.
1668 Ops.push_back(Op.getOperand(2));
1669 Ops.push_back(Op.getOperand(1));
1671 Ops.push_back(Cond);
1672 return DAG.getNode(X86ISD::CMOV, Tys, Ops);
// --- presumably the ISD::BRCOND case: same CMP-duplication logic as the
// select lowering, emitting X86ISD::BRCOND on the translated condition.
1675 bool addTest = false;
1676 SDOperand Cond = Op.getOperand(1);
1677 SDOperand Dest = Op.getOperand(2);
1679 if (Cond.getOpcode() == ISD::SETCC)
1680 Cond = LowerOperation(Cond, DAG);
1682 if (Cond.getOpcode() == X86ISD::SETCC) {
1683 // If condition flag is set by a X86ISD::CMP, then make a copy of it
1684 // (since flag operand cannot be shared). If the X86ISD::SETCC does not
1685 // have another use it will be eliminated.
1686 // If the X86ISD::SETCC has more than one use, then it's probably better
1687 // to use a test instead of duplicating the X86ISD::CMP (for register
1688 // pressure reason).
1689 if (Cond.getOperand(1).getOpcode() == X86ISD::CMP) {
1690 if (!Cond.hasOneUse()) {
1691 std::vector<MVT::ValueType> Tys;
1692 for (unsigned i = 0; i < Cond.Val->getNumValues(); ++i)
1693 Tys.push_back(Cond.Val->getValueType(i));
1694 std::vector<SDOperand> Ops;
1695 for (unsigned i = 0; i < Cond.getNumOperands(); ++i)
1696 Ops.push_back(Cond.getOperand(i));
1697 Cond = DAG.getNode(X86ISD::SETCC, Tys, Ops);
1700 CC = Cond.getOperand(0);
1701 Cond = Cond.getOperand(1);
1702 // Make a copy as flag result cannot be used by more than one.
1703 Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
1704 Cond.getOperand(0), Cond.getOperand(1));
1711 CC = DAG.getConstant(X86ISD::COND_NE, MVT::i8);
1712 Cond = DAG.getNode(X86ISD::TEST, MVT::Flag, Cond, Cond);
1714 return DAG.getNode(X86ISD::BRCOND, Op.getValueType(),
1715 Op.getOperand(0), Op.getOperand(2), CC, Cond);
// --- presumably the ISD::MEMSET case: lower to REP_STOS, widening a
// constant fill byte to 16/32 bits when the pointer alignment allows so
// fewer iterations are needed.
1719 SDOperand Chain = Op.getOperand(0);
1721 (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
1722 if (Align == 0) Align = 1;
1726 if (ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2))) {
1728 unsigned Val = ValC->getValue() & 255;
1730 // If the value is a constant, then we can potentially use larger sets.
1731 switch (Align & 3) {
1732 case 2: // WORD aligned
1734 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
1735 Count = DAG.getConstant(I->getValue() / 2, MVT::i32);
1737 Count = DAG.getNode(ISD::SRL, MVT::i32, Op.getOperand(3),
1738 DAG.getConstant(1, MVT::i8));
1739 Val = (Val << 8) | Val;
1742 case 0: // DWORD aligned
1744 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
1745 Count = DAG.getConstant(I->getValue() / 4, MVT::i32);
1747 Count = DAG.getNode(ISD::SRL, MVT::i32, Op.getOperand(3),
1748 DAG.getConstant(2, MVT::i8));
1749 Val = (Val << 8) | Val;
1750 Val = (Val << 16) | Val;
1753 default: // Byte aligned
1755 Count = Op.getOperand(3);
1760 Chain = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT),
1762 InFlag = Chain.getValue(1);
1765 Count = Op.getOperand(3);
1766 Chain = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag);
1767 InFlag = Chain.getValue(1);
// rep;stos implicitly uses ECX (count) and EDI (destination).
1770 Chain = DAG.getCopyToReg(Chain, X86::ECX, Count, InFlag);
1771 InFlag = Chain.getValue(1);
1772 Chain = DAG.getCopyToReg(Chain, X86::EDI, Op.getOperand(1), InFlag);
1773 InFlag = Chain.getValue(1);
1775 return DAG.getNode(X86ISD::REP_STOS, MVT::Other, Chain,
1776 DAG.getValueType(AVT), InFlag);
// --- presumably the ISD::MEMCPY case: lower to REP_MOVS with the same
// alignment-based element-size selection; rep;movs implicitly uses ECX,
// EDI (destination) and ESI (source).
1779 SDOperand Chain = Op.getOperand(0);
1781 (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
1782 if (Align == 0) Align = 1;
1786 switch (Align & 3) {
1787 case 2: // WORD aligned
1789 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
1790 Count = DAG.getConstant(I->getValue() / 2, MVT::i32);
1792 Count = DAG.getNode(ISD::SRL, MVT::i32, Op.getOperand(3),
1793 DAG.getConstant(1, MVT::i8));
1795 case 0: // DWORD aligned
1797 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
1798 Count = DAG.getConstant(I->getValue() / 4, MVT::i32);
1800 Count = DAG.getNode(ISD::SRL, MVT::i32, Op.getOperand(3),
1801 DAG.getConstant(2, MVT::i8));
1803 default: // Byte aligned
1805 Count = Op.getOperand(3);
1810 Chain = DAG.getCopyToReg(Chain, X86::ECX, Count, InFlag);
1811 InFlag = Chain.getValue(1);
1812 Chain = DAG.getCopyToReg(Chain, X86::EDI, Op.getOperand(1), InFlag);
1813 InFlag = Chain.getValue(1);
1814 Chain = DAG.getCopyToReg(Chain, X86::ESI, Op.getOperand(2), InFlag);
1815 InFlag = Chain.getValue(1);
1817 return DAG.getNode(X86ISD::REP_MOVS, MVT::Other, Chain,
1818 DAG.getValueType(AVT), InFlag);
// --- GlobalAddress: on Darwin, external/weak globals are indirect, so load
// through the symbol's address rather than referencing it directly.
1820 case ISD::GlobalAddress: {
1822 GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
1823 // For Darwin, external and weak symbols are indirect, so we want to load
1824 // the value at address GV, not the value of GV itself. This means that
1825 // the GlobalAddress must be in the base or index register of the address,
1826 // not the GV offset field.
1827 if (getTargetMachine().
1828 getSubtarget<X86Subtarget>().getIndirectExternAndWeakGlobals() &&
1829 (GV->hasWeakLinkage() || GV->isExternal()))
1830 Result = DAG.getLoad(MVT::i32, DAG.getEntryNode(),
1831 DAG.getTargetGlobalAddress(GV, getPointerTy()),
1832 DAG.getSrcValue(NULL));
1835 case ISD::VASTART: {
1836 // vastart just stores the address of the VarArgsFrameIndex slot into the
1837 // memory location argument.
1838 // FIXME: Replace MVT::i32 with PointerTy
1839 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32);
1840 return DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0), FR,
1841 Op.getOperand(1), Op.getOperand(2));
// --- presumably the ISD::RET case: integers return in EAX (EDX:EAX for
// i64); x87 FP returns via FP_SET_RESULT, spilling from SSE registers to
// memory and re-FLDing onto the x87 stack when scalar SSE is enabled.
1846 switch(Op.getNumOperands()) {
1848 assert(0 && "Do not know how to return this many arguments!");
1851 return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Op.getOperand(0),
1852 DAG.getConstant(getBytesToPopOnReturn(), MVT::i16));
1854 MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
1855 if (MVT::isInteger(ArgVT))
1856 Copy = DAG.getCopyToReg(Op.getOperand(0), X86::EAX, Op.getOperand(1),
1858 else if (!X86ScalarSSE) {
1859 std::vector<MVT::ValueType> Tys;
1860 Tys.push_back(MVT::Other);
1861 Tys.push_back(MVT::Flag);
1862 std::vector<SDOperand> Ops;
1863 Ops.push_back(Op.getOperand(0));
1864 Ops.push_back(Op.getOperand(1));
1865 Copy = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops);
1867 // Spill the value to memory and reload it into top of stack.
1868 unsigned Size = MVT::getSizeInBits(ArgVT)/8;
1869 MachineFunction &MF = DAG.getMachineFunction();
1870 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
1871 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
1872 SDOperand Chain = DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0),
1873 Op.getOperand(1), StackSlot,
1874 DAG.getSrcValue(0));
1875 std::vector<MVT::ValueType> Tys;
1876 Tys.push_back(MVT::f64);
1877 Tys.push_back(MVT::Other);
1878 std::vector<SDOperand> Ops;
1879 Ops.push_back(Chain);
1880 Ops.push_back(StackSlot);
1881 Ops.push_back(DAG.getValueType(ArgVT));
1882 Copy = DAG.getNode(X86ISD::FLD, Tys, Ops);
1884 Tys.push_back(MVT::Other);
1885 Tys.push_back(MVT::Flag);
1887 Ops.push_back(Copy.getValue(1));
1888 Ops.push_back(Copy);
1889 Copy = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops);
1894 Copy = DAG.getCopyToReg(Op.getOperand(0), X86::EDX, Op.getOperand(2),
1896 Copy = DAG.getCopyToReg(Copy, X86::EAX,Op.getOperand(1),Copy.getValue(1));
1899 return DAG.getNode(X86ISD::RET_FLAG, MVT::Other,
1900 Copy, DAG.getConstant(getBytesToPopOnReturn(), MVT::i16),
1906 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
1908 default: return NULL;
1909 case X86ISD::ADD_FLAG: return "X86ISD::ADD_FLAG";
1910 case X86ISD::SUB_FLAG: return "X86ISD::SUB_FLAG";
1911 case X86ISD::ADC: return "X86ISD::ADC";
1912 case X86ISD::SBB: return "X86ISD::SBB";
1913 case X86ISD::SHLD: return "X86ISD::SHLD";
1914 case X86ISD::SHRD: return "X86ISD::SHRD";
1915 case X86ISD::FILD: return "X86ISD::FILD";
1916 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
1917 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
1918 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
1919 case X86ISD::FLD: return "X86ISD::FLD";
1920 case X86ISD::FST: return "X86ISD::FST";
1921 case X86ISD::FP_GET_RESULT: return "X86ISD::FP_GET_RESULT";
1922 case X86ISD::FP_SET_RESULT: return "X86ISD::FP_SET_RESULT";
1923 case X86ISD::CALL: return "X86ISD::CALL";
1924 case X86ISD::TAILCALL: return "X86ISD::TAILCALL";
1925 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
1926 case X86ISD::CMP: return "X86ISD::CMP";
1927 case X86ISD::TEST: return "X86ISD::TEST";
1928 case X86ISD::SETCC: return "X86ISD::SETCC";
1929 case X86ISD::CMOV: return "X86ISD::CMOV";
1930 case X86ISD::BRCOND: return "X86ISD::BRCOND";
1931 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
1932 case X86ISD::REP_STOS: return "X86ISD::RET_STOS";
1933 case X86ISD::REP_MOVS: return "X86ISD::RET_MOVS";
1937 bool X86TargetLowering::isMaskedValueZeroForTargetNode(const SDOperand &Op,
1938 uint64_t Mask) const {
1940 unsigned Opc = Op.getOpcode();
1944 assert(Opc >= ISD::BUILTIN_OP_END && "Expected a target specific node");
1946 case X86ISD::SETCC: return (Mask & 1) == 0;