1 //===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation ---*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by Chris Lattner and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
13 //===----------------------------------------------------------------------===//
16 #include "X86InstrBuilder.h"
17 #include "X86ISelLowering.h"
18 #include "X86TargetMachine.h"
19 #include "llvm/CallingConv.h"
20 #include "llvm/Constants.h"
21 #include "llvm/Function.h"
22 #include "llvm/ADT/VectorExtras.h"
23 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/SelectionDAG.h"
28 #include "llvm/CodeGen/SSARegMap.h"
29 #include "llvm/Support/MathExtras.h"
30 #include "llvm/Target/TargetOptions.h"
34 #include "llvm/Support/CommandLine.h"
35 static cl::opt<bool> EnableFastCC("enable-x86-fastcc", cl::Hidden,
36 cl::desc("Enable fastcc on X86"));
38 X86TargetLowering::X86TargetLowering(TargetMachine &TM)
39 : TargetLowering(TM) {
40 Subtarget = &TM.getSubtarget<X86Subtarget>();
41 X86ScalarSSE = Subtarget->hasSSE2();
43 // Set up the TargetLowering object.
45 // X86 is weird, it always uses i8 for shift amounts and setcc results.
46 setShiftAmountType(MVT::i8);
47 setSetCCResultType(MVT::i8);
48 setSetCCResultContents(ZeroOrOneSetCCResult);
49 setSchedulingPreference(SchedulingForRegPressure);
50 setShiftAmountFlavor(Mask); // shl X, 32 == shl X, 0
51 setStackPointerRegisterToSaveRestore(X86::ESP);
53 // Add legal addressing mode scale values.
54 addLegalAddressScale(8);
55 addLegalAddressScale(4);
56 addLegalAddressScale(2);
57 // Enter the ones which require both scale + index last. These are more
59 addLegalAddressScale(9);
60 addLegalAddressScale(5);
61 addLegalAddressScale(3);
63 // Set up the register classes.
64 addRegisterClass(MVT::i8, X86::R8RegisterClass);
65 addRegisterClass(MVT::i16, X86::R16RegisterClass);
66 addRegisterClass(MVT::i32, X86::R32RegisterClass);
68 // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
70 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
71 setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
72 setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);
75 // No SSE i64 SINT_TO_FP, so expand i32 UINT_TO_FP instead.
76 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Expand);
78 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
80 // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
82 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
83 setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);
84 // SSE has no i16 to fp conversion, only i32
86 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
88 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom);
89 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
92 // We can handle SINT_TO_FP and FP_TO_SINT from/to i64 even though i64
94 setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
95 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
97 // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have
99 setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
100 setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);
103 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
105 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom);
106 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
109 // Handle FP_TO_UINT by promoting the destination to a larger signed
111 setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote);
112 setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote);
113 setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote);
115 if (X86ScalarSSE && !Subtarget->hasSSE3())
116 // Expand FP_TO_UINT into a select.
117 // FIXME: We would like to use a Custom expander here eventually to do
118 // the optimal thing for SSE vs. the default expansion in the legalizer.
119 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand);
121 // With SSE3 we can use fisttpll to convert to a signed i64.
122 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
124 setOperationAction(ISD::BIT_CONVERT , MVT::f32 , Expand);
125 setOperationAction(ISD::BIT_CONVERT , MVT::i32 , Expand);
127 setOperationAction(ISD::BRCOND , MVT::Other, Custom);
128 setOperationAction(ISD::BR_CC , MVT::Other, Expand);
129 setOperationAction(ISD::SELECT_CC , MVT::Other, Expand);
130 setOperationAction(ISD::MEMMOVE , MVT::Other, Expand);
131 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Expand);
132 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
133 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
134 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
135 setOperationAction(ISD::SEXTLOAD , MVT::i1 , Expand);
136 setOperationAction(ISD::FREM , MVT::f64 , Expand);
137 setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
138 setOperationAction(ISD::CTTZ , MVT::i8 , Expand);
139 setOperationAction(ISD::CTLZ , MVT::i8 , Expand);
140 setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
141 setOperationAction(ISD::CTTZ , MVT::i16 , Expand);
142 setOperationAction(ISD::CTLZ , MVT::i16 , Expand);
143 setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
144 setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
145 setOperationAction(ISD::CTLZ , MVT::i32 , Expand);
146 setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);
147 setOperationAction(ISD::BSWAP , MVT::i16 , Expand);
149 // These should be promoted to a larger select which is supported.
150 setOperationAction(ISD::SELECT , MVT::i1 , Promote);
151 setOperationAction(ISD::SELECT , MVT::i8 , Promote);
153 // X86 wants to expand cmov itself.
154 setOperationAction(ISD::SELECT , MVT::i16 , Custom);
155 setOperationAction(ISD::SELECT , MVT::i32 , Custom);
156 setOperationAction(ISD::SELECT , MVT::f32 , Custom);
157 setOperationAction(ISD::SELECT , MVT::f64 , Custom);
158 setOperationAction(ISD::SETCC , MVT::i8 , Custom);
159 setOperationAction(ISD::SETCC , MVT::i16 , Custom);
160 setOperationAction(ISD::SETCC , MVT::i32 , Custom);
161 setOperationAction(ISD::SETCC , MVT::f32 , Custom);
162 setOperationAction(ISD::SETCC , MVT::f64 , Custom);
163 // X86 ret instruction may pop stack.
164 setOperationAction(ISD::RET , MVT::Other, Custom);
166 setOperationAction(ISD::ConstantPool , MVT::i32 , Custom);
167 setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom);
168 setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom);
169 // 64-bit addm sub, shl, sra, srl (iff 32-bit x86)
170 setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom);
171 setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom);
172 setOperationAction(ISD::SRL_PARTS , MVT::i32 , Custom);
173 // X86 wants to expand memset / memcpy itself.
174 setOperationAction(ISD::MEMSET , MVT::Other, Custom);
175 setOperationAction(ISD::MEMCPY , MVT::Other, Custom);
177 // We don't have line number support yet.
178 setOperationAction(ISD::LOCATION, MVT::Other, Expand);
179 setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
180 // FIXME - use subtarget debug flags
181 if (!TM.getSubtarget<X86Subtarget>().isTargetDarwin())
182 setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);
184 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
185 setOperationAction(ISD::VASTART , MVT::Other, Custom);
187 // Use the default implementation.
188 setOperationAction(ISD::VAARG , MVT::Other, Expand);
189 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
190 setOperationAction(ISD::VAEND , MVT::Other, Expand);
191 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
192 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
193 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand);
195 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
196 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
199 // Set up the FP register classes.
200 addRegisterClass(MVT::f32, X86::FR32RegisterClass);
201 addRegisterClass(MVT::f64, X86::FR64RegisterClass);
203 // SSE has no load+extend ops
204 setOperationAction(ISD::EXTLOAD, MVT::f32, Expand);
205 setOperationAction(ISD::ZEXTLOAD, MVT::f32, Expand);
207 // Use ANDPD to simulate FABS.
208 setOperationAction(ISD::FABS , MVT::f64, Custom);
209 setOperationAction(ISD::FABS , MVT::f32, Custom);
211 // Use XORP to simulate FNEG.
212 setOperationAction(ISD::FNEG , MVT::f64, Custom);
213 setOperationAction(ISD::FNEG , MVT::f32, Custom);
215 // We don't support sin/cos/fmod
216 setOperationAction(ISD::FSIN , MVT::f64, Expand);
217 setOperationAction(ISD::FCOS , MVT::f64, Expand);
218 setOperationAction(ISD::FREM , MVT::f64, Expand);
219 setOperationAction(ISD::FSIN , MVT::f32, Expand);
220 setOperationAction(ISD::FCOS , MVT::f32, Expand);
221 setOperationAction(ISD::FREM , MVT::f32, Expand);
223 // Expand FP immediates into loads from the stack, except for the special
225 setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
226 setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
227 addLegalFPImmediate(+0.0); // xorps / xorpd
229 // Set up the FP register classes.
230 addRegisterClass(MVT::f64, X86::RFPRegisterClass);
232 setOperationAction(ISD::UNDEF, MVT::f64, Expand);
235 setOperationAction(ISD::FSIN , MVT::f64 , Expand);
236 setOperationAction(ISD::FCOS , MVT::f64 , Expand);
239 setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
240 addLegalFPImmediate(+0.0); // FLD0
241 addLegalFPImmediate(+1.0); // FLD1
242 addLegalFPImmediate(-0.0); // FLD0/FCHS
243 addLegalFPImmediate(-1.0); // FLD1/FCHS
246 // First set operation action for all vector types to expand. Then we
247 // will selectively turn on ones that can be effectively codegen'd.
248 for (unsigned VT = (unsigned)MVT::Vector + 1;
249 VT != (unsigned)MVT::LAST_VALUETYPE; VT++) {
250 setOperationAction(ISD::ADD , (MVT::ValueType)VT, Expand);
251 setOperationAction(ISD::SUB , (MVT::ValueType)VT, Expand);
252 setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
253 setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
256 if (TM.getSubtarget<X86Subtarget>().hasMMX()) {
257 addRegisterClass(MVT::v8i8, X86::VR64RegisterClass);
258 addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
259 addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
261 // FIXME: add MMX packed arithmetics
262 setOperationAction(ISD::ConstantVec, MVT::v8i8, Expand);
263 setOperationAction(ISD::ConstantVec, MVT::v4i16, Expand);
264 setOperationAction(ISD::ConstantVec, MVT::v2i32, Expand);
267 if (TM.getSubtarget<X86Subtarget>().hasSSE1()) {
268 addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);
270 setOperationAction(ISD::ADD , MVT::v4f32, Legal);
271 setOperationAction(ISD::SUB , MVT::v4f32, Legal);
272 setOperationAction(ISD::MUL , MVT::v4f32, Legal);
273 setOperationAction(ISD::LOAD , MVT::v4f32, Legal);
274 setOperationAction(ISD::ConstantVec, MVT::v4f32, Expand);
277 if (TM.getSubtarget<X86Subtarget>().hasSSE2()) {
278 addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
279 addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
280 addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
281 addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
282 addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);
285 setOperationAction(ISD::ADD , MVT::v2f64, Legal);
286 setOperationAction(ISD::SUB , MVT::v2f64, Legal);
287 setOperationAction(ISD::MUL , MVT::v2f64, Legal);
288 setOperationAction(ISD::LOAD , MVT::v2f64, Legal);
289 setOperationAction(ISD::ConstantVec, MVT::v2f64, Expand);
290 setOperationAction(ISD::ConstantVec, MVT::v16i8, Expand);
291 setOperationAction(ISD::ConstantVec, MVT::v8i16, Expand);
292 setOperationAction(ISD::ConstantVec, MVT::v4i32, Expand);
293 setOperationAction(ISD::ConstantVec, MVT::v2i64, Expand);
296 computeRegisterProperties();
298 // FIXME: These should be based on subtarget info. Plus, the values should
299 // be smaller when we are in optimizing for size mode.
300 maxStoresPerMemset = 16; // For %llvm.memset -> sequence of stores
301 maxStoresPerMemcpy = 16; // For %llvm.memcpy -> sequence of stores
302 maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
303 allowUnalignedMemoryAccesses = true; // x86 supports it!
306 std::vector<SDOperand>
307 X86TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
308 if (F.getCallingConv() == CallingConv::Fast && EnableFastCC)
309 return LowerFastCCArguments(F, DAG);
310 return LowerCCCArguments(F, DAG);
313 std::pair<SDOperand, SDOperand>
314 X86TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy,
315 bool isVarArg, unsigned CallingConv,
317 SDOperand Callee, ArgListTy &Args,
319 assert((!isVarArg || CallingConv == CallingConv::C) &&
320 "Only C takes varargs!");
322 // If the callee is a GlobalAddress node (quite common, every direct call is)
323 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
324 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
325 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
326 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
327 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
329 if (CallingConv == CallingConv::Fast && EnableFastCC)
330 return LowerFastCCCallTo(Chain, RetTy, isTailCall, Callee, Args, DAG);
331 return LowerCCCCallTo(Chain, RetTy, isVarArg, isTailCall, Callee, Args, DAG);
334 //===----------------------------------------------------------------------===//
335 // C Calling Convention implementation
336 //===----------------------------------------------------------------------===//
338 std::vector<SDOperand>
339 X86TargetLowering::LowerCCCArguments(Function &F, SelectionDAG &DAG) {
340 std::vector<SDOperand> ArgValues;
342 MachineFunction &MF = DAG.getMachineFunction();
343 MachineFrameInfo *MFI = MF.getFrameInfo();
345 // Add DAG nodes to load the arguments... On entry to a function on the X86,
346 // the stack frame looks like this:
348 // [ESP] -- return address
349 // [ESP + 4] -- first argument (leftmost lexically)
350 // [ESP + 8] -- second argument, if first argument is four bytes in size
353 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
354 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
355 MVT::ValueType ObjectVT = getValueType(I->getType());
356 unsigned ArgIncrement = 4;
359 default: assert(0 && "Unhandled argument type!");
361 case MVT::i8: ObjSize = 1; break;
362 case MVT::i16: ObjSize = 2; break;
363 case MVT::i32: ObjSize = 4; break;
364 case MVT::i64: ObjSize = ArgIncrement = 8; break;
365 case MVT::f32: ObjSize = 4; break;
366 case MVT::f64: ObjSize = ArgIncrement = 8; break;
368 // Create the frame index object for this incoming parameter...
369 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
371 // Create the SelectionDAG nodes corresponding to a load from this parameter
372 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
374 // Don't codegen dead arguments. FIXME: remove this check when we can nuke
378 ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
379 DAG.getSrcValue(NULL));
381 if (MVT::isInteger(ObjectVT))
382 ArgValue = DAG.getConstant(0, ObjectVT);
384 ArgValue = DAG.getConstantFP(0, ObjectVT);
386 ArgValues.push_back(ArgValue);
388 ArgOffset += ArgIncrement; // Move on to the next argument...
391 // If the function takes variable number of arguments, make a frame index for
392 // the start of the first vararg value... for expansion of llvm.va_start.
394 VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
395 ReturnAddrIndex = 0; // No return address slot generated yet.
396 BytesToPopOnReturn = 0; // Callee pops nothing.
397 BytesCallerReserves = ArgOffset;
399 // Finally, inform the code generator which regs we return values in.
400 switch (getValueType(F.getReturnType())) {
401 default: assert(0 && "Unknown type!");
402 case MVT::isVoid: break;
407 MF.addLiveOut(X86::EAX);
410 MF.addLiveOut(X86::EAX);
411 MF.addLiveOut(X86::EDX);
415 MF.addLiveOut(X86::ST0);
421 std::pair<SDOperand, SDOperand>
422 X86TargetLowering::LowerCCCCallTo(SDOperand Chain, const Type *RetTy,
423 bool isVarArg, bool isTailCall,
424 SDOperand Callee, ArgListTy &Args,
426 // Count how many bytes are to be pushed on the stack.
427 unsigned NumBytes = 0;
431 Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(0, getPointerTy()));
433 for (unsigned i = 0, e = Args.size(); i != e; ++i)
434 switch (getValueType(Args[i].second)) {
435 default: assert(0 && "Unknown value type!");
449 Chain = DAG.getCALLSEQ_START(Chain,
450 DAG.getConstant(NumBytes, getPointerTy()));
452 // Arguments go on the stack in reverse order, as specified by the ABI.
453 unsigned ArgOffset = 0;
454 SDOperand StackPtr = DAG.getRegister(X86::ESP, MVT::i32);
455 std::vector<SDOperand> Stores;
457 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
458 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
459 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
461 switch (getValueType(Args[i].second)) {
462 default: assert(0 && "Unexpected ValueType for argument!");
466 // Promote the integer to 32 bits. If the input type is signed use a
467 // sign extend, otherwise use a zero extend.
468 if (Args[i].second->isSigned())
469 Args[i].first =DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Args[i].first);
471 Args[i].first =DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Args[i].first);
476 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
477 Args[i].first, PtrOff,
478 DAG.getSrcValue(NULL)));
483 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
484 Args[i].first, PtrOff,
485 DAG.getSrcValue(NULL)));
490 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);
493 std::vector<MVT::ValueType> RetVals;
494 MVT::ValueType RetTyVT = getValueType(RetTy);
495 RetVals.push_back(MVT::Other);
497 // The result values produced have to be legal. Promote the result.
499 case MVT::isVoid: break;
501 RetVals.push_back(RetTyVT);
506 RetVals.push_back(MVT::i32);
510 RetVals.push_back(MVT::f32);
512 RetVals.push_back(MVT::f64);
515 RetVals.push_back(MVT::i32);
516 RetVals.push_back(MVT::i32);
520 std::vector<MVT::ValueType> NodeTys;
521 NodeTys.push_back(MVT::Other); // Returns a chain
522 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
523 std::vector<SDOperand> Ops;
524 Ops.push_back(Chain);
525 Ops.push_back(Callee);
527 // FIXME: Do not generate X86ISD::TAILCALL for now.
528 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops);
529 SDOperand InFlag = Chain.getValue(1);
532 NodeTys.push_back(MVT::Other); // Returns a chain
533 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
535 Ops.push_back(Chain);
536 Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
537 Ops.push_back(DAG.getConstant(0, getPointerTy()));
538 Ops.push_back(InFlag);
539 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, Ops);
540 InFlag = Chain.getValue(1);
543 if (RetTyVT != MVT::isVoid) {
545 default: assert(0 && "Unknown value type to return!");
548 RetVal = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag);
549 Chain = RetVal.getValue(1);
550 if (RetTyVT == MVT::i1)
551 RetVal = DAG.getNode(ISD::TRUNCATE, MVT::i1, RetVal);
554 RetVal = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag);
555 Chain = RetVal.getValue(1);
558 RetVal = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
559 Chain = RetVal.getValue(1);
562 SDOperand Lo = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
563 SDOperand Hi = DAG.getCopyFromReg(Lo.getValue(1), X86::EDX, MVT::i32,
565 RetVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Lo, Hi);
566 Chain = Hi.getValue(1);
571 std::vector<MVT::ValueType> Tys;
572 Tys.push_back(MVT::f64);
573 Tys.push_back(MVT::Other);
574 Tys.push_back(MVT::Flag);
575 std::vector<SDOperand> Ops;
576 Ops.push_back(Chain);
577 Ops.push_back(InFlag);
578 RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, Ops);
579 Chain = RetVal.getValue(1);
580 InFlag = RetVal.getValue(2);
582 // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
583 // shouldn't be necessary except that RFP cannot be live across
584 // multiple blocks. When stackifier is fixed, they can be uncoupled.
585 MachineFunction &MF = DAG.getMachineFunction();
586 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
587 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
589 Tys.push_back(MVT::Other);
591 Ops.push_back(Chain);
592 Ops.push_back(RetVal);
593 Ops.push_back(StackSlot);
594 Ops.push_back(DAG.getValueType(RetTyVT));
595 Ops.push_back(InFlag);
596 Chain = DAG.getNode(X86ISD::FST, Tys, Ops);
597 RetVal = DAG.getLoad(RetTyVT, Chain, StackSlot,
598 DAG.getSrcValue(NULL));
599 Chain = RetVal.getValue(1);
602 if (RetTyVT == MVT::f32 && !X86ScalarSSE)
603 // FIXME: we would really like to remember that this FP_ROUND
604 // operation is okay to eliminate if we allow excess FP precision.
605 RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
611 return std::make_pair(RetVal, Chain);
614 //===----------------------------------------------------------------------===//
615 // Fast Calling Convention implementation
616 //===----------------------------------------------------------------------===//
618 // The X86 'fast' calling convention passes up to two integer arguments in
619 // registers (an appropriate portion of EAX/EDX), passes arguments in C order,
620 // and requires that the callee pop its arguments off the stack (allowing proper
621 // tail calls), and has the same return value conventions as C calling convs.
623 // This calling convention always arranges for the callee pop value to be 8n+4
624 // bytes, which is needed for tail recursion elimination and stack alignment
627 // Note that this can be enhanced in the future to pass fp vals in registers
628 // (when we have a global fp allocator) and do other tricks.
631 /// AddLiveIn - This helper function adds the specified physical register to the
632 /// MachineFunction as a live in value. It also creates a corresponding virtual
634 static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
635 TargetRegisterClass *RC) {
636 assert(RC->contains(PReg) && "Not the correct regclass!");
637 unsigned VReg = MF.getSSARegMap()->createVirtualRegister(RC);
638 MF.addLiveIn(PReg, VReg);
642 // FASTCC_NUM_INT_ARGS_INREGS - This is the max number of integer arguments
643 // to pass in registers. 0 is none, 1 is is "use EAX", 2 is "use EAX and
644 // EDX". Anything more is illegal.
646 // FIXME: The linscan register allocator currently has problem with
647 // coallescing. At the time of this writing, whenever it decides to coallesce
648 // a physreg with a virtreg, this increases the size of the physreg's live
649 // range, and the live range cannot ever be reduced. This causes problems if
650 // too many physregs are coalleced with virtregs, which can cause the register
651 // allocator to wedge itself.
653 // This code triggers this problem more often if we pass args in registers,
654 // so disable it until this is fixed.
656 // NOTE: this isn't marked const, so that GCC doesn't emit annoying warnings
657 // about code being dead.
659 static unsigned FASTCC_NUM_INT_ARGS_INREGS = 0;
662 std::vector<SDOperand>
663 X86TargetLowering::LowerFastCCArguments(Function &F, SelectionDAG &DAG) {
664 std::vector<SDOperand> ArgValues;
666 MachineFunction &MF = DAG.getMachineFunction();
667 MachineFrameInfo *MFI = MF.getFrameInfo();
669 // Add DAG nodes to load the arguments... On entry to a function the stack
670 // frame looks like this:
672 // [ESP] -- return address
673 // [ESP + 4] -- first nonreg argument (leftmost lexically)
674 // [ESP + 8] -- second nonreg argument, if first argument is 4 bytes in size
676 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
678 // Keep track of the number of integer regs passed so far. This can be either
679 // 0 (neither EAX or EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
681 unsigned NumIntRegs = 0;
683 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
684 MVT::ValueType ObjectVT = getValueType(I->getType());
685 unsigned ArgIncrement = 4;
686 unsigned ObjSize = 0;
690 default: assert(0 && "Unhandled argument type!");
693 if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
694 if (!I->use_empty()) {
695 unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::DL : X86::AL,
696 X86::R8RegisterClass);
697 ArgValue = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i8);
698 DAG.setRoot(ArgValue.getValue(1));
699 if (ObjectVT == MVT::i1)
700 // FIXME: Should insert a assertzext here.
701 ArgValue = DAG.getNode(ISD::TRUNCATE, MVT::i1, ArgValue);
710 if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
711 if (!I->use_empty()) {
712 unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::DX : X86::AX,
713 X86::R16RegisterClass);
714 ArgValue = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i16);
715 DAG.setRoot(ArgValue.getValue(1));
723 if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
724 if (!I->use_empty()) {
725 unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX,
726 X86::R32RegisterClass);
727 ArgValue = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32);
728 DAG.setRoot(ArgValue.getValue(1));
736 if (NumIntRegs+2 <= FASTCC_NUM_INT_ARGS_INREGS) {
737 if (!I->use_empty()) {
738 unsigned BotReg = AddLiveIn(MF, X86::EAX, X86::R32RegisterClass);
739 unsigned TopReg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);
741 SDOperand Low = DAG.getCopyFromReg(DAG.getRoot(), BotReg, MVT::i32);
742 SDOperand Hi = DAG.getCopyFromReg(Low.getValue(1), TopReg, MVT::i32);
743 DAG.setRoot(Hi.getValue(1));
745 ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Low, Hi);
749 } else if (NumIntRegs+1 <= FASTCC_NUM_INT_ARGS_INREGS) {
750 if (!I->use_empty()) {
751 unsigned BotReg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);
752 SDOperand Low = DAG.getCopyFromReg(DAG.getRoot(), BotReg, MVT::i32);
753 DAG.setRoot(Low.getValue(1));
755 // Load the high part from memory.
756 // Create the frame index object for this incoming parameter...
757 int FI = MFI->CreateFixedObject(4, ArgOffset);
758 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
759 SDOperand Hi = DAG.getLoad(MVT::i32, DAG.getEntryNode(), FIN,
760 DAG.getSrcValue(NULL));
761 ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Low, Hi);
764 NumIntRegs = FASTCC_NUM_INT_ARGS_INREGS;
767 ObjSize = ArgIncrement = 8;
769 case MVT::f32: ObjSize = 4; break;
770 case MVT::f64: ObjSize = ArgIncrement = 8; break;
773 // Don't codegen dead arguments. FIXME: remove this check when we can nuke
775 if (ObjSize && !I->use_empty()) {
776 // Create the frame index object for this incoming parameter...
777 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
779 // Create the SelectionDAG nodes corresponding to a load from this
781 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
783 ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
784 DAG.getSrcValue(NULL));
785 } else if (ArgValue.Val == 0) {
786 if (MVT::isInteger(ObjectVT))
787 ArgValue = DAG.getConstant(0, ObjectVT);
789 ArgValue = DAG.getConstantFP(0, ObjectVT);
791 ArgValues.push_back(ArgValue);
794 ArgOffset += ArgIncrement; // Move on to the next argument.
797 // Make sure the instruction takes 8n+4 bytes to make sure the start of the
798 // arguments and the arguments after the retaddr has been pushed are aligned.
799 if ((ArgOffset & 7) == 0)
802 VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs.
803 ReturnAddrIndex = 0; // No return address slot generated yet.
804 BytesToPopOnReturn = ArgOffset; // Callee pops all stack arguments.
805 BytesCallerReserves = 0;
807 // Finally, inform the code generator which regs we return values in.
808 switch (getValueType(F.getReturnType())) {
809 default: assert(0 && "Unknown type!");
810 case MVT::isVoid: break;
815 MF.addLiveOut(X86::EAX);
818 MF.addLiveOut(X86::EAX);
819 MF.addLiveOut(X86::EDX);
823 MF.addLiveOut(X86::ST0);
829 std::pair<SDOperand, SDOperand>
830 X86TargetLowering::LowerFastCCCallTo(SDOperand Chain, const Type *RetTy,
831 bool isTailCall, SDOperand Callee,
832 ArgListTy &Args, SelectionDAG &DAG) {
833 // Count how many bytes are to be pushed on the stack.
834 unsigned NumBytes = 0;
836 // Keep track of the number of integer regs passed so far. This can be either
837 // 0 (neither EAX or EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
839 unsigned NumIntRegs = 0;
841 for (unsigned i = 0, e = Args.size(); i != e; ++i)
842 switch (getValueType(Args[i].second)) {
843 default: assert(0 && "Unknown value type!");
848 if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
857 if (NumIntRegs+2 <= FASTCC_NUM_INT_ARGS_INREGS) {
860 } else if (NumIntRegs+1 <= FASTCC_NUM_INT_ARGS_INREGS) {
861 NumIntRegs = FASTCC_NUM_INT_ARGS_INREGS;
872 // Make sure the instruction takes 8n+4 bytes to make sure the start of the
873 // arguments and the arguments after the retaddr has been pushed are aligned.
874 if ((NumBytes & 7) == 0)
877 Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));
879 // Arguments go on the stack in reverse order, as specified by the ABI.
880 unsigned ArgOffset = 0;
881 SDOperand StackPtr = DAG.getRegister(X86::ESP, MVT::i32);
883 std::vector<SDOperand> Stores;
884 std::vector<SDOperand> RegValuesToPass;
885 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
886 switch (getValueType(Args[i].second)) {
887 default: assert(0 && "Unexpected ValueType for argument!");
889 Args[i].first = DAG.getNode(ISD::ANY_EXTEND, MVT::i8, Args[i].first);
894 if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
895 RegValuesToPass.push_back(Args[i].first);
901 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
902 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
903 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
904 Args[i].first, PtrOff,
905 DAG.getSrcValue(NULL)));
910 // Can pass (at least) part of it in regs?
911 if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
912 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
913 Args[i].first, DAG.getConstant(1, MVT::i32));
914 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
915 Args[i].first, DAG.getConstant(0, MVT::i32));
916 RegValuesToPass.push_back(Lo);
919 // Pass both parts in regs?
920 if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
921 RegValuesToPass.push_back(Hi);
924 // Pass the high part in memory.
925 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
926 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
927 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
928 Hi, PtrOff, DAG.getSrcValue(NULL)));
935 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
936 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
937 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
938 Args[i].first, PtrOff,
939 DAG.getSrcValue(NULL)));
945 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);
947 // Make sure the instruction takes 8n+4 bytes to make sure the start of the
948 // arguments and the arguments after the retaddr has been pushed are aligned.
949 if ((ArgOffset & 7) == 0)
952 std::vector<MVT::ValueType> RetVals;
953 MVT::ValueType RetTyVT = getValueType(RetTy);
955 RetVals.push_back(MVT::Other);
957 // The result values produced have to be legal. Promote the result.
959 case MVT::isVoid: break;
961 RetVals.push_back(RetTyVT);
966 RetVals.push_back(MVT::i32);
970 RetVals.push_back(MVT::f32);
972 RetVals.push_back(MVT::f64);
975 RetVals.push_back(MVT::i32);
976 RetVals.push_back(MVT::i32);
980 // Build a sequence of copy-to-reg nodes chained together with token chain
981 // and flag operands which copy the outgoing args into registers.
983 for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) {
985 SDOperand RegToPass = RegValuesToPass[i];
986 switch (RegToPass.getValueType()) {
987 default: assert(0 && "Bad thing to pass in regs");
989 CCReg = (i == 0) ? X86::AL : X86::DL;
992 CCReg = (i == 0) ? X86::AX : X86::DX;
995 CCReg = (i == 0) ? X86::EAX : X86::EDX;
999 Chain = DAG.getCopyToReg(Chain, CCReg, RegToPass, InFlag);
1000 InFlag = Chain.getValue(1);
1003 std::vector<MVT::ValueType> NodeTys;
1004 NodeTys.push_back(MVT::Other); // Returns a chain
1005 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
1006 std::vector<SDOperand> Ops;
1007 Ops.push_back(Chain);
1008 Ops.push_back(Callee);
1010 Ops.push_back(InFlag);
1012 // FIXME: Do not generate X86ISD::TAILCALL for now.
1013 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops);
1014 InFlag = Chain.getValue(1);
1017 NodeTys.push_back(MVT::Other); // Returns a chain
1018 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
1020 Ops.push_back(Chain);
1021 Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));
1022 Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));
1023 Ops.push_back(InFlag);
1024 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, Ops);
1025 InFlag = Chain.getValue(1);
1028 if (RetTyVT != MVT::isVoid) {
1030 default: assert(0 && "Unknown value type to return!");
1033 RetVal = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag);
1034 Chain = RetVal.getValue(1);
1035 if (RetTyVT == MVT::i1)
1036 RetVal = DAG.getNode(ISD::TRUNCATE, MVT::i1, RetVal);
1039 RetVal = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag);
1040 Chain = RetVal.getValue(1);
1043 RetVal = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
1044 Chain = RetVal.getValue(1);
1047 SDOperand Lo = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
1048 SDOperand Hi = DAG.getCopyFromReg(Lo.getValue(1), X86::EDX, MVT::i32,
1050 RetVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Lo, Hi);
1051 Chain = Hi.getValue(1);
1056 std::vector<MVT::ValueType> Tys;
1057 Tys.push_back(MVT::f64);
1058 Tys.push_back(MVT::Other);
1059 Tys.push_back(MVT::Flag);
1060 std::vector<SDOperand> Ops;
1061 Ops.push_back(Chain);
1062 Ops.push_back(InFlag);
1063 RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, Ops);
1064 Chain = RetVal.getValue(1);
1065 InFlag = RetVal.getValue(2);
1067 // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
1068 // shouldn't be necessary except that RFP cannot be live across
1069 // multiple blocks. When stackifier is fixed, they can be uncoupled.
1070 MachineFunction &MF = DAG.getMachineFunction();
1071 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
1072 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
1074 Tys.push_back(MVT::Other);
1076 Ops.push_back(Chain);
1077 Ops.push_back(RetVal);
1078 Ops.push_back(StackSlot);
1079 Ops.push_back(DAG.getValueType(RetTyVT));
1080 Ops.push_back(InFlag);
1081 Chain = DAG.getNode(X86ISD::FST, Tys, Ops);
1082 RetVal = DAG.getLoad(RetTyVT, Chain, StackSlot,
1083 DAG.getSrcValue(NULL));
1084 Chain = RetVal.getValue(1);
1087 if (RetTyVT == MVT::f32 && !X86ScalarSSE)
1088 // FIXME: we would really like to remember that this FP_ROUND
1089 // operation is okay to eliminate if we allow excess FP precision.
1090 RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
1096 return std::make_pair(RetVal, Chain);
/// getReturnAddressFrameIndex - Return a frame-index node that refers to the
/// slot holding this function's return address, creating the fixed stack
/// object lazily on first use and caching its index in ReturnAddrIndex.
1099 SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
1100 if (ReturnAddrIndex == 0) {
1101 // Set up a frame object for the return address.
1102 MachineFunction &MF = DAG.getMachineFunction();
// 4-byte fixed object at offset -4: the location of the pushed return
// address on x86-32. NOTE(review): ReturnAddrIndex is cached on the
// TargetLowering object — confirm it is reset between functions.
1103 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
1106 return DAG.getFrameIndex(ReturnAddrIndex, MVT::i32);
/// LowerFrameReturnAddress - Lower llvm.returnaddress / llvm.frameaddress.
/// Only Depth == 0 is supported; deeper frames yield a constant 0. For the
/// return address the slot is simply loaded; for the frame address the code
/// computes (return-address slot - 4), i.e. the word immediately below it.
1111 std::pair<SDOperand, SDOperand> X86TargetLowering::
1112 LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
1113 SelectionDAG &DAG) {
1115 if (Depth) // Depths > 0 not supported yet!
1116 Result = DAG.getConstant(0, getPointerTy());
1118 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
1119 if (!isFrameAddress)
1120 // Just load the return address
1121 Result = DAG.getLoad(MVT::i32, DAG.getEntryNode(), RetAddrFI,
1122 DAG.getSrcValue(NULL));
// Frame address: 4 bytes below the return-address slot (where the saved
// EBP would sit on x86-32 — NOTE(review): assumes a standard frame layout).
1124 Result = DAG.getNode(ISD::SUB, MVT::i32, RetAddrFI,
1125 DAG.getConstant(4, MVT::i32));
// The incoming chain is returned unchanged alongside the computed value.
1127 return std::make_pair(Result, Chain);
1130 /// getCondBrOpcodeForX86CC - Returns the X86 conditional branch opcode
1131 /// which corresponds to the condition code.
/// This is a pure one-to-one table from X86ISD::COND_* to the matching Jcc
/// instruction; any unhandled condition code trips the assert below.
1132 static unsigned getCondBrOpcodeForX86CC(unsigned X86CC) {
1134 default: assert(0 && "Unknown X86 conditional code!");
1135 case X86ISD::COND_A: return X86::JA;
1136 case X86ISD::COND_AE: return X86::JAE;
1137 case X86ISD::COND_B: return X86::JB;
1138 case X86ISD::COND_BE: return X86::JBE;
1139 case X86ISD::COND_E: return X86::JE;
1140 case X86ISD::COND_G: return X86::JG;
1141 case X86ISD::COND_GE: return X86::JGE;
1142 case X86ISD::COND_L: return X86::JL;
1143 case X86ISD::COND_LE: return X86::JLE;
1144 case X86ISD::COND_NE: return X86::JNE;
1145 case X86ISD::COND_NO: return X86::JNO;
1146 case X86ISD::COND_NP: return X86::JNP;
1147 case X86ISD::COND_NS: return X86::JNS;
1148 case X86ISD::COND_O: return X86::JO;
1149 case X86ISD::COND_P: return X86::JP;
1150 case X86ISD::COND_S: return X86::JS;
1154 /// translateX86CC - do a one to one translation of a ISD::CondCode to the X86
1155 /// specific condition code. It returns false if it cannot do a direct
1156 /// translation. X86CC is the translated CondCode. Flip is set to true if the
1157 /// order of comparison operands should be flipped.
1158 static bool translateX86CC(SDOperand CC, bool isFP, unsigned &X86CC,
1160 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
// Start pessimistic; a successful case below overwrites this sentinel.
1162 X86CC = X86ISD::COND_INVALID;
// Integer comparisons: direct mapping. Signed predicates use the sign-based
// conditions (G/GE/L/LE), unsigned ones the carry-based conditions (A/AE/B/BE).
1164 switch (SetCCOpcode) {
1166 case ISD::SETEQ: X86CC = X86ISD::COND_E; break;
1167 case ISD::SETGT: X86CC = X86ISD::COND_G; break;
1168 case ISD::SETGE: X86CC = X86ISD::COND_GE; break;
1169 case ISD::SETLT: X86CC = X86ISD::COND_L; break;
1170 case ISD::SETLE: X86CC = X86ISD::COND_LE; break;
1171 case ISD::SETNE: X86CC = X86ISD::COND_NE; break;
1172 case ISD::SETULT: X86CC = X86ISD::COND_B; break;
1173 case ISD::SETUGT: X86CC = X86ISD::COND_A; break;
1174 case ISD::SETULE: X86CC = X86ISD::COND_BE; break;
1175 case ISD::SETUGE: X86CC = X86ISD::COND_AE; break;
1178 // On a floating point condition, the flags are set as follows:
1180 // 0 | 0 | 0 | X > Y
1181 // 0 | 0 | 1 | X < Y
1182 // 1 | 0 | 0 | X == Y
1183 // 1 | 1 | 1 | unordered
// The Flip cases swap the comparison operands and then fall through, so the
// same carry/zero-flag based condition code can be reused for the mirrored
// predicate.
1184 switch (SetCCOpcode) {
1187 case ISD::SETEQ: X86CC = X86ISD::COND_E; break;
1188 case ISD::SETOLE: Flip = true; // Fallthrough
1190 case ISD::SETGT: X86CC = X86ISD::COND_A; break;
1191 case ISD::SETOLT: Flip = true; // Fallthrough
1193 case ISD::SETGE: X86CC = X86ISD::COND_AE; break;
1194 case ISD::SETUGE: Flip = true; // Fallthrough
1196 case ISD::SETLT: X86CC = X86ISD::COND_B; break;
1197 case ISD::SETUGT: Flip = true; // Fallthrough
1199 case ISD::SETLE: X86CC = X86ISD::COND_BE; break;
1201 case ISD::SETNE: X86CC = X86ISD::COND_NE; break;
// Ordered/unordered tests map to the parity flag (set only when unordered).
1202 case ISD::SETUO: X86CC = X86ISD::COND_P; break;
1203 case ISD::SETO: X86CC = X86ISD::COND_NP; break;
// Success iff one of the cases above assigned a real condition code.
1207 return X86CC != X86ISD::COND_INVALID;
1210 /// hasFPCMov - is there a floating point cmov for the specific X86 condition
1211 /// code. Current x86 isa includes the following FP cmov instructions:
1212 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
/// Only the CF/ZF/PF based conditions listed below have FCMOVcc forms; the
/// signed conditions (G/GE/L/LE etc.) do not and must fall back elsewhere.
1213 static bool hasFPCMov(unsigned X86CC) {
1217 case X86ISD::COND_B:
1218 case X86ISD::COND_BE:
1219 case X86ISD::COND_E:
1220 case X86ISD::COND_P:
1221 case X86ISD::COND_A:
1222 case X86ISD::COND_AE:
1223 case X86ISD::COND_NE:
1224 case X86ISD::COND_NP:
// InsertAtEndOfBasicBlock - Expand pseudo-instructions that need custom
// machine-level lowering after instruction selection: FP conditional moves
// (CMOV_FR32/64), which become a branch diamond, and FP_TO_INT*_IN_MEM,
// which temporarily switch the FPU control word to round-towards-zero.
1230 X86TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
1231 MachineBasicBlock *BB) {
1232 switch (MI->getOpcode()) {
1233 default: assert(false && "Unexpected instr type to insert");
1234 case X86::CMOV_FR32:
1235 case X86::CMOV_FR64: {
1236 // To "insert" a SELECT_CC instruction, we actually have to insert the
1237 // diamond control-flow pattern. The incoming instruction knows the
1238 // destination vreg to set, the condition code register to branch on, the
1239 // true/false values to select between, and a branch opcode to use.
1240 const BasicBlock *LLVM_BB = BB->getBasicBlock();
// Insertion point in the function's block list for the new blocks.
1241 ilist<MachineBasicBlock>::iterator It = BB;
1247 // cmpTY ccX, r1, r2
1249 // fallthrough --> copy0MBB
1250 MachineBasicBlock *thisMBB = BB;
1251 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
1252 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
// Operand 3 carries the X86ISD::COND_* immediate; translate it to the Jcc
// opcode that branches straight to the sink block when the condition holds.
1253 unsigned Opc = getCondBrOpcodeForX86CC(MI->getOperand(3).getImmedValue());
1254 BuildMI(BB, Opc, 1).addMBB(sinkMBB);
1255 MachineFunction *F = BB->getParent();
1256 F->getBasicBlockList().insert(It, copy0MBB);
1257 F->getBasicBlockList().insert(It, sinkMBB);
1258 // Update machine-CFG edges
1259 BB->addSuccessor(copy0MBB);
1260 BB->addSuccessor(sinkMBB);
1263 // %FalseValue = ...
1264 // # fallthrough to sinkMBB
1267 // Update machine-CFG edges
1268 BB->addSuccessor(sinkMBB);
1271 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
// Merge the two values: operand 0 is the dest vreg, 1/2 the true/false vregs.
1274 BuildMI(BB, X86::PHI, 4, MI->getOperand(0).getReg())
1275 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
1276 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
1278 delete MI; // The pseudo instruction is gone now.
1282 case X86::FP_TO_INT16_IN_MEM:
1283 case X86::FP_TO_INT32_IN_MEM:
1284 case X86::FP_TO_INT64_IN_MEM: {
1285 // Change the floating point control register to use "round towards zero"
1286 // mode when truncating to an integer value.
// A 2-byte stack slot holds the saved FPU control word across the sequence.
1287 MachineFunction *F = BB->getParent();
1288 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
1289 addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);
1291 // Load the old value of the high byte of the control word...
1293 F->getSSARegMap()->createVirtualRegister(X86::R16RegisterClass);
1294 addFrameReference(BuildMI(BB, X86::MOV16rm, 4, OldCW), CWFrameIdx);
1296 // Set the high part to be round to zero...
1297 addFrameReference(BuildMI(BB, X86::MOV16mi, 5), CWFrameIdx).addImm(0xC7F);
1299 // Reload the modified control word now...
1300 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
1302 // Restore the memory image of control word to original value
1303 addFrameReference(BuildMI(BB, X86::MOV16mr, 5), CWFrameIdx).addReg(OldCW);
1305 // Get the X86 opcode to use.
1307 switch (MI->getOpcode()) {
1308 default: assert(0 && "illegal opcode!");
1309 case X86::FP_TO_INT16_IN_MEM: Opc = X86::FpIST16m; break;
1310 case X86::FP_TO_INT32_IN_MEM: Opc = X86::FpIST32m; break;
1311 case X86::FP_TO_INT64_IN_MEM: Opc = X86::FpIST64m; break;
// Rebuild the store's address mode (base, scale, index, displacement) from
// the pseudo's operands 0-3; operand 4 is the FP value register to store.
1315 MachineOperand &Op = MI->getOperand(0);
1316 if (Op.isRegister()) {
1317 AM.BaseType = X86AddressMode::RegBase;
1318 AM.Base.Reg = Op.getReg();
1320 AM.BaseType = X86AddressMode::FrameIndexBase;
1321 AM.Base.FrameIndex = Op.getFrameIndex();
1323 Op = MI->getOperand(1);
1324 if (Op.isImmediate())
1325 AM.Scale = Op.getImmedValue();
1326 Op = MI->getOperand(2);
1327 if (Op.isImmediate())
// NOTE(review): the index register number is read from an immediate operand
// here — presumably the pseudo encodes it that way; confirm against the
// pseudo-instruction's operand layout.
1328 AM.IndexReg = Op.getImmedValue();
1329 Op = MI->getOperand(3);
1330 if (Op.isGlobalAddress()) {
1331 AM.GV = Op.getGlobal();
1333 AM.Disp = Op.getImmedValue();
1335 addFullAddress(BuildMI(BB, Opc, 5), AM).addReg(MI->getOperand(4).getReg());
1337 // Reload the original control word now.
1338 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
1340 delete MI; // The pseudo instruction is gone now.
1347 //===----------------------------------------------------------------------===//
1348 // X86 Custom Lowering Hooks
1349 //===----------------------------------------------------------------------===//
1351 /// DarwinGVRequiresExtraLoad - true if accessing the GV requires an extra
1352 /// load. For Darwin, external and weak symbols are indirect, loading the value
1353 /// at address GV rather than the value of GV itself. This means that the
1354 /// GlobalAddress must be in the base or index register of the address, not the
1355 /// GV offset field.
1356 static bool DarwinGVRequiresExtraLoad(GlobalValue *GV) {
// Weak/linkonce symbols are always indirect; external symbols are indirect
// unless we are lazily compiling and the body simply hasn't been read yet.
1357 return (GV->hasWeakLinkage() || GV->hasLinkOnceLinkage() ||
1358 (GV->isExternal() && !GV->hasNotBeenReadFromBytecode()));
1361 /// LowerOperation - Provide custom lowering hooks for some operations.
1363 SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
1364 switch (Op.getOpcode()) {
1365 default: assert(0 && "Should not custom lower this!");
1366 case ISD::SHL_PARTS:
1367 case ISD::SRA_PARTS:
1368 case ISD::SRL_PARTS: {
1369 assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 &&
1370 "Not an i64 shift!");
1371 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
1372 SDOperand ShOpLo = Op.getOperand(0);
1373 SDOperand ShOpHi = Op.getOperand(1);
1374 SDOperand ShAmt = Op.getOperand(2);
1375 SDOperand Tmp1 = isSRA ? DAG.getNode(ISD::SRA, MVT::i32, ShOpHi,
1376 DAG.getConstant(31, MVT::i8))
1377 : DAG.getConstant(0, MVT::i32);
1379 SDOperand Tmp2, Tmp3;
1380 if (Op.getOpcode() == ISD::SHL_PARTS) {
1381 Tmp2 = DAG.getNode(X86ISD::SHLD, MVT::i32, ShOpHi, ShOpLo, ShAmt);
1382 Tmp3 = DAG.getNode(ISD::SHL, MVT::i32, ShOpLo, ShAmt);
1384 Tmp2 = DAG.getNode(X86ISD::SHRD, MVT::i32, ShOpLo, ShOpHi, ShAmt);
1385 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, MVT::i32, ShOpHi, ShAmt);
1388 SDOperand InFlag = DAG.getNode(X86ISD::TEST, MVT::Flag,
1389 ShAmt, DAG.getConstant(32, MVT::i8));
1392 SDOperand CC = DAG.getConstant(X86ISD::COND_NE, MVT::i8);
1394 std::vector<MVT::ValueType> Tys;
1395 Tys.push_back(MVT::i32);
1396 Tys.push_back(MVT::Flag);
1397 std::vector<SDOperand> Ops;
1398 if (Op.getOpcode() == ISD::SHL_PARTS) {
1399 Ops.push_back(Tmp2);
1400 Ops.push_back(Tmp3);
1402 Ops.push_back(InFlag);
1403 Hi = DAG.getNode(X86ISD::CMOV, Tys, Ops);
1404 InFlag = Hi.getValue(1);
1407 Ops.push_back(Tmp3);
1408 Ops.push_back(Tmp1);
1410 Ops.push_back(InFlag);
1411 Lo = DAG.getNode(X86ISD::CMOV, Tys, Ops);
1413 Ops.push_back(Tmp2);
1414 Ops.push_back(Tmp3);
1416 Ops.push_back(InFlag);
1417 Lo = DAG.getNode(X86ISD::CMOV, Tys, Ops);
1418 InFlag = Lo.getValue(1);
1421 Ops.push_back(Tmp3);
1422 Ops.push_back(Tmp1);
1424 Ops.push_back(InFlag);
1425 Hi = DAG.getNode(X86ISD::CMOV, Tys, Ops);
1429 Tys.push_back(MVT::i32);
1430 Tys.push_back(MVT::i32);
1434 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops);
1436 case ISD::SINT_TO_FP: {
1437 assert(Op.getOperand(0).getValueType() <= MVT::i64 &&
1438 Op.getOperand(0).getValueType() >= MVT::i16 &&
1439 "Unknown SINT_TO_FP to lower!");
1442 MVT::ValueType SrcVT = Op.getOperand(0).getValueType();
1443 unsigned Size = MVT::getSizeInBits(SrcVT)/8;
1444 MachineFunction &MF = DAG.getMachineFunction();
1445 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
1446 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
1447 SDOperand Chain = DAG.getNode(ISD::STORE, MVT::Other,
1448 DAG.getEntryNode(), Op.getOperand(0),
1449 StackSlot, DAG.getSrcValue(NULL));
1452 std::vector<MVT::ValueType> Tys;
1453 Tys.push_back(MVT::f64);
1454 Tys.push_back(MVT::Other);
1455 if (X86ScalarSSE) Tys.push_back(MVT::Flag);
1456 std::vector<SDOperand> Ops;
1457 Ops.push_back(Chain);
1458 Ops.push_back(StackSlot);
1459 Ops.push_back(DAG.getValueType(SrcVT));
1460 Result = DAG.getNode(X86ScalarSSE ? X86ISD::FILD_FLAG :X86ISD::FILD,
1464 Chain = Result.getValue(1);
1465 SDOperand InFlag = Result.getValue(2);
1467 // FIXME: Currently the FST is flagged to the FILD_FLAG. This
1468 // shouldn't be necessary except that RFP cannot be live across
1469 // multiple blocks. When stackifier is fixed, they can be uncoupled.
1470 MachineFunction &MF = DAG.getMachineFunction();
1471 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
1472 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
1473 std::vector<MVT::ValueType> Tys;
1474 Tys.push_back(MVT::Other);
1475 std::vector<SDOperand> Ops;
1476 Ops.push_back(Chain);
1477 Ops.push_back(Result);
1478 Ops.push_back(StackSlot);
1479 Ops.push_back(DAG.getValueType(Op.getValueType()));
1480 Ops.push_back(InFlag);
1481 Chain = DAG.getNode(X86ISD::FST, Tys, Ops);
1482 Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot,
1483 DAG.getSrcValue(NULL));
1488 case ISD::FP_TO_SINT: {
1489 assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 &&
1490 "Unknown FP_TO_SINT to lower!");
1491 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary
1493 MachineFunction &MF = DAG.getMachineFunction();
1494 unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8;
1495 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
1496 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
1499 switch (Op.getValueType()) {
1500 default: assert(0 && "Invalid FP_TO_SINT to lower!");
1501 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
1502 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
1503 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
1506 SDOperand Chain = DAG.getEntryNode();
1507 SDOperand Value = Op.getOperand(0);
1509 assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!");
1510 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value, StackSlot,
1511 DAG.getSrcValue(0));
1512 std::vector<MVT::ValueType> Tys;
1513 Tys.push_back(MVT::f64);
1514 Tys.push_back(MVT::Other);
1515 std::vector<SDOperand> Ops;
1516 Ops.push_back(Chain);
1517 Ops.push_back(StackSlot);
1518 Ops.push_back(DAG.getValueType(Op.getOperand(0).getValueType()));
1519 Value = DAG.getNode(X86ISD::FLD, Tys, Ops);
1520 Chain = Value.getValue(1);
1521 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
1522 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
1525 // Build the FP_TO_INT*_IN_MEM
1526 std::vector<SDOperand> Ops;
1527 Ops.push_back(Chain);
1528 Ops.push_back(Value);
1529 Ops.push_back(StackSlot);
1530 SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops);
1533 return DAG.getLoad(Op.getValueType(), FIST, StackSlot,
1534 DAG.getSrcValue(NULL));
1536 case ISD::READCYCLECOUNTER: {
1537 std::vector<MVT::ValueType> Tys;
1538 Tys.push_back(MVT::Other);
1539 Tys.push_back(MVT::Flag);
1540 std::vector<SDOperand> Ops;
1541 Ops.push_back(Op.getOperand(0));
1542 SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, Ops);
1544 Ops.push_back(DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)));
1545 Ops.push_back(DAG.getCopyFromReg(Ops[0].getValue(1), X86::EDX,
1546 MVT::i32, Ops[0].getValue(2)));
1547 Ops.push_back(Ops[1].getValue(1));
1548 Tys[0] = Tys[1] = MVT::i32;
1549 Tys.push_back(MVT::Other);
1550 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops);
1553 MVT::ValueType VT = Op.getValueType();
1554 const Type *OpNTy = MVT::getTypeForValueType(VT);
1555 std::vector<Constant*> CV;
1556 if (VT == MVT::f64) {
1557 CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(~(1ULL << 63))));
1558 CV.push_back(ConstantFP::get(OpNTy, 0.0));
1560 CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(~(1U << 31))));
1561 CV.push_back(ConstantFP::get(OpNTy, 0.0));
1562 CV.push_back(ConstantFP::get(OpNTy, 0.0));
1563 CV.push_back(ConstantFP::get(OpNTy, 0.0));
1565 Constant *CS = ConstantStruct::get(CV);
1566 SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
1568 = DAG.getNode(X86ISD::LOAD_PACK,
1569 VT, DAG.getEntryNode(), CPIdx, DAG.getSrcValue(NULL));
1570 return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask);
1573 MVT::ValueType VT = Op.getValueType();
1574 const Type *OpNTy = MVT::getTypeForValueType(VT);
1575 std::vector<Constant*> CV;
1576 if (VT == MVT::f64) {
1577 CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(1ULL << 63)));
1578 CV.push_back(ConstantFP::get(OpNTy, 0.0));
1580 CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(1U << 31)));
1581 CV.push_back(ConstantFP::get(OpNTy, 0.0));
1582 CV.push_back(ConstantFP::get(OpNTy, 0.0));
1583 CV.push_back(ConstantFP::get(OpNTy, 0.0));
1585 Constant *CS = ConstantStruct::get(CV);
1586 SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
1588 = DAG.getNode(X86ISD::LOAD_PACK,
1589 VT, DAG.getEntryNode(), CPIdx, DAG.getSrcValue(NULL));
1590 return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask);
1593 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
1595 SDOperand CC = Op.getOperand(2);
1596 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
1597 bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType());
1600 if (translateX86CC(CC, isFP, X86CC, Flip)) {
1602 Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
1603 Op.getOperand(1), Op.getOperand(0));
1605 Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
1606 Op.getOperand(0), Op.getOperand(1));
1607 return DAG.getNode(X86ISD::SETCC, MVT::i8,
1608 DAG.getConstant(X86CC, MVT::i8), Cond);
1610 assert(isFP && "Illegal integer SetCC!");
1612 Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
1613 Op.getOperand(0), Op.getOperand(1));
1614 std::vector<MVT::ValueType> Tys;
1615 std::vector<SDOperand> Ops;
1616 switch (SetCCOpcode) {
1617 default: assert(false && "Illegal floating point SetCC!");
1618 case ISD::SETOEQ: { // !PF & ZF
1619 Tys.push_back(MVT::i8);
1620 Tys.push_back(MVT::Flag);
1621 Ops.push_back(DAG.getConstant(X86ISD::COND_NP, MVT::i8));
1622 Ops.push_back(Cond);
1623 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
1624 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
1625 DAG.getConstant(X86ISD::COND_E, MVT::i8),
1627 return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2);
1629 case ISD::SETUNE: { // PF | !ZF
1630 Tys.push_back(MVT::i8);
1631 Tys.push_back(MVT::Flag);
1632 Ops.push_back(DAG.getConstant(X86ISD::COND_P, MVT::i8));
1633 Ops.push_back(Cond);
1634 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
1635 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
1636 DAG.getConstant(X86ISD::COND_NE, MVT::i8),
1638 return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2);
1644 MVT::ValueType VT = Op.getValueType();
1645 bool isFP = MVT::isFloatingPoint(VT);
1646 bool isFPStack = isFP && !X86ScalarSSE;
1647 bool isFPSSE = isFP && X86ScalarSSE;
1648 bool addTest = false;
1649 SDOperand Op0 = Op.getOperand(0);
1651 if (Op0.getOpcode() == ISD::SETCC)
1652 Op0 = LowerOperation(Op0, DAG);
1654 if (Op0.getOpcode() == X86ISD::SETCC) {
1655 // If condition flag is set by a X86ISD::CMP, then make a copy of it
1656 // (since flag operand cannot be shared). If the X86ISD::SETCC does not
1657 // have another use it will be eliminated.
1658 // If the X86ISD::SETCC has more than one use, then it's probably better
1659 // to use a test instead of duplicating the X86ISD::CMP (for register
1660 // pressure reason).
1661 if (Op0.getOperand(1).getOpcode() == X86ISD::CMP) {
1662 if (!Op0.hasOneUse()) {
1663 std::vector<MVT::ValueType> Tys;
1664 for (unsigned i = 0; i < Op0.Val->getNumValues(); ++i)
1665 Tys.push_back(Op0.Val->getValueType(i));
1666 std::vector<SDOperand> Ops;
1667 for (unsigned i = 0; i < Op0.getNumOperands(); ++i)
1668 Ops.push_back(Op0.getOperand(i));
1669 Op0 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
1672 CC = Op0.getOperand(0);
1673 Cond = Op0.getOperand(1);
1674 // Make a copy as flag result cannot be used by more than one.
1675 Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
1676 Cond.getOperand(0), Cond.getOperand(1));
1678 isFPStack && !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
1685 CC = DAG.getConstant(X86ISD::COND_NE, MVT::i8);
1686 Cond = DAG.getNode(X86ISD::TEST, MVT::Flag, Op0, Op0);
1689 std::vector<MVT::ValueType> Tys;
1690 Tys.push_back(Op.getValueType());
1691 Tys.push_back(MVT::Flag);
1692 std::vector<SDOperand> Ops;
1693 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
1694 // condition is true.
1695 Ops.push_back(Op.getOperand(2));
1696 Ops.push_back(Op.getOperand(1));
1698 Ops.push_back(Cond);
1699 return DAG.getNode(X86ISD::CMOV, Tys, Ops);
1702 bool addTest = false;
1703 SDOperand Cond = Op.getOperand(1);
1704 SDOperand Dest = Op.getOperand(2);
1706 if (Cond.getOpcode() == ISD::SETCC)
1707 Cond = LowerOperation(Cond, DAG);
1709 if (Cond.getOpcode() == X86ISD::SETCC) {
1710 // If condition flag is set by a X86ISD::CMP, then make a copy of it
1711 // (since flag operand cannot be shared). If the X86ISD::SETCC does not
1712 // have another use it will be eliminated.
1713 // If the X86ISD::SETCC has more than one use, then it's probably better
1714 // to use a test instead of duplicating the X86ISD::CMP (for register
1715 // pressure reason).
1716 if (Cond.getOperand(1).getOpcode() == X86ISD::CMP) {
1717 if (!Cond.hasOneUse()) {
1718 std::vector<MVT::ValueType> Tys;
1719 for (unsigned i = 0; i < Cond.Val->getNumValues(); ++i)
1720 Tys.push_back(Cond.Val->getValueType(i));
1721 std::vector<SDOperand> Ops;
1722 for (unsigned i = 0; i < Cond.getNumOperands(); ++i)
1723 Ops.push_back(Cond.getOperand(i));
1724 Cond = DAG.getNode(X86ISD::SETCC, Tys, Ops);
1727 CC = Cond.getOperand(0);
1728 Cond = Cond.getOperand(1);
1729 // Make a copy as flag result cannot be used by more than one.
1730 Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
1731 Cond.getOperand(0), Cond.getOperand(1));
1738 CC = DAG.getConstant(X86ISD::COND_NE, MVT::i8);
1739 Cond = DAG.getNode(X86ISD::TEST, MVT::Flag, Cond, Cond);
1741 return DAG.getNode(X86ISD::BRCOND, Op.getValueType(),
1742 Op.getOperand(0), Op.getOperand(2), CC, Cond);
1745 SDOperand InFlag(0, 0);
1746 SDOperand Chain = Op.getOperand(0);
1748 (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
1749 if (Align == 0) Align = 1;
1751 ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3));
1752 // If not DWORD aligned, call memset if size is less than the threshold.
1753 // It knows how to align to the right boundary first.
1754 if ((Align & 3) != 0 ||
1755 (I && I->getValue() < Subtarget->getMinRepStrSizeThreshold())) {
1756 MVT::ValueType IntPtr = getPointerTy();
1757 const Type *IntPtrTy = getTargetData().getIntPtrType();
1758 std::vector<std::pair<SDOperand, const Type*> > Args;
1759 Args.push_back(std::make_pair(Op.getOperand(1), IntPtrTy));
1760 // Extend the ubyte argument to be an int value for the call.
1761 SDOperand Val = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op.getOperand(2));
1762 Args.push_back(std::make_pair(Val, IntPtrTy));
1763 Args.push_back(std::make_pair(Op.getOperand(3), IntPtrTy));
1764 std::pair<SDOperand,SDOperand> CallResult =
1765 LowerCallTo(Chain, Type::VoidTy, false, CallingConv::C, false,
1766 DAG.getExternalSymbol("memset", IntPtr), Args, DAG);
1767 return CallResult.second;
1772 ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2));
1773 unsigned BytesLeft = 0;
1774 bool TwoRepStos = false;
1777 unsigned Val = ValC->getValue() & 255;
1779 // If the value is a constant, then we can potentially use larger sets.
1780 switch (Align & 3) {
1781 case 2: // WORD aligned
1783 Count = DAG.getConstant(I->getValue() / 2, MVT::i32);
1784 BytesLeft = I->getValue() % 2;
1785 Val = (Val << 8) | Val;
1788 case 0: // DWORD aligned
1791 Count = DAG.getConstant(I->getValue() / 4, MVT::i32);
1792 BytesLeft = I->getValue() % 4;
1794 Count = DAG.getNode(ISD::SRL, MVT::i32, Op.getOperand(3),
1795 DAG.getConstant(2, MVT::i8));
1798 Val = (Val << 8) | Val;
1799 Val = (Val << 16) | Val;
1802 default: // Byte aligned
1804 Count = Op.getOperand(3);
1809 Chain = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT),
1811 InFlag = Chain.getValue(1);
1814 Count = Op.getOperand(3);
1815 Chain = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag);
1816 InFlag = Chain.getValue(1);
1819 Chain = DAG.getCopyToReg(Chain, X86::ECX, Count, InFlag);
1820 InFlag = Chain.getValue(1);
1821 Chain = DAG.getCopyToReg(Chain, X86::EDI, Op.getOperand(1), InFlag);
1822 InFlag = Chain.getValue(1);
1824 std::vector<MVT::ValueType> Tys;
1825 Tys.push_back(MVT::Other);
1826 Tys.push_back(MVT::Flag);
1827 std::vector<SDOperand> Ops;
1828 Ops.push_back(Chain);
1829 Ops.push_back(DAG.getValueType(AVT));
1830 Ops.push_back(InFlag);
1831 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, Ops);
1834 InFlag = Chain.getValue(1);
1835 Count = Op.getOperand(3);
1836 MVT::ValueType CVT = Count.getValueType();
1837 SDOperand Left = DAG.getNode(ISD::AND, CVT, Count,
1838 DAG.getConstant(3, CVT));
1839 Chain = DAG.getCopyToReg(Chain, X86::ECX, Left, InFlag);
1840 InFlag = Chain.getValue(1);
1842 Tys.push_back(MVT::Other);
1843 Tys.push_back(MVT::Flag);
1845 Ops.push_back(Chain);
1846 Ops.push_back(DAG.getValueType(MVT::i8));
1847 Ops.push_back(InFlag);
1848 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, Ops);
1849 } else if (BytesLeft) {
1850 // Issue stores for the last 1 - 3 bytes.
1852 unsigned Val = ValC->getValue() & 255;
1853 unsigned Offset = I->getValue() - BytesLeft;
1854 SDOperand DstAddr = Op.getOperand(1);
1855 MVT::ValueType AddrVT = DstAddr.getValueType();
1856 if (BytesLeft >= 2) {
1857 Value = DAG.getConstant((Val << 8) | Val, MVT::i16);
1858 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
1859 DAG.getNode(ISD::ADD, AddrVT, DstAddr,
1860 DAG.getConstant(Offset, AddrVT)),
1861 DAG.getSrcValue(NULL));
1866 if (BytesLeft == 1) {
1867 Value = DAG.getConstant(Val, MVT::i8);
1868 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
1869 DAG.getNode(ISD::ADD, AddrVT, DstAddr,
1870 DAG.getConstant(Offset, AddrVT)),
1871 DAG.getSrcValue(NULL));
1878 SDOperand Chain = Op.getOperand(0);
1880 (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
1881 if (Align == 0) Align = 1;
1883 ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3));
1884 // If not DWORD aligned, call memcpy if size is less than the threshold.
1885 // It knows how to align to the right boundary first.
1886 if ((Align & 3) != 0 ||
1887 (I && I->getValue() < Subtarget->getMinRepStrSizeThreshold())) {
1888 MVT::ValueType IntPtr = getPointerTy();
1889 const Type *IntPtrTy = getTargetData().getIntPtrType();
1890 std::vector<std::pair<SDOperand, const Type*> > Args;
1891 Args.push_back(std::make_pair(Op.getOperand(1), IntPtrTy));
1892 Args.push_back(std::make_pair(Op.getOperand(2), IntPtrTy));
1893 Args.push_back(std::make_pair(Op.getOperand(3), IntPtrTy));
1894 std::pair<SDOperand,SDOperand> CallResult =
1895 LowerCallTo(Chain, Type::VoidTy, false, CallingConv::C, false,
1896 DAG.getExternalSymbol("memcpy", IntPtr), Args, DAG);
1897 return CallResult.second;
1902 unsigned BytesLeft = 0;
1903 bool TwoRepMovs = false;
1904 switch (Align & 3) {
1905 case 2: // WORD aligned
1907 Count = DAG.getConstant(I->getValue() / 2, MVT::i32);
1908 BytesLeft = I->getValue() % 2;
1910 case 0: // DWORD aligned
1913 Count = DAG.getConstant(I->getValue() / 4, MVT::i32);
1914 BytesLeft = I->getValue() % 4;
1916 Count = DAG.getNode(ISD::SRL, MVT::i32, Op.getOperand(3),
1917 DAG.getConstant(2, MVT::i8));
1921 default: // Byte aligned
1923 Count = Op.getOperand(3);
1927 SDOperand InFlag(0, 0);
1928 Chain = DAG.getCopyToReg(Chain, X86::ECX, Count, InFlag);
1929 InFlag = Chain.getValue(1);
1930 Chain = DAG.getCopyToReg(Chain, X86::EDI, Op.getOperand(1), InFlag);
1931 InFlag = Chain.getValue(1);
1932 Chain = DAG.getCopyToReg(Chain, X86::ESI, Op.getOperand(2), InFlag);
1933 InFlag = Chain.getValue(1);
1935 std::vector<MVT::ValueType> Tys;
1936 Tys.push_back(MVT::Other);
1937 Tys.push_back(MVT::Flag);
1938 std::vector<SDOperand> Ops;
1939 Ops.push_back(Chain);
1940 Ops.push_back(DAG.getValueType(AVT));
1941 Ops.push_back(InFlag);
1942 Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, Ops);
1945 InFlag = Chain.getValue(1);
1946 Count = Op.getOperand(3);
1947 MVT::ValueType CVT = Count.getValueType();
1948 SDOperand Left = DAG.getNode(ISD::AND, CVT, Count,
1949 DAG.getConstant(3, CVT));
1950 Chain = DAG.getCopyToReg(Chain, X86::ECX, Left, InFlag);
1951 InFlag = Chain.getValue(1);
1953 Tys.push_back(MVT::Other);
1954 Tys.push_back(MVT::Flag);
1956 Ops.push_back(Chain);
1957 Ops.push_back(DAG.getValueType(MVT::i8));
1958 Ops.push_back(InFlag);
1959 Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, Ops);
1960 } else if (BytesLeft) {
1961 // Issue loads and stores for the last 1 - 3 bytes.
1962 unsigned Offset = I->getValue() - BytesLeft;
1963 SDOperand DstAddr = Op.getOperand(1);
1964 MVT::ValueType DstVT = DstAddr.getValueType();
1965 SDOperand SrcAddr = Op.getOperand(2);
1966 MVT::ValueType SrcVT = SrcAddr.getValueType();
1968 if (BytesLeft >= 2) {
1969 Value = DAG.getLoad(MVT::i16, Chain,
1970 DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
1971 DAG.getConstant(Offset, SrcVT)),
1972 DAG.getSrcValue(NULL));
1973 Chain = Value.getValue(1);
1974 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
1975 DAG.getNode(ISD::ADD, DstVT, DstAddr,
1976 DAG.getConstant(Offset, DstVT)),
1977 DAG.getSrcValue(NULL));
1982 if (BytesLeft == 1) {
1983 Value = DAG.getLoad(MVT::i8, Chain,
1984 DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
1985 DAG.getConstant(Offset, SrcVT)),
1986 DAG.getSrcValue(NULL));
1987 Chain = Value.getValue(1);
1988 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
1989 DAG.getNode(ISD::ADD, DstVT, DstAddr,
1990 DAG.getConstant(Offset, DstVT)),
1991 DAG.getSrcValue(NULL));
1998 // ConstantPool, GlobalAddress, and ExternalSymbol are lowered as their
// target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
2000 // one of the above mentioned nodes. It has to be wrapped because otherwise
2001 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
2002 // be used to form addressing mode. These wrapped nodes will be selected
2004 case ISD::ConstantPool: {
2005 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2006 SDOperand Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(),
2007 DAG.getTargetConstantPool(CP->get(), getPointerTy(),
2008 CP->getAlignment()));
2009 if (getTargetMachine().getSubtarget<X86Subtarget>().isTargetDarwin()) {
2010 // With PIC, the address is actually $g + Offset.
2011 if (getTargetMachine().getRelocationModel() == Reloc::PIC)
2012 Result = DAG.getNode(ISD::ADD, getPointerTy(),
2013 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), Result);
2018 case ISD::GlobalAddress: {
2019 GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
2020 SDOperand Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(),
2021 DAG.getTargetGlobalAddress(GV, getPointerTy()));
2022 if (getTargetMachine().
2023 getSubtarget<X86Subtarget>().isTargetDarwin()) {
2024 // With PIC, the address is actually $g + Offset.
2025 if (getTargetMachine().getRelocationModel() == Reloc::PIC)
2026 Result = DAG.getNode(ISD::ADD, getPointerTy(),
2027 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), Result);
2029 // For Darwin, external and weak symbols are indirect, so we want to load
2030 // the value at address GV, not the value of GV itself. This means that
2031 // the GlobalAddress must be in the base or index register of the address,
2032 // not the GV offset field.
2033 if (getTargetMachine().getRelocationModel() != Reloc::Static &&
2034 DarwinGVRequiresExtraLoad(GV))
2035 Result = DAG.getLoad(MVT::i32, DAG.getEntryNode(),
2036 Result, DAG.getSrcValue(NULL));
2041 case ISD::ExternalSymbol: {
2042 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
2043 SDOperand Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(),
2044 DAG.getTargetExternalSymbol(Sym, getPointerTy()));
2045 if (getTargetMachine().
2046 getSubtarget<X86Subtarget>().isTargetDarwin()) {
2047 // With PIC, the address is actually $g + Offset.
2048 if (getTargetMachine().getRelocationModel() == Reloc::PIC)
2049 Result = DAG.getNode(ISD::ADD, getPointerTy(),
2050 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), Result);
2055 case ISD::VASTART: {
2056 // vastart just stores the address of the VarArgsFrameIndex slot into the
2057 // memory location argument.
2058 // FIXME: Replace MVT::i32 with PointerTy
2059 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32);
2060 return DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0), FR,
2061 Op.getOperand(1), Op.getOperand(2));
2066 switch(Op.getNumOperands()) {
2068 assert(0 && "Do not know how to return this many arguments!");
2071 return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Op.getOperand(0),
2072 DAG.getConstant(getBytesToPopOnReturn(), MVT::i16));
2074 MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
2075 if (MVT::isInteger(ArgVT))
2076 Copy = DAG.getCopyToReg(Op.getOperand(0), X86::EAX, Op.getOperand(1),
2078 else if (!X86ScalarSSE) {
2079 std::vector<MVT::ValueType> Tys;
2080 Tys.push_back(MVT::Other);
2081 Tys.push_back(MVT::Flag);
2082 std::vector<SDOperand> Ops;
2083 Ops.push_back(Op.getOperand(0));
2084 Ops.push_back(Op.getOperand(1));
2085 Copy = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops);
2088 SDOperand Chain = Op.getOperand(0);
2089 SDOperand Value = Op.getOperand(1);
2091 if (Value.getOpcode() == ISD::LOAD &&
2092 (Chain == Value.getValue(1) || Chain == Value.getOperand(0))) {
2093 Chain = Value.getOperand(0);
2094 MemLoc = Value.getOperand(1);
2096 // Spill the value to memory and reload it into top of stack.
2097 unsigned Size = MVT::getSizeInBits(ArgVT)/8;
2098 MachineFunction &MF = DAG.getMachineFunction();
2099 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
2100 MemLoc = DAG.getFrameIndex(SSFI, getPointerTy());
2101 Chain = DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0),
2102 Value, MemLoc, DAG.getSrcValue(0));
2104 std::vector<MVT::ValueType> Tys;
2105 Tys.push_back(MVT::f64);
2106 Tys.push_back(MVT::Other);
2107 std::vector<SDOperand> Ops;
2108 Ops.push_back(Chain);
2109 Ops.push_back(MemLoc);
2110 Ops.push_back(DAG.getValueType(ArgVT));
2111 Copy = DAG.getNode(X86ISD::FLD, Tys, Ops);
2113 Tys.push_back(MVT::Other);
2114 Tys.push_back(MVT::Flag);
2116 Ops.push_back(Copy.getValue(1));
2117 Ops.push_back(Copy);
2118 Copy = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops);
2123 Copy = DAG.getCopyToReg(Op.getOperand(0), X86::EDX, Op.getOperand(2),
2125 Copy = DAG.getCopyToReg(Copy, X86::EAX,Op.getOperand(1),Copy.getValue(1));
2128 return DAG.getNode(X86ISD::RET_FLAG, MVT::Other,
2129 Copy, DAG.getConstant(getBytesToPopOnReturn(), MVT::i16),
/// getTargetNodeName - Return a human-readable string for the given
/// X86-specific DAG opcode (an X86ISD::* enumerator), or NULL when the
/// opcode is not an X86 target node.  Used by SelectionDAG dump/debug output.
const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
  default: return NULL;  // Not an X86ISD opcode; caller prints a generic name.
  case X86ISD::SHLD:               return "X86ISD::SHLD";
  case X86ISD::SHRD:               return "X86ISD::SHRD";
  case X86ISD::FAND:               return "X86ISD::FAND";
  case X86ISD::FXOR:               return "X86ISD::FXOR";
  case X86ISD::FILD:               return "X86ISD::FILD";
  case X86ISD::FILD_FLAG:          return "X86ISD::FILD_FLAG";
  case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
  case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
  case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
  case X86ISD::FLD:                return "X86ISD::FLD";
  case X86ISD::FST:                return "X86ISD::FST";
  case X86ISD::FP_GET_RESULT:      return "X86ISD::FP_GET_RESULT";
  case X86ISD::FP_SET_RESULT:      return "X86ISD::FP_SET_RESULT";
  case X86ISD::CALL:               return "X86ISD::CALL";
  case X86ISD::TAILCALL:           return "X86ISD::TAILCALL";
  case X86ISD::RDTSC_DAG:          return "X86ISD::RDTSC_DAG";
  case X86ISD::CMP:                return "X86ISD::CMP";
  case X86ISD::TEST:               return "X86ISD::TEST";
  case X86ISD::SETCC:              return "X86ISD::SETCC";
  case X86ISD::CMOV:               return "X86ISD::CMOV";
  case X86ISD::BRCOND:             return "X86ISD::BRCOND";
  case X86ISD::RET_FLAG:           return "X86ISD::RET_FLAG";
  case X86ISD::REP_STOS:           return "X86ISD::REP_STOS";
  case X86ISD::REP_MOVS:           return "X86ISD::REP_MOVS";
  case X86ISD::LOAD_PACK:          return "X86ISD::LOAD_PACK";
  case X86ISD::GlobalBaseReg:      return "X86ISD::GlobalBaseReg";
  case X86ISD::Wrapper:            return "X86ISD::Wrapper";
/// computeMaskedBitsForTargetNode - Report which bits of Op are known to be
/// zero for X86-specific DAG nodes.  For a node whose result is a 0/1 value
/// (e.g. the setcc-style nodes asserted below), every bit above bit 0 is
/// known zero.
/// NOTE(review): the Mask and KnownOne reference parameters are elided from
/// this excerpt of the signature — confirm against the header declaration.
void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                       uint64_t &KnownZero,
                                                       unsigned Depth) const {
  unsigned Opc = Op.getOpcode();
  // Start fully conservative: no bit of the result is known yet.
  KnownZero = KnownOne = 0; // Don't know anything.
  assert(Opc >= ISD::BUILTIN_OP_END && "Expected a target specific node");
  // Result is boolean (0 or 1): mark all bits of the value type except the
  // low bit as known-zero.
  KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
/// getRegClassForInlineAsmConstraint - Given a single-letter GCC inline-asm
/// constraint and a value type, return the list of X86 registers that may be
/// used to satisfy it (terminated by 0 for make_vector).  An empty vector
/// means the constraint is not handled here or is illegal for this subtarget.
std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    // FIXME: not handling fp-stack yet!
    // FIXME: not handling MMX registers yet ('y' constraint).
    switch (Constraint[0]) { // GCC X86 Constraint Letters
    default: break;          // Unknown constraint letter.
    case 'r': // GENERAL_REGS
    case 'R': // LEGACY_REGS
      return make_vector<unsigned>(X86::EAX, X86::EBX, X86::ECX, X86::EDX,
                                   X86::ESI, X86::EDI, X86::EBP, X86::ESP, 0);
    case 'l': // INDEX_REGS (general regs usable as an index; excludes ESP)
      return make_vector<unsigned>(X86::EAX, X86::EBX, X86::ECX, X86::EDX,
                                   X86::ESI, X86::EDI, X86::EBP, 0);
    case 'q': // Q_REGS (GENERAL_REGS in 64-bit mode) - regs with byte subregs
      return make_vector<unsigned>(X86::EAX, X86::EBX, X86::ECX, X86::EDX, 0);
    case 'x': // SSE_REGS if SSE1 allowed
      if (Subtarget->hasSSE1())
        return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
                                     X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
      return std::vector<unsigned>(); // No SSE1: constraint unsatisfiable.
    case 'Y': // SSE_REGS if SSE2 allowed
      if (Subtarget->hasSSE2())
        return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
                                     X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
      return std::vector<unsigned>(); // No SSE2: constraint unsatisfiable.
  // Multi-character or unrecognized constraint: nothing to offer.
  return std::vector<unsigned>();
2223 /// isLegalAddressImmediate - Return true if the integer value or
2224 /// GlobalValue can be used as the offset of the target addressing mode.
2225 bool X86TargetLowering::isLegalAddressImmediate(int64_t V) const {
2226 // X86 allows a sign-extended 32-bit immediate field.
2227 return (V > -(1LL << 32) && V < (1LL << 32)-1);
2230 bool X86TargetLowering::isLegalAddressImmediate(GlobalValue *GV) const {
2231 if (getTargetMachine().
2232 getSubtarget<X86Subtarget>().isTargetDarwin()) {
2233 Reloc::Model RModel = getTargetMachine().getRelocationModel();
2234 if (RModel == Reloc::Static)
2236 else if (RModel == Reloc::DynamicNoPIC)
2237 return !DarwinGVRequiresExtraLoad(GV);