1 //===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by Chris Lattner and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines the interfaces that X86 uses to lower LLVM code into a
13 //===----------------------------------------------------------------------===//
16 #include "X86InstrBuilder.h"
17 #include "X86ISelLowering.h"
18 #include "X86TargetMachine.h"
19 #include "llvm/CallingConv.h"
20 #include "llvm/Constants.h"
21 #include "llvm/Function.h"
22 #include "llvm/ADT/VectorExtras.h"
23 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/SelectionDAG.h"
28 #include "llvm/CodeGen/SSARegMap.h"
29 #include "llvm/Support/MathExtras.h"
30 #include "llvm/Target/TargetOptions.h"
34 #include "llvm/Support/CommandLine.h"
// Hidden command-line flag gating the X86 fastcc lowering paths: checked in
// LowerArguments and LowerCallTo below to route CallingConv::Fast calls
// through the LowerFastCC* implementations instead of the C-convention ones.
35 static cl::opt<bool> EnableFastCC("enable-x86-fastcc", cl::Hidden,
36 cl::desc("Enable fastcc on X86"));
// X86TargetLowering constructor: configures how LLVM IR operations are
// legalized for X86 (Promote/Expand/Custom per ISD opcode and value type),
// registers the legal register classes, addressing-mode scales, and the
// memset/memcpy store-expansion thresholds.
//
// NOTE(review): this excerpt is an elided dump — the embedded original line
// numbers skip (e.g. 41 -> 43), so some control-flow lines (else branches,
// closing braces of the if/for blocks) are not visible here. Code lines are
// kept verbatim; only comments were added.
38 X86TargetLowering::X86TargetLowering(TargetMachine &TM)
39 : TargetLowering(TM) {
40 Subtarget = &TM.getSubtarget<X86Subtarget>();
41 X86ScalarSSE = Subtarget->hasSSE2();
43 // Set up the TargetLowering object.
45 // X86 is weird, it always uses i8 for shift amounts and setcc results.
46 setShiftAmountType(MVT::i8);
47 setSetCCResultType(MVT::i8);
48 setSetCCResultContents(ZeroOrOneSetCCResult);
49 setSchedulingPreference(SchedulingForRegPressure);
50 setShiftAmountFlavor(Mask); // shl X, 32 == shl X, 0
51 setStackPointerRegisterToSaveRestore(X86::ESP);
53 if (!Subtarget->isTargetDarwin())
54 // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
55 setUseUnderscoreSetJmpLongJmp(true);
57 // Add legal addressing mode scale values.
58 addLegalAddressScale(8);
59 addLegalAddressScale(4);
60 addLegalAddressScale(2);
61 // Enter the ones which require both scale + index last. These are more
63 addLegalAddressScale(9);
64 addLegalAddressScale(5);
65 addLegalAddressScale(3);
67 // Set up the register classes.
68 addRegisterClass(MVT::i8, X86::R8RegisterClass);
69 addRegisterClass(MVT::i16, X86::R16RegisterClass);
70 addRegisterClass(MVT::i32, X86::R32RegisterClass);
72 // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
74 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
75 setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
76 setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);
// NOTE(review): the two i32 UINT_TO_FP actions below were originally on the
// two arms of an SSE-dependent conditional whose branch lines are elided
// from this excerpt — Expand for the SSE path, Promote otherwise.
79 // No SSE i64 SINT_TO_FP, so expand i32 UINT_TO_FP instead.
80 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Expand);
82 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
84 // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
86 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
87 setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);
88 // SSE has no i16 to fp conversion, only i32
90 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
92 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom);
93 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
96 // We can handle SINT_TO_FP and FP_TO_SINT from/to i64 even though i64
98 setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
99 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
101 // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have
103 setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
104 setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);
107 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
109 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom);
110 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
113 // Handle FP_TO_UINT by promoting the destination to a larger signed
115 setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote);
116 setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote);
117 setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote);
119 if (X86ScalarSSE && !Subtarget->hasSSE3())
120 // Expand FP_TO_UINT into a select.
121 // FIXME: We would like to use a Custom expander here eventually to do
122 // the optimal thing for SSE vs. the default expansion in the legalizer.
123 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand);
// NOTE(review): the `else` line pairing with the if above is elided here;
// the Promote action below is its alternative arm.
125 // With SSE3 we can use fisttpll to convert to a signed i64.
126 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
128 setOperationAction(ISD::BIT_CONVERT , MVT::f32 , Expand);
129 setOperationAction(ISD::BIT_CONVERT , MVT::i32 , Expand);
131 setOperationAction(ISD::BRCOND , MVT::Other, Custom);
132 setOperationAction(ISD::BR_CC , MVT::Other, Expand);
133 setOperationAction(ISD::SELECT_CC , MVT::Other, Expand);
134 setOperationAction(ISD::MEMMOVE , MVT::Other, Expand);
135 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Expand);
136 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
137 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
138 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
139 setOperationAction(ISD::SEXTLOAD , MVT::i1 , Expand);
140 setOperationAction(ISD::FREM , MVT::f64 , Expand);
141 setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
142 setOperationAction(ISD::CTTZ , MVT::i8 , Expand);
143 setOperationAction(ISD::CTLZ , MVT::i8 , Expand);
144 setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
145 setOperationAction(ISD::CTTZ , MVT::i16 , Expand);
146 setOperationAction(ISD::CTLZ , MVT::i16 , Expand);
147 setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
148 setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
149 setOperationAction(ISD::CTLZ , MVT::i32 , Expand);
150 setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);
151 setOperationAction(ISD::BSWAP , MVT::i16 , Expand);
153 // These should be promoted to a larger select which is supported.
154 setOperationAction(ISD::SELECT , MVT::i1 , Promote);
155 setOperationAction(ISD::SELECT , MVT::i8 , Promote);
157 // X86 wants to expand cmov itself.
158 setOperationAction(ISD::SELECT , MVT::i16 , Custom);
159 setOperationAction(ISD::SELECT , MVT::i32 , Custom);
160 setOperationAction(ISD::SELECT , MVT::f32 , Custom);
161 setOperationAction(ISD::SELECT , MVT::f64 , Custom);
162 setOperationAction(ISD::SETCC , MVT::i8 , Custom);
163 setOperationAction(ISD::SETCC , MVT::i16 , Custom);
164 setOperationAction(ISD::SETCC , MVT::i32 , Custom);
165 setOperationAction(ISD::SETCC , MVT::f32 , Custom);
166 setOperationAction(ISD::SETCC , MVT::f64 , Custom);
167 // X86 ret instruction may pop stack.
168 setOperationAction(ISD::RET , MVT::Other, Custom);
170 setOperationAction(ISD::ConstantPool , MVT::i32 , Custom);
171 setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom);
172 setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom);
173 // 64-bit addm sub, shl, sra, srl (iff 32-bit x86)
174 setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom);
175 setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom);
176 setOperationAction(ISD::SRL_PARTS , MVT::i32 , Custom);
177 // X86 wants to expand memset / memcpy itself.
178 setOperationAction(ISD::MEMSET , MVT::Other, Custom);
179 setOperationAction(ISD::MEMCPY , MVT::Other, Custom);
181 // We don't have line number support yet.
182 setOperationAction(ISD::LOCATION, MVT::Other, Expand);
183 setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
184 // FIXME - use subtarget debug flags
185 if (!Subtarget->isTargetDarwin())
186 setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);
188 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
189 setOperationAction(ISD::VASTART , MVT::Other, Custom);
191 // Use the default implementation.
192 setOperationAction(ISD::VAARG , MVT::Other, Expand);
193 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
194 setOperationAction(ISD::VAEND , MVT::Other, Expand);
195 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
196 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
197 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand);
199 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
200 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
// NOTE(review): the two "Set up the FP register classes" sections below were
// originally the SSE (FR32/FR64) and x87 (RFP) arms of an if/else on
// X86ScalarSSE; the branch and brace lines are elided in this excerpt.
203 // Set up the FP register classes.
204 addRegisterClass(MVT::f32, X86::FR32RegisterClass);
205 addRegisterClass(MVT::f64, X86::FR64RegisterClass);
207 // SSE has no load+extend ops
208 setOperationAction(ISD::EXTLOAD, MVT::f32, Expand);
209 setOperationAction(ISD::ZEXTLOAD, MVT::f32, Expand);
211 // Use ANDPD to simulate FABS.
212 setOperationAction(ISD::FABS , MVT::f64, Custom);
213 setOperationAction(ISD::FABS , MVT::f32, Custom);
215 // Use XORP to simulate FNEG.
216 setOperationAction(ISD::FNEG , MVT::f64, Custom);
217 setOperationAction(ISD::FNEG , MVT::f32, Custom);
219 // We don't support sin/cos/fmod
220 setOperationAction(ISD::FSIN , MVT::f64, Expand);
221 setOperationAction(ISD::FCOS , MVT::f64, Expand);
222 setOperationAction(ISD::FREM , MVT::f64, Expand);
223 setOperationAction(ISD::FSIN , MVT::f32, Expand);
224 setOperationAction(ISD::FCOS , MVT::f32, Expand);
225 setOperationAction(ISD::FREM , MVT::f32, Expand);
227 // Expand FP immediates into loads from the stack, except for the special
229 setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
230 setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
231 addLegalFPImmediate(+0.0); // xorps / xorpd
233 // Set up the FP register classes.
234 addRegisterClass(MVT::f64, X86::RFPRegisterClass);
236 setOperationAction(ISD::UNDEF, MVT::f64, Expand);
239 setOperationAction(ISD::FSIN , MVT::f64 , Expand);
240 setOperationAction(ISD::FCOS , MVT::f64 , Expand);
243 setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
244 addLegalFPImmediate(+0.0); // FLD0
245 addLegalFPImmediate(+1.0); // FLD1
246 addLegalFPImmediate(-0.0); // FLD0/FCHS
247 addLegalFPImmediate(-1.0); // FLD1/FCHS
250 // First set operation action for all vector types to expand. Then we
251 // will selectively turn on ones that can be effectively codegen'd.
252 for (unsigned VT = (unsigned)MVT::Vector + 1;
253 VT != (unsigned)MVT::LAST_VALUETYPE; VT++) {
254 setOperationAction(ISD::ADD , (MVT::ValueType)VT, Expand);
255 setOperationAction(ISD::SUB , (MVT::ValueType)VT, Expand);
256 setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
257 setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
258 setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Expand);
259 setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
// NOTE(review): the closing brace of the for-loop above (and of each vector
// feature block below) is elided in this excerpt.
262 if (Subtarget->hasMMX()) {
263 addRegisterClass(MVT::v8i8, X86::VR64RegisterClass);
264 addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
265 addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
267 // FIXME: add MMX packed arithmetics
268 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Expand);
269 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Expand);
270 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Expand);
273 if (Subtarget->hasSSE1()) {
274 addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);
276 setOperationAction(ISD::ADD, MVT::v4f32, Legal);
277 setOperationAction(ISD::SUB, MVT::v4f32, Legal);
278 setOperationAction(ISD::MUL, MVT::v4f32, Legal);
279 setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
280 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
281 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
284 if (Subtarget->hasSSE2()) {
285 addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
286 addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
287 addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
288 addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
289 addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);
292 setOperationAction(ISD::ADD, MVT::v2f64, Legal);
293 setOperationAction(ISD::ADD, MVT::v16i8, Legal);
294 setOperationAction(ISD::ADD, MVT::v8i16, Legal);
295 setOperationAction(ISD::ADD, MVT::v4i32, Legal);
296 setOperationAction(ISD::SUB, MVT::v2f64, Legal);
297 setOperationAction(ISD::SUB, MVT::v16i8, Legal);
298 setOperationAction(ISD::SUB, MVT::v8i16, Legal);
299 setOperationAction(ISD::SUB, MVT::v4i32, Legal);
300 setOperationAction(ISD::MUL, MVT::v2f64, Legal);
301 setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
302 setOperationAction(ISD::LOAD, MVT::v16i8, Legal);
303 setOperationAction(ISD::LOAD, MVT::v8i16, Legal);
304 setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
305 setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
306 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
307 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
308 setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
309 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
310 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
311 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
312 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
313 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
314 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
315 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i16, Custom);
316 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i32, Custom);
317 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
// Derive register properties (legal types, spill sizes) from the classes
// registered above.
320 computeRegisterProperties();
322 // FIXME: These should be based on subtarget info. Plus, the values should
323 // be smaller when we are in optimizing for size mode.
324 maxStoresPerMemset = 16; // For %llvm.memset -> sequence of stores
325 maxStoresPerMemcpy = 16; // For %llvm.memcpy -> sequence of stores
326 maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
327 allowUnalignedMemoryAccesses = true; // x86 supports it!
// LowerArguments - Dispatch incoming-argument lowering by calling convention:
// fastcc functions (when the EnableFastCC flag is set) use the register-aware
// fastcc path; everything else uses the plain C stack-based convention.
// Returns one SDOperand per formal argument.
// NOTE(review): the function's closing brace is elided in this excerpt.
330 std::vector<SDOperand>
331 X86TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
332 if (F.getCallingConv() == CallingConv::Fast && EnableFastCC)
333 return LowerFastCCArguments(F, DAG);
334 return LowerCCCArguments(F, DAG);
// LowerCallTo - Lower an outgoing call. Canonicalizes a direct callee
// (GlobalAddress / ExternalSymbol) into its Target* form so the legalizer
// leaves it alone, then dispatches to the fastcc or C-convention call
// lowering. Returns the {result value, chain} pair.
// NOTE(review): this excerpt elides some lines (the remaining parameters of
// the signature and the closing brace); code lines are kept verbatim.
337 std::pair<SDOperand, SDOperand>
338 X86TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy,
339 bool isVarArg, unsigned CallingConv,
341 SDOperand Callee, ArgListTy &Args,
343 assert((!isVarArg || CallingConv == CallingConv::C) &&
344 "Only C takes varargs!");
346 // If the callee is a GlobalAddress node (quite common, every direct call is)
347 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
348 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
349 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
350 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
351 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
353 if (CallingConv == CallingConv::Fast && EnableFastCC)
354 return LowerFastCCCallTo(Chain, RetTy, isTailCall, Callee, Args, DAG);
355 return LowerCCCCallTo(Chain, RetTy, isVarArg, isTailCall, Callee, Args, DAG);
358 //===----------------------------------------------------------------------===//
359 // C Calling Convention implementation
360 //===----------------------------------------------------------------------===//
// LowerCCCArguments - Lower incoming arguments for the C calling convention:
// every argument lives on the stack above the return address, so for each
// formal a fixed frame object is created at its ESP-relative offset and a
// load from it is emitted (dead arguments get a constant-zero placeholder).
// Also records vararg/pop bookkeeping and marks the return registers live-out.
// NOTE(review): this excerpt elides interior lines (the switch header over
// ObjectVT, several else/brace lines, and the final `return ArgValues;`);
// code lines are kept verbatim.
362 std::vector<SDOperand>
363 X86TargetLowering::LowerCCCArguments(Function &F, SelectionDAG &DAG) {
364 std::vector<SDOperand> ArgValues;
366 MachineFunction &MF = DAG.getMachineFunction();
367 MachineFrameInfo *MFI = MF.getFrameInfo();
369 // Add DAG nodes to load the arguments... On entry to a function on the X86,
370 // the stack frame looks like this:
372 // [ESP] -- return address
373 // [ESP + 4] -- first argument (leftmost lexically)
374 // [ESP + 8] -- second argument, if first argument is four bytes in size
377 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
378 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
379 MVT::ValueType ObjectVT = getValueType(I->getType());
380 unsigned ArgIncrement = 4;
// Compute the stack size of this argument; 8-byte types also bump the
// per-argument stride from the default 4.
383 default: assert(0 && "Unhandled argument type!");
385 case MVT::i8: ObjSize = 1; break;
386 case MVT::i16: ObjSize = 2; break;
387 case MVT::i32: ObjSize = 4; break;
388 case MVT::i64: ObjSize = ArgIncrement = 8; break;
389 case MVT::f32: ObjSize = 4; break;
390 case MVT::f64: ObjSize = ArgIncrement = 8; break;
392 // Create the frame index object for this incoming parameter...
393 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
395 // Create the SelectionDAG nodes corresponding to a load from this parameter
396 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
398 // Don't codegen dead arguments. FIXME: remove this check when we can nuke
402 ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
403 DAG.getSrcValue(NULL));
// Dead argument: substitute a typed zero so callers still get one value
// per formal.
405 if (MVT::isInteger(ObjectVT))
406 ArgValue = DAG.getConstant(0, ObjectVT);
408 ArgValue = DAG.getConstantFP(0, ObjectVT);
410 ArgValues.push_back(ArgValue);
412 ArgOffset += ArgIncrement; // Move on to the next argument...
415 // If the function takes variable number of arguments, make a frame index for
416 // the start of the first vararg value... for expansion of llvm.va_start.
418 VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
419 ReturnAddrIndex = 0; // No return address slot generated yet.
420 BytesToPopOnReturn = 0; // Callee pops nothing.
421 BytesCallerReserves = ArgOffset;
423 // Finally, inform the code generator which regs we return values in.
424 switch (getValueType(F.getReturnType())) {
425 default: assert(0 && "Unknown type!");
426 case MVT::isVoid: break;
// NOTE(review): the case labels between the live-out registrations below
// are elided; EAX alone, EAX:EDX (i64), and ST0 (FP) are the C-convention
// return locations used here.
431 MF.addLiveOut(X86::EAX);
434 MF.addLiveOut(X86::EAX);
435 MF.addLiveOut(X86::EDX);
439 MF.addLiveOut(X86::ST0);
// LowerCCCCallTo - Lower an outgoing C-convention call: size the outgoing
// argument area, emit CALLSEQ_START, store each (possibly promoted) argument
// to its ESP-relative slot, emit the X86ISD::CALL and CALLSEQ_END nodes, then
// copy the return value out of the convention's return register(s)
// (AL/AX/EAX, EAX:EDX for i64, or the x87/SSE FP result path).
// Returns the {result value, chain} pair.
// NOTE(review): this excerpt elides interior lines (case labels, braces,
// else branches, and parts of the byte-counting loop); code lines are kept
// verbatim.
445 std::pair<SDOperand, SDOperand>
446 X86TargetLowering::LowerCCCCallTo(SDOperand Chain, const Type *RetTy,
447 bool isVarArg, bool isTailCall,
448 SDOperand Callee, ArgListTy &Args,
450 // Count how many bytes are to be pushed on the stack.
451 unsigned NumBytes = 0;
455 Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(0, getPointerTy()));
457 for (unsigned i = 0, e = Args.size(); i != e; ++i)
458 switch (getValueType(Args[i].second)) {
459 default: assert(0 && "Unknown value type!");
473 Chain = DAG.getCALLSEQ_START(Chain,
474 DAG.getConstant(NumBytes, getPointerTy()));
476 // Arguments go on the stack in reverse order, as specified by the ABI.
477 unsigned ArgOffset = 0;
478 SDOperand StackPtr = DAG.getRegister(X86::ESP, MVT::i32);
479 std::vector<SDOperand> Stores;
481 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
// PtrOff = ESP + running offset of this argument's slot.
482 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
483 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
485 switch (getValueType(Args[i].second)) {
486 default: assert(0 && "Unexpected ValueType for argument!");
490 // Promote the integer to 32 bits. If the input type is signed use a
491 // sign extend, otherwise use a zero extend.
492 if (Args[i].second->isSigned())
493 Args[i].first =DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Args[i].first);
495 Args[i].first =DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Args[i].first);
500 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
501 Args[i].first, PtrOff,
502 DAG.getSrcValue(NULL)));
507 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
508 Args[i].first, PtrOff,
509 DAG.getSrcValue(NULL)));
// Merge all independent argument stores into one chain.
514 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);
517 std::vector<MVT::ValueType> RetVals;
518 MVT::ValueType RetTyVT = getValueType(RetTy);
519 RetVals.push_back(MVT::Other);
521 // The result values produced have to be legal. Promote the result.
523 case MVT::isVoid: break;
525 RetVals.push_back(RetTyVT);
530 RetVals.push_back(MVT::i32);
534 RetVals.push_back(MVT::f32);
536 RetVals.push_back(MVT::f64);
539 RetVals.push_back(MVT::i32);
540 RetVals.push_back(MVT::i32);
544 std::vector<MVT::ValueType> NodeTys;
545 NodeTys.push_back(MVT::Other); // Returns a chain
546 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
547 std::vector<SDOperand> Ops;
548 Ops.push_back(Chain);
549 Ops.push_back(Callee);
551 // FIXME: Do not generate X86ISD::TAILCALL for now.
552 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops);
553 SDOperand InFlag = Chain.getValue(1);
556 NodeTys.push_back(MVT::Other); // Returns a chain
557 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
559 Ops.push_back(Chain);
560 Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
561 Ops.push_back(DAG.getConstant(0, getPointerTy()));
562 Ops.push_back(InFlag);
563 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, Ops);
564 InFlag = Chain.getValue(1);
567 if (RetTyVT != MVT::isVoid) {
// Copy the return value out of the physical register dictated by the
// convention; the case labels between branches are elided in this excerpt.
569 default: assert(0 && "Unknown value type to return!");
572 RetVal = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag);
573 Chain = RetVal.getValue(1);
574 if (RetTyVT == MVT::i1)
575 RetVal = DAG.getNode(ISD::TRUNCATE, MVT::i1, RetVal);
578 RetVal = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag);
579 Chain = RetVal.getValue(1);
582 RetVal = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
583 Chain = RetVal.getValue(1);
// i64 results come back split across EAX (low) and EDX (high).
586 SDOperand Lo = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
587 SDOperand Hi = DAG.getCopyFromReg(Lo.getValue(1), X86::EDX, MVT::i32,
589 RetVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Lo, Hi);
590 Chain = Hi.getValue(1);
595 std::vector<MVT::ValueType> Tys;
596 Tys.push_back(MVT::f64);
597 Tys.push_back(MVT::Other);
598 Tys.push_back(MVT::Flag);
599 std::vector<SDOperand> Ops;
600 Ops.push_back(Chain);
601 Ops.push_back(InFlag);
602 RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, Ops);
603 Chain = RetVal.getValue(1);
604 InFlag = RetVal.getValue(2);
606 // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
607 // shouldn't be necessary except that RFP cannot be live across
608 // multiple blocks. When stackifier is fixed, they can be uncoupled.
609 MachineFunction &MF = DAG.getMachineFunction();
610 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
611 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
613 Tys.push_back(MVT::Other);
615 Ops.push_back(Chain);
616 Ops.push_back(RetVal);
617 Ops.push_back(StackSlot);
618 Ops.push_back(DAG.getValueType(RetTyVT));
619 Ops.push_back(InFlag);
// Round-trip the x87 result through a stack slot to move it into an SSE
// register (see the FIXME above about RFP liveness).
620 Chain = DAG.getNode(X86ISD::FST, Tys, Ops);
621 RetVal = DAG.getLoad(RetTyVT, Chain, StackSlot,
622 DAG.getSrcValue(NULL));
623 Chain = RetVal.getValue(1);
626 if (RetTyVT == MVT::f32 && !X86ScalarSSE)
627 // FIXME: we would really like to remember that this FP_ROUND
628 // operation is okay to eliminate if we allow excess FP precision.
629 RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
635 return std::make_pair(RetVal, Chain);
638 //===----------------------------------------------------------------------===//
639 // Fast Calling Convention implementation
640 //===----------------------------------------------------------------------===//
642 // The X86 'fast' calling convention passes up to two integer arguments in
643 // registers (an appropriate portion of EAX/EDX), passes arguments in C order,
644 // and requires that the callee pop its arguments off the stack (allowing proper
645 // tail calls), and has the same return value conventions as C calling convs.
647 // This calling convention always arranges for the callee pop value to be 8n+4
648 // bytes, which is needed for tail recursion elimination and stack alignment
651 // Note that this can be enhanced in the future to pass fp vals in registers
652 // (when we have a global fp allocator) and do other tricks.
655 /// AddLiveIn - This helper function adds the specified physical register to the
656 /// MachineFunction as a live in value. It also creates a corresponding virtual
/// register for it (the elided original line completes this sentence) and
/// records the PReg->VReg live-in mapping on the MachineFunction.
// NOTE(review): the `return VReg;` and closing brace are elided in this
// excerpt; code lines are kept verbatim.
658 static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
659 TargetRegisterClass *RC) {
660 assert(RC->contains(PReg) && "Not the correct regclass!");
661 unsigned VReg = MF.getSSARegMap()->createVirtualRegister(RC);
662 MF.addLiveIn(PReg, VReg);
666 // FASTCC_NUM_INT_ARGS_INREGS - This is the max number of integer arguments
667 // to pass in registers. 0 is none, 1 is is "use EAX", 2 is "use EAX and
668 // EDX". Anything more is illegal.
670 // FIXME: The linscan register allocator currently has problem with
671 // coalescing. At the time of this writing, whenever it decides to coalesce
672 // a physreg with a virtreg, this increases the size of the physreg's live
673 // range, and the live range cannot ever be reduced. This causes problems if
674 // too many physregs are coaleced with virtregs, which can cause the register
675 // allocator to wedge itself.
677 // This code triggers this problem more often if we pass args in registers,
678 // so disable it until this is fixed.
680 // NOTE: this isn't marked const, so that GCC doesn't emit annoying warnings
681 // about code being dead.
683 static unsigned FASTCC_NUM_INT_ARGS_INREGS = 0;
// LowerFastCCArguments - Lower incoming arguments for the X86 fastcc
// convention: up to FASTCC_NUM_INT_ARGS_INREGS integer arguments arrive in
// (portions of) EAX/EDX and are copied out of virtual live-in registers;
// everything else is loaded from fixed stack slots like the C convention.
// The callee pops its stack arguments, so BytesToPopOnReturn is the total
// argument area size (padded to 8n+4 for alignment — see comment below).
// NOTE(review): this excerpt elides interior lines (the switch header over
// ObjectVT, the stack-fallback else branches, several closing braces, and
// the final `return ArgValues;`); code lines are kept verbatim.
686 std::vector<SDOperand>
687 X86TargetLowering::LowerFastCCArguments(Function &F, SelectionDAG &DAG) {
688 std::vector<SDOperand> ArgValues;
690 MachineFunction &MF = DAG.getMachineFunction();
691 MachineFrameInfo *MFI = MF.getFrameInfo();
693 // Add DAG nodes to load the arguments... On entry to a function the stack
694 // frame looks like this:
696 // [ESP] -- return address
697 // [ESP + 4] -- first nonreg argument (leftmost lexically)
698 // [ESP + 8] -- second nonreg argument, if first argument is 4 bytes in size
700 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
702 // Keep track of the number of integer regs passed so far. This can be either
703 // 0 (neither EAX or EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
705 unsigned NumIntRegs = 0;
707 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
708 MVT::ValueType ObjectVT = getValueType(I->getType());
709 unsigned ArgIncrement = 4;
710 unsigned ObjSize = 0;
714 default: assert(0 && "Unhandled argument type!");
// i8 argument in a register: use AL for the first integer reg, DL for the
// second.
717 if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
718 if (!I->use_empty()) {
719 unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::DL : X86::AL,
720 X86::R8RegisterClass);
721 ArgValue = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i8);
722 DAG.setRoot(ArgValue.getValue(1));
723 if (ObjectVT == MVT::i1)
724 // FIXME: Should insert a assertzext here.
725 ArgValue = DAG.getNode(ISD::TRUNCATE, MVT::i1, ArgValue);
// i16 argument in a register: AX first, then DX.
734 if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
735 if (!I->use_empty()) {
736 unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::DX : X86::AX,
737 X86::R16RegisterClass);
738 ArgValue = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i16);
739 DAG.setRoot(ArgValue.getValue(1));
// i32 argument in a register: EAX first, then EDX.
747 if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
748 if (!I->use_empty()) {
749 unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX,
750 X86::R32RegisterClass);
751 ArgValue = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32);
752 DAG.setRoot(ArgValue.getValue(1));
// i64 argument entirely in registers: low half in EAX, high half in EDX.
760 if (NumIntRegs+2 <= FASTCC_NUM_INT_ARGS_INREGS) {
761 if (!I->use_empty()) {
762 unsigned BotReg = AddLiveIn(MF, X86::EAX, X86::R32RegisterClass);
763 unsigned TopReg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);
765 SDOperand Low = DAG.getCopyFromReg(DAG.getRoot(), BotReg, MVT::i32);
766 SDOperand Hi = DAG.getCopyFromReg(Low.getValue(1), TopReg, MVT::i32);
767 DAG.setRoot(Hi.getValue(1));
769 ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Low, Hi);
// i64 split: only one register left, so low half in EDX and high half on
// the stack.
773 } else if (NumIntRegs+1 <= FASTCC_NUM_INT_ARGS_INREGS) {
774 if (!I->use_empty()) {
775 unsigned BotReg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);
776 SDOperand Low = DAG.getCopyFromReg(DAG.getRoot(), BotReg, MVT::i32);
777 DAG.setRoot(Low.getValue(1));
779 // Load the high part from memory.
780 // Create the frame index object for this incoming parameter...
781 int FI = MFI->CreateFixedObject(4, ArgOffset);
782 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
783 SDOperand Hi = DAG.getLoad(MVT::i32, DAG.getEntryNode(), FIN,
784 DAG.getSrcValue(NULL));
785 ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Low, Hi);
788 NumIntRegs = FASTCC_NUM_INT_ARGS_INREGS;
791 ObjSize = ArgIncrement = 8;
793 case MVT::f32: ObjSize = 4; break;
794 case MVT::f64: ObjSize = ArgIncrement = 8; break;
797 // Don't codegen dead arguments. FIXME: remove this check when we can nuke
799 if (ObjSize && !I->use_empty()) {
800 // Create the frame index object for this incoming parameter...
801 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
803 // Create the SelectionDAG nodes corresponding to a load from this
805 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
807 ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
808 DAG.getSrcValue(NULL));
809 } else if (ArgValue.Val == 0) {
// Dead argument not satisfied by a register copy above: use a typed zero.
810 if (MVT::isInteger(ObjectVT))
811 ArgValue = DAG.getConstant(0, ObjectVT);
813 ArgValue = DAG.getConstantFP(0, ObjectVT);
815 ArgValues.push_back(ArgValue);
818 ArgOffset += ArgIncrement; // Move on to the next argument.
821 // Make sure the instruction takes 8n+4 bytes to make sure the start of the
822 // arguments and the arguments after the retaddr has been pushed are aligned.
823 if ((ArgOffset & 7) == 0)
826 VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs.
827 ReturnAddrIndex = 0; // No return address slot generated yet.
828 BytesToPopOnReturn = ArgOffset; // Callee pops all stack arguments.
829 BytesCallerReserves = 0;
831 // Finally, inform the code generator which regs we return values in.
832 switch (getValueType(F.getReturnType())) {
833 default: assert(0 && "Unknown type!");
834 case MVT::isVoid: break;
// NOTE(review): case labels between the live-out registrations below are
// elided; same return-register scheme as the C convention (EAX, EAX:EDX
// for i64, ST0 for FP).
839 MF.addLiveOut(X86::EAX);
842 MF.addLiveOut(X86::EAX);
843 MF.addLiveOut(X86::EDX);
847 MF.addLiveOut(X86::ST0);
853 std::pair<SDOperand, SDOperand>
854 X86TargetLowering::LowerFastCCCallTo(SDOperand Chain, const Type *RetTy,
855 bool isTailCall, SDOperand Callee,
856 ArgListTy &Args, SelectionDAG &DAG) {
857 // Count how many bytes are to be pushed on the stack.
858 unsigned NumBytes = 0;
860 // Keep track of the number of integer regs passed so far. This can be either
861 // 0 (neither EAX or EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
863 unsigned NumIntRegs = 0;
865 for (unsigned i = 0, e = Args.size(); i != e; ++i)
866 switch (getValueType(Args[i].second)) {
867 default: assert(0 && "Unknown value type!");
872 if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
881 if (NumIntRegs+2 <= FASTCC_NUM_INT_ARGS_INREGS) {
884 } else if (NumIntRegs+1 <= FASTCC_NUM_INT_ARGS_INREGS) {
885 NumIntRegs = FASTCC_NUM_INT_ARGS_INREGS;
896 // Make sure the instruction takes 8n+4 bytes to make sure the start of the
897 // arguments and the arguments after the retaddr has been pushed are aligned.
898 if ((NumBytes & 7) == 0)
901 Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));
903 // Arguments go on the stack in reverse order, as specified by the ABI.
904 unsigned ArgOffset = 0;
905 SDOperand StackPtr = DAG.getRegister(X86::ESP, MVT::i32);
907 std::vector<SDOperand> Stores;
908 std::vector<SDOperand> RegValuesToPass;
909 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
910 switch (getValueType(Args[i].second)) {
911 default: assert(0 && "Unexpected ValueType for argument!");
913 Args[i].first = DAG.getNode(ISD::ANY_EXTEND, MVT::i8, Args[i].first);
918 if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
919 RegValuesToPass.push_back(Args[i].first);
925 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
926 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
927 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
928 Args[i].first, PtrOff,
929 DAG.getSrcValue(NULL)));
934 // Can pass (at least) part of it in regs?
935 if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
936 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
937 Args[i].first, DAG.getConstant(1, MVT::i32));
938 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
939 Args[i].first, DAG.getConstant(0, MVT::i32));
940 RegValuesToPass.push_back(Lo);
943 // Pass both parts in regs?
944 if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
945 RegValuesToPass.push_back(Hi);
948 // Pass the high part in memory.
949 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
950 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
951 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
952 Hi, PtrOff, DAG.getSrcValue(NULL)));
959 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
960 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
961 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
962 Args[i].first, PtrOff,
963 DAG.getSrcValue(NULL)));
969 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);
971 // Make sure the instruction takes 8n+4 bytes to make sure the start of the
972 // arguments and the arguments after the retaddr has been pushed are aligned.
973 if ((ArgOffset & 7) == 0)
976 std::vector<MVT::ValueType> RetVals;
977 MVT::ValueType RetTyVT = getValueType(RetTy);
979 RetVals.push_back(MVT::Other);
981 // The result values produced have to be legal. Promote the result.
983 case MVT::isVoid: break;
985 RetVals.push_back(RetTyVT);
990 RetVals.push_back(MVT::i32);
994 RetVals.push_back(MVT::f32);
996 RetVals.push_back(MVT::f64);
999 RetVals.push_back(MVT::i32);
1000 RetVals.push_back(MVT::i32);
1004 // Build a sequence of copy-to-reg nodes chained together with token chain
1005 // and flag operands which copy the outgoing args into registers.
1007 for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) {
1009 SDOperand RegToPass = RegValuesToPass[i];
1010 switch (RegToPass.getValueType()) {
1011 default: assert(0 && "Bad thing to pass in regs");
1013 CCReg = (i == 0) ? X86::AL : X86::DL;
1016 CCReg = (i == 0) ? X86::AX : X86::DX;
1019 CCReg = (i == 0) ? X86::EAX : X86::EDX;
1023 Chain = DAG.getCopyToReg(Chain, CCReg, RegToPass, InFlag);
1024 InFlag = Chain.getValue(1);
1027 std::vector<MVT::ValueType> NodeTys;
1028 NodeTys.push_back(MVT::Other); // Returns a chain
1029 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
1030 std::vector<SDOperand> Ops;
1031 Ops.push_back(Chain);
1032 Ops.push_back(Callee);
1034 Ops.push_back(InFlag);
1036 // FIXME: Do not generate X86ISD::TAILCALL for now.
1037 Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops);
1038 InFlag = Chain.getValue(1);
1041 NodeTys.push_back(MVT::Other); // Returns a chain
1042 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
1044 Ops.push_back(Chain);
1045 Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));
1046 Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));
1047 Ops.push_back(InFlag);
1048 Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, Ops);
1049 InFlag = Chain.getValue(1);
1052 if (RetTyVT != MVT::isVoid) {
1054 default: assert(0 && "Unknown value type to return!");
1057 RetVal = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag);
1058 Chain = RetVal.getValue(1);
1059 if (RetTyVT == MVT::i1)
1060 RetVal = DAG.getNode(ISD::TRUNCATE, MVT::i1, RetVal);
1063 RetVal = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag);
1064 Chain = RetVal.getValue(1);
1067 RetVal = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
1068 Chain = RetVal.getValue(1);
1071 SDOperand Lo = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
1072 SDOperand Hi = DAG.getCopyFromReg(Lo.getValue(1), X86::EDX, MVT::i32,
1074 RetVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Lo, Hi);
1075 Chain = Hi.getValue(1);
1080 std::vector<MVT::ValueType> Tys;
1081 Tys.push_back(MVT::f64);
1082 Tys.push_back(MVT::Other);
1083 Tys.push_back(MVT::Flag);
1084 std::vector<SDOperand> Ops;
1085 Ops.push_back(Chain);
1086 Ops.push_back(InFlag);
1087 RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, Ops);
1088 Chain = RetVal.getValue(1);
1089 InFlag = RetVal.getValue(2);
1091 // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
1092 // shouldn't be necessary except that RFP cannot be live across
1093 // multiple blocks. When stackifier is fixed, they can be uncoupled.
1094 MachineFunction &MF = DAG.getMachineFunction();
1095 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
1096 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
1098 Tys.push_back(MVT::Other);
1100 Ops.push_back(Chain);
1101 Ops.push_back(RetVal);
1102 Ops.push_back(StackSlot);
1103 Ops.push_back(DAG.getValueType(RetTyVT));
1104 Ops.push_back(InFlag);
1105 Chain = DAG.getNode(X86ISD::FST, Tys, Ops);
1106 RetVal = DAG.getLoad(RetTyVT, Chain, StackSlot,
1107 DAG.getSrcValue(NULL));
1108 Chain = RetVal.getValue(1);
1111 if (RetTyVT == MVT::f32 && !X86ScalarSSE)
1112 // FIXME: we would really like to remember that this FP_ROUND
1113 // operation is okay to eliminate if we allow excess FP precision.
1114 RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
1120 return std::make_pair(RetVal, Chain);
1123 SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
1124 if (ReturnAddrIndex == 0) {
1125 // Set up a frame object for the return address.
1126 MachineFunction &MF = DAG.getMachineFunction();
1127 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
1130 return DAG.getFrameIndex(ReturnAddrIndex, MVT::i32);
1135 std::pair<SDOperand, SDOperand> X86TargetLowering::
1136 LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
1137 SelectionDAG &DAG) {
1139 if (Depth) // Depths > 0 not supported yet!
1140 Result = DAG.getConstant(0, getPointerTy());
1142 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
1143 if (!isFrameAddress)
1144 // Just load the return address
1145 Result = DAG.getLoad(MVT::i32, DAG.getEntryNode(), RetAddrFI,
1146 DAG.getSrcValue(NULL));
1148 Result = DAG.getNode(ISD::SUB, MVT::i32, RetAddrFI,
1149 DAG.getConstant(4, MVT::i32));
1151 return std::make_pair(Result, Chain);
1154 /// getCondBrOpcodeForX86CC - Returns the X86 conditional branch opcode
1155 /// which corresponds to the condition code.
1156 static unsigned getCondBrOpcodeForX86CC(unsigned X86CC) {
1158 default: assert(0 && "Unknown X86 conditional code!");
1159 case X86ISD::COND_A: return X86::JA;
1160 case X86ISD::COND_AE: return X86::JAE;
1161 case X86ISD::COND_B: return X86::JB;
1162 case X86ISD::COND_BE: return X86::JBE;
1163 case X86ISD::COND_E: return X86::JE;
1164 case X86ISD::COND_G: return X86::JG;
1165 case X86ISD::COND_GE: return X86::JGE;
1166 case X86ISD::COND_L: return X86::JL;
1167 case X86ISD::COND_LE: return X86::JLE;
1168 case X86ISD::COND_NE: return X86::JNE;
1169 case X86ISD::COND_NO: return X86::JNO;
1170 case X86ISD::COND_NP: return X86::JNP;
1171 case X86ISD::COND_NS: return X86::JNS;
1172 case X86ISD::COND_O: return X86::JO;
1173 case X86ISD::COND_P: return X86::JP;
1174 case X86ISD::COND_S: return X86::JS;
1178 /// translateX86CC - do a one to one translation of a ISD::CondCode to the X86
1179 /// specific condition code. It returns a false if it cannot do a direct
1180 /// translation. X86CC is the translated CondCode. Flip is set to true if the
1181 /// the order of comparison operands should be flipped.
1182 static bool translateX86CC(SDOperand CC, bool isFP, unsigned &X86CC,
1184 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
1186 X86CC = X86ISD::COND_INVALID;
1188 switch (SetCCOpcode) {
1190 case ISD::SETEQ: X86CC = X86ISD::COND_E; break;
1191 case ISD::SETGT: X86CC = X86ISD::COND_G; break;
1192 case ISD::SETGE: X86CC = X86ISD::COND_GE; break;
1193 case ISD::SETLT: X86CC = X86ISD::COND_L; break;
1194 case ISD::SETLE: X86CC = X86ISD::COND_LE; break;
1195 case ISD::SETNE: X86CC = X86ISD::COND_NE; break;
1196 case ISD::SETULT: X86CC = X86ISD::COND_B; break;
1197 case ISD::SETUGT: X86CC = X86ISD::COND_A; break;
1198 case ISD::SETULE: X86CC = X86ISD::COND_BE; break;
1199 case ISD::SETUGE: X86CC = X86ISD::COND_AE; break;
1202 // On a floating point condition, the flags are set as follows:
1204 // 0 | 0 | 0 | X > Y
1205 // 0 | 0 | 1 | X < Y
1206 // 1 | 0 | 0 | X == Y
1207 // 1 | 1 | 1 | unordered
1208 switch (SetCCOpcode) {
1211 case ISD::SETEQ: X86CC = X86ISD::COND_E; break;
1212 case ISD::SETOLE: Flip = true; // Fallthrough
1214 case ISD::SETGT: X86CC = X86ISD::COND_A; break;
1215 case ISD::SETOLT: Flip = true; // Fallthrough
1217 case ISD::SETGE: X86CC = X86ISD::COND_AE; break;
1218 case ISD::SETUGE: Flip = true; // Fallthrough
1220 case ISD::SETLT: X86CC = X86ISD::COND_B; break;
1221 case ISD::SETUGT: Flip = true; // Fallthrough
1223 case ISD::SETLE: X86CC = X86ISD::COND_BE; break;
1225 case ISD::SETNE: X86CC = X86ISD::COND_NE; break;
1226 case ISD::SETUO: X86CC = X86ISD::COND_P; break;
1227 case ISD::SETO: X86CC = X86ISD::COND_NP; break;
1231 return X86CC != X86ISD::COND_INVALID;
1234 /// hasFPCMov - is there a floating point cmov for the specific X86 condition
1235 /// code. Current x86 isa includes the following FP cmov instructions:
1236 /// fcmovb, fcomvbe, fcomve, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
1237 static bool hasFPCMov(unsigned X86CC) {
1241 case X86ISD::COND_B:
1242 case X86ISD::COND_BE:
1243 case X86ISD::COND_E:
1244 case X86ISD::COND_P:
1245 case X86ISD::COND_A:
1246 case X86ISD::COND_AE:
1247 case X86ISD::COND_NE:
1248 case X86ISD::COND_NP:
1254 X86TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
1255 MachineBasicBlock *BB) {
1256 switch (MI->getOpcode()) {
1257 default: assert(false && "Unexpected instr type to insert");
1258 case X86::CMOV_FR32:
1259 case X86::CMOV_FR64: {
1260 // To "insert" a SELECT_CC instruction, we actually have to insert the
1261 // diamond control-flow pattern. The incoming instruction knows the
1262 // destination vreg to set, the condition code register to branch on, the
1263 // true/false values to select between, and a branch opcode to use.
1264 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1265 ilist<MachineBasicBlock>::iterator It = BB;
1271 // cmpTY ccX, r1, r2
1273 // fallthrough --> copy0MBB
1274 MachineBasicBlock *thisMBB = BB;
1275 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
1276 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
1277 unsigned Opc = getCondBrOpcodeForX86CC(MI->getOperand(3).getImmedValue());
1278 BuildMI(BB, Opc, 1).addMBB(sinkMBB);
1279 MachineFunction *F = BB->getParent();
1280 F->getBasicBlockList().insert(It, copy0MBB);
1281 F->getBasicBlockList().insert(It, sinkMBB);
1282 // Update machine-CFG edges by first adding all successors of the current
1283 // block to the new block which will contain the Phi node for the select.
1284 for(MachineBasicBlock::succ_iterator i = BB->succ_begin(),
1285 e = BB->succ_end(); i != e; ++i)
1286 sinkMBB->addSuccessor(*i);
1287 // Next, remove all successors of the current block, and add the true
1288 // and fallthrough blocks as its successors.
1289 while(!BB->succ_empty())
1290 BB->removeSuccessor(BB->succ_begin());
1291 BB->addSuccessor(copy0MBB);
1292 BB->addSuccessor(sinkMBB);
1295 // %FalseValue = ...
1296 // # fallthrough to sinkMBB
1299 // Update machine-CFG edges
1300 BB->addSuccessor(sinkMBB);
1303 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
1306 BuildMI(BB, X86::PHI, 4, MI->getOperand(0).getReg())
1307 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
1308 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
1310 delete MI; // The pseudo instruction is gone now.
1314 case X86::FP_TO_INT16_IN_MEM:
1315 case X86::FP_TO_INT32_IN_MEM:
1316 case X86::FP_TO_INT64_IN_MEM: {
1317 // Change the floating point control register to use "round towards zero"
1318 // mode when truncating to an integer value.
1319 MachineFunction *F = BB->getParent();
1320 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
1321 addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);
1323 // Load the old value of the high byte of the control word...
1325 F->getSSARegMap()->createVirtualRegister(X86::R16RegisterClass);
1326 addFrameReference(BuildMI(BB, X86::MOV16rm, 4, OldCW), CWFrameIdx);
1328 // Set the high part to be round to zero...
1329 addFrameReference(BuildMI(BB, X86::MOV16mi, 5), CWFrameIdx).addImm(0xC7F);
1331 // Reload the modified control word now...
1332 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
1334 // Restore the memory image of control word to original value
1335 addFrameReference(BuildMI(BB, X86::MOV16mr, 5), CWFrameIdx).addReg(OldCW);
1337 // Get the X86 opcode to use.
1339 switch (MI->getOpcode()) {
1340 default: assert(0 && "illegal opcode!");
1341 case X86::FP_TO_INT16_IN_MEM: Opc = X86::FpIST16m; break;
1342 case X86::FP_TO_INT32_IN_MEM: Opc = X86::FpIST32m; break;
1343 case X86::FP_TO_INT64_IN_MEM: Opc = X86::FpIST64m; break;
1347 MachineOperand &Op = MI->getOperand(0);
1348 if (Op.isRegister()) {
1349 AM.BaseType = X86AddressMode::RegBase;
1350 AM.Base.Reg = Op.getReg();
1352 AM.BaseType = X86AddressMode::FrameIndexBase;
1353 AM.Base.FrameIndex = Op.getFrameIndex();
1355 Op = MI->getOperand(1);
1356 if (Op.isImmediate())
1357 AM.Scale = Op.getImmedValue();
1358 Op = MI->getOperand(2);
1359 if (Op.isImmediate())
1360 AM.IndexReg = Op.getImmedValue();
1361 Op = MI->getOperand(3);
1362 if (Op.isGlobalAddress()) {
1363 AM.GV = Op.getGlobal();
1365 AM.Disp = Op.getImmedValue();
1367 addFullAddress(BuildMI(BB, Opc, 5), AM).addReg(MI->getOperand(4).getReg());
1369 // Reload the original control word now.
1370 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
1372 delete MI; // The pseudo instruction is gone now.
1379 //===----------------------------------------------------------------------===//
1380 // X86 Custom Lowering Hooks
1381 //===----------------------------------------------------------------------===//
1383 /// DarwinGVRequiresExtraLoad - true if accessing the GV requires an extra
1384 /// load. For Darwin, external and weak symbols are indirect, loading the value
1385 /// at address GV rather then the value of GV itself. This means that the
1386 /// GlobalAddress must be in the base or index register of the address, not the
1387 /// GV offset field.
1388 static bool DarwinGVRequiresExtraLoad(GlobalValue *GV) {
1389 return (GV->hasWeakLinkage() || GV->hasLinkOnceLinkage() ||
1390 (GV->isExternal() && !GV->hasNotBeenReadFromBytecode()));
1393 /// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
1394 /// specifies a shuffle of elements that is suitable for input to PSHUFD.
1395 bool X86::isPSHUFDMask(SDNode *N) {
1396 assert(N->getOpcode() == ISD::BUILD_VECTOR);
1398 if (N->getNumOperands() != 4)
1401 // Check if the value doesn't reference the second vector.
1402 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
1403 SDOperand Arg = N->getOperand(i);
1404 if (Arg.getOpcode() == ISD::UNDEF) continue;
1405 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
1406 if (cast<ConstantSDNode>(Arg)->getValue() >= 4)
1413 /// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
1414 /// specifies a shuffle of elements that is suitable for input to PSHUFD.
1415 bool X86::isPSHUFHWMask(SDNode *N) {
1416 assert(N->getOpcode() == ISD::BUILD_VECTOR);
1418 if (N->getNumOperands() != 8)
1421 // Lower quadword copied in order.
1422 for (unsigned i = 0; i != 4; ++i) {
1423 SDOperand Arg = N->getOperand(i);
1424 if (Arg.getOpcode() == ISD::UNDEF) continue;
1425 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
1426 if (cast<ConstantSDNode>(Arg)->getValue() != i)
1430 // Upper quadword shuffled.
1431 for (unsigned i = 4; i != 8; ++i) {
1432 SDOperand Arg = N->getOperand(i);
1433 if (Arg.getOpcode() == ISD::UNDEF) continue;
1434 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
1435 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
1436 if (Val < 4 || Val > 7)
1443 /// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
1444 /// specifies a shuffle of elements that is suitable for input to PSHUFD.
1445 bool X86::isPSHUFLWMask(SDNode *N) {
1446 assert(N->getOpcode() == ISD::BUILD_VECTOR);
1448 if (N->getNumOperands() != 8)
1451 // Upper quadword copied in order.
1452 for (unsigned i = 4; i != 8; ++i) {
1453 SDOperand Arg = N->getOperand(i);
1454 if (Arg.getOpcode() == ISD::UNDEF) continue;
1455 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
1456 if (cast<ConstantSDNode>(Arg)->getValue() != i)
1460 // Lower quadword shuffled.
1461 for (unsigned i = 0; i != 4; ++i) {
1462 SDOperand Arg = N->getOperand(i);
1463 if (Arg.getOpcode() == ISD::UNDEF) continue;
1464 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
1465 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
1473 /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
1474 /// specifies a shuffle of elements that is suitable for input to SHUFP*.
1475 bool X86::isSHUFPMask(SDNode *N) {
1476 assert(N->getOpcode() == ISD::BUILD_VECTOR);
1478 unsigned NumElems = N->getNumOperands();
1479 if (NumElems == 2) {
1480 // The only case that ought be handled by SHUFPD is
1481 // Dest { 2, 1 } <= shuffle( Dest { 1, 0 }, Src { 3, 2 }
1482 // Expect bit 0 == 1, bit1 == 2
1483 SDOperand Bit0 = N->getOperand(0);
1484 SDOperand Bit1 = N->getOperand(1);
1485 assert(isa<ConstantSDNode>(Bit0) && isa<ConstantSDNode>(Bit1) &&
1486 "Invalid VECTOR_SHUFFLE mask!");
1487 return (cast<ConstantSDNode>(Bit0)->getValue() == 1 &&
1488 cast<ConstantSDNode>(Bit1)->getValue() == 2);
1491 if (NumElems != 4) return false;
1493 // Each half must refer to only one of the vector.
1494 for (unsigned i = 0; i < 2; ++i) {
1495 SDOperand Arg = N->getOperand(i);
1496 if (Arg.getOpcode() == ISD::UNDEF) continue;
1497 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
1498 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
1499 if (Val >= 4) return false;
1501 for (unsigned i = 2; i < 4; ++i) {
1502 SDOperand Arg = N->getOperand(i);
1503 if (Arg.getOpcode() == ISD::UNDEF) continue;
1504 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
1505 unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
1506 if (Val < 4) return false;
1512 /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
1513 /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
1514 bool X86::isMOVHLPSMask(SDNode *N) {
1515 assert(N->getOpcode() == ISD::BUILD_VECTOR);
1517 if (N->getNumOperands() != 4)
1520 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3
1521 SDOperand Bit0 = N->getOperand(0);
1522 SDOperand Bit1 = N->getOperand(1);
1523 SDOperand Bit2 = N->getOperand(2);
1524 SDOperand Bit3 = N->getOperand(3);
1526 if (Bit0.getOpcode() != ISD::UNDEF) {
1527 assert(isa<ConstantSDNode>(Bit0) && "Invalid VECTOR_SHUFFLE mask!");
1528 if (cast<ConstantSDNode>(Bit0)->getValue() != 6)
1532 if (Bit1.getOpcode() != ISD::UNDEF) {
1533 assert(isa<ConstantSDNode>(Bit1) && "Invalid VECTOR_SHUFFLE mask!");
1534 if (cast<ConstantSDNode>(Bit1)->getValue() != 7)
1538 if (Bit2.getOpcode() != ISD::UNDEF) {
1539 assert(isa<ConstantSDNode>(Bit2) && "Invalid VECTOR_SHUFFLE mask!");
1540 if (cast<ConstantSDNode>(Bit2)->getValue() != 2)
1544 if (Bit3.getOpcode() != ISD::UNDEF) {
1545 assert(isa<ConstantSDNode>(Bit3) && "Invalid VECTOR_SHUFFLE mask!");
1546 if (cast<ConstantSDNode>(Bit3)->getValue() != 3)
1553 /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
1554 /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
1555 bool X86::isMOVLHPSMask(SDNode *N) {
1556 assert(N->getOpcode() == ISD::BUILD_VECTOR);
1558 if (N->getNumOperands() != 4)
1561 // Expect bit0 == 0, bit1 == 1, bit2 == 4, bit3 == 5
1562 SDOperand Bit0 = N->getOperand(0);
1563 SDOperand Bit1 = N->getOperand(1);
1564 SDOperand Bit2 = N->getOperand(2);
1565 SDOperand Bit3 = N->getOperand(3);
1567 if (Bit0.getOpcode() != ISD::UNDEF) {
1568 assert(isa<ConstantSDNode>(Bit0) && "Invalid VECTOR_SHUFFLE mask!");
1569 if (cast<ConstantSDNode>(Bit0)->getValue() != 0)
1573 if (Bit1.getOpcode() != ISD::UNDEF) {
1574 assert(isa<ConstantSDNode>(Bit1) && "Invalid VECTOR_SHUFFLE mask!");
1575 if (cast<ConstantSDNode>(Bit1)->getValue() != 1)
1579 if (Bit2.getOpcode() != ISD::UNDEF) {
1580 assert(isa<ConstantSDNode>(Bit2) && "Invalid VECTOR_SHUFFLE mask!");
1581 if (cast<ConstantSDNode>(Bit2)->getValue() != 4)
1585 if (Bit3.getOpcode() != ISD::UNDEF) {
1586 assert(isa<ConstantSDNode>(Bit3) && "Invalid VECTOR_SHUFFLE mask!");
1587 if (cast<ConstantSDNode>(Bit3)->getValue() != 5)
1594 /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
1595 /// specifies a shuffle of elements that is suitable for input to UNPCKL.
1596 bool X86::isUNPCKLMask(SDNode *N) {
1597 assert(N->getOpcode() == ISD::BUILD_VECTOR);
1599 unsigned NumElems = N->getNumOperands();
1600 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
1603 for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
1604 SDOperand BitI = N->getOperand(i);
1605 SDOperand BitI1 = N->getOperand(i+1);
1607 if (BitI.getOpcode() != ISD::UNDEF) {
1608 assert(isa<ConstantSDNode>(BitI) && "Invalid VECTOR_SHUFFLE mask!");
1609 if (cast<ConstantSDNode>(BitI)->getValue() != j)
1613 if (BitI1.getOpcode() != ISD::UNDEF) {
1614 assert(isa<ConstantSDNode>(BitI1) && "Invalid VECTOR_SHUFFLE mask!");
1615 if (cast<ConstantSDNode>(BitI)->getValue() != j + NumElems)
1623 /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
1624 /// specifies a shuffle of elements that is suitable for input to UNPCKH.
1625 bool X86::isUNPCKHMask(SDNode *N) {
1626 assert(N->getOpcode() == ISD::BUILD_VECTOR);
1628 unsigned NumElems = N->getNumOperands();
1629 if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
1632 for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
1633 SDOperand BitI = N->getOperand(i);
1634 SDOperand BitI1 = N->getOperand(i+1);
1636 if (BitI.getOpcode() != ISD::UNDEF) {
1637 assert(isa<ConstantSDNode>(BitI) && "Invalid VECTOR_SHUFFLE mask!");
1638 if (cast<ConstantSDNode>(BitI)->getValue() != j + NumElems/2)
1642 if (BitI1.getOpcode() != ISD::UNDEF) {
1643 assert(isa<ConstantSDNode>(BitI1) && "Invalid VECTOR_SHUFFLE mask!");
1644 if (cast<ConstantSDNode>(BitI)->getValue() != j + NumElems/2 + NumElems)
1652 /// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
1653 /// a splat of a single element.
1654 bool X86::isSplatMask(SDNode *N) {
1655 assert(N->getOpcode() == ISD::BUILD_VECTOR);
1657 // We can only splat 64-bit, and 32-bit quantities.
1658 if (N->getNumOperands() != 4 && N->getNumOperands() != 2)
1661 // This is a splat operation if each element of the permute is the same, and
1662 // if the value doesn't reference the second vector.
1663 SDOperand Elt = N->getOperand(0);
1664 assert(isa<ConstantSDNode>(Elt) && "Invalid VECTOR_SHUFFLE mask!");
1665 for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i) {
1666 SDOperand Arg = N->getOperand(i);
1667 if (Arg.getOpcode() == ISD::UNDEF) continue;
1668 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
1669 if (Arg != Elt) return false;
1672 // Make sure it is a splat of the first vector operand.
1673 return cast<ConstantSDNode>(Elt)->getValue() < N->getNumOperands();
1676 /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
1677 /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
1679 unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
1680 unsigned NumOperands = N->getNumOperands();
1681 unsigned Shift = (NumOperands == 4) ? 2 : 1;
1683 for (unsigned i = 0; i < NumOperands; ++i) {
1685 SDOperand Arg = N->getOperand(NumOperands-i-1);
1686 if (Arg.getOpcode() != ISD::UNDEF)
1687 Val = cast<ConstantSDNode>(Arg)->getValue();
1688 if (Val >= NumOperands) Val -= NumOperands;
1690 if (i != NumOperands - 1)
1697 /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
1698 /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW
1700 unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) {
1702 // 8 nodes, but we only care about the last 4.
1703 for (unsigned i = 7; i >= 4; --i) {
1705 SDOperand Arg = N->getOperand(i);
1706 if (Arg.getOpcode() != ISD::UNDEF)
1707 Val = cast<ConstantSDNode>(Arg)->getValue();
1716 /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
1717 /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW
1719 unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) {
1721 // 8 nodes, but we only care about the first 4.
1722 for (int i = 3; i >= 0; --i) {
1724 SDOperand Arg = N->getOperand(i);
1725 if (Arg.getOpcode() != ISD::UNDEF)
1726 Val = cast<ConstantSDNode>(Arg)->getValue();
1735 /// NormalizeVectorShuffle - Swap vector_shuffle operands (as well as
1736 /// values in ther permute mask if needed. Use V1 as second vector if it is
1737 /// undef. Return an empty SDOperand is it is already well formed.
1738 static SDOperand NormalizeVectorShuffle(SDOperand V1, SDOperand V2,
1739 SDOperand Mask, MVT::ValueType VT,
1740 SelectionDAG &DAG) {
1741 unsigned NumElems = Mask.getNumOperands();
1742 SDOperand Half1 = Mask.getOperand(0);
1743 SDOperand Half2 = Mask.getOperand(NumElems/2);
1744 bool V2Undef = false;
1745 if (V2.getOpcode() == ISD::UNDEF) {
1750 if (cast<ConstantSDNode>(Half1)->getValue() >= NumElems &&
1751 cast<ConstantSDNode>(Half2)->getValue() < NumElems) {
1752 // Swap the operands and change mask.
1753 std::vector<SDOperand> MaskVec;
1754 for (unsigned i = NumElems / 2; i != NumElems; ++i)
1755 MaskVec.push_back(Mask.getOperand(i));
1756 for (unsigned i = 0; i != NumElems / 2; ++i)
1757 MaskVec.push_back(Mask.getOperand(i));
1759 DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(), MaskVec);
1760 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V2, V1, Mask);
1764 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask);
1769 /// LowerOperation - Provide custom lowering hooks for some operations.
1771 SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
1772 switch (Op.getOpcode()) {
1773 default: assert(0 && "Should not custom lower this!");
1774 case ISD::SHL_PARTS:
1775 case ISD::SRA_PARTS:
1776 case ISD::SRL_PARTS: {
1777 assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 &&
1778 "Not an i64 shift!");
1779 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
1780 SDOperand ShOpLo = Op.getOperand(0);
1781 SDOperand ShOpHi = Op.getOperand(1);
1782 SDOperand ShAmt = Op.getOperand(2);
1783 SDOperand Tmp1 = isSRA ? DAG.getNode(ISD::SRA, MVT::i32, ShOpHi,
1784 DAG.getConstant(31, MVT::i8))
1785 : DAG.getConstant(0, MVT::i32);
1787 SDOperand Tmp2, Tmp3;
1788 if (Op.getOpcode() == ISD::SHL_PARTS) {
1789 Tmp2 = DAG.getNode(X86ISD::SHLD, MVT::i32, ShOpHi, ShOpLo, ShAmt);
1790 Tmp3 = DAG.getNode(ISD::SHL, MVT::i32, ShOpLo, ShAmt);
1792 Tmp2 = DAG.getNode(X86ISD::SHRD, MVT::i32, ShOpLo, ShOpHi, ShAmt);
1793 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, MVT::i32, ShOpHi, ShAmt);
1796 SDOperand InFlag = DAG.getNode(X86ISD::TEST, MVT::Flag,
1797 ShAmt, DAG.getConstant(32, MVT::i8));
1800 SDOperand CC = DAG.getConstant(X86ISD::COND_NE, MVT::i8);
1802 std::vector<MVT::ValueType> Tys;
1803 Tys.push_back(MVT::i32);
1804 Tys.push_back(MVT::Flag);
1805 std::vector<SDOperand> Ops;
1806 if (Op.getOpcode() == ISD::SHL_PARTS) {
1807 Ops.push_back(Tmp2);
1808 Ops.push_back(Tmp3);
1810 Ops.push_back(InFlag);
1811 Hi = DAG.getNode(X86ISD::CMOV, Tys, Ops);
1812 InFlag = Hi.getValue(1);
1815 Ops.push_back(Tmp3);
1816 Ops.push_back(Tmp1);
1818 Ops.push_back(InFlag);
1819 Lo = DAG.getNode(X86ISD::CMOV, Tys, Ops);
1821 Ops.push_back(Tmp2);
1822 Ops.push_back(Tmp3);
1824 Ops.push_back(InFlag);
1825 Lo = DAG.getNode(X86ISD::CMOV, Tys, Ops);
1826 InFlag = Lo.getValue(1);
1829 Ops.push_back(Tmp3);
1830 Ops.push_back(Tmp1);
1832 Ops.push_back(InFlag);
1833 Hi = DAG.getNode(X86ISD::CMOV, Tys, Ops);
1837 Tys.push_back(MVT::i32);
1838 Tys.push_back(MVT::i32);
1842 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops);
1844 case ISD::SINT_TO_FP: {
1845 assert(Op.getOperand(0).getValueType() <= MVT::i64 &&
1846 Op.getOperand(0).getValueType() >= MVT::i16 &&
1847 "Unknown SINT_TO_FP to lower!");
1850 MVT::ValueType SrcVT = Op.getOperand(0).getValueType();
1851 unsigned Size = MVT::getSizeInBits(SrcVT)/8;
1852 MachineFunction &MF = DAG.getMachineFunction();
1853 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
1854 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
1855 SDOperand Chain = DAG.getNode(ISD::STORE, MVT::Other,
1856 DAG.getEntryNode(), Op.getOperand(0),
1857 StackSlot, DAG.getSrcValue(NULL));
1860 std::vector<MVT::ValueType> Tys;
1861 Tys.push_back(MVT::f64);
1862 Tys.push_back(MVT::Other);
1863 if (X86ScalarSSE) Tys.push_back(MVT::Flag);
1864 std::vector<SDOperand> Ops;
1865 Ops.push_back(Chain);
1866 Ops.push_back(StackSlot);
1867 Ops.push_back(DAG.getValueType(SrcVT));
1868 Result = DAG.getNode(X86ScalarSSE ? X86ISD::FILD_FLAG :X86ISD::FILD,
1872 Chain = Result.getValue(1);
1873 SDOperand InFlag = Result.getValue(2);
1875 // FIXME: Currently the FST is flagged to the FILD_FLAG. This
1876 // shouldn't be necessary except that RFP cannot be live across
1877 // multiple blocks. When stackifier is fixed, they can be uncoupled.
1878 MachineFunction &MF = DAG.getMachineFunction();
1879 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
1880 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
1881 std::vector<MVT::ValueType> Tys;
1882 Tys.push_back(MVT::Other);
1883 std::vector<SDOperand> Ops;
1884 Ops.push_back(Chain);
1885 Ops.push_back(Result);
1886 Ops.push_back(StackSlot);
1887 Ops.push_back(DAG.getValueType(Op.getValueType()));
1888 Ops.push_back(InFlag);
1889 Chain = DAG.getNode(X86ISD::FST, Tys, Ops);
1890 Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot,
1891 DAG.getSrcValue(NULL));
1896 case ISD::FP_TO_SINT: {
1897 assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 &&
1898 "Unknown FP_TO_SINT to lower!");
1899 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary
1901 MachineFunction &MF = DAG.getMachineFunction();
1902 unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8;
1903 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
1904 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
1907 switch (Op.getValueType()) {
1908 default: assert(0 && "Invalid FP_TO_SINT to lower!");
1909 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
1910 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
1911 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
1914 SDOperand Chain = DAG.getEntryNode();
1915 SDOperand Value = Op.getOperand(0);
1917 assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!");
1918 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value, StackSlot,
1919 DAG.getSrcValue(0));
1920 std::vector<MVT::ValueType> Tys;
1921 Tys.push_back(MVT::f64);
1922 Tys.push_back(MVT::Other);
1923 std::vector<SDOperand> Ops;
1924 Ops.push_back(Chain);
1925 Ops.push_back(StackSlot);
1926 Ops.push_back(DAG.getValueType(Op.getOperand(0).getValueType()));
1927 Value = DAG.getNode(X86ISD::FLD, Tys, Ops);
1928 Chain = Value.getValue(1);
1929 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
1930 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
1933 // Build the FP_TO_INT*_IN_MEM
1934 std::vector<SDOperand> Ops;
1935 Ops.push_back(Chain);
1936 Ops.push_back(Value);
1937 Ops.push_back(StackSlot);
1938 SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops);
1941 return DAG.getLoad(Op.getValueType(), FIST, StackSlot,
1942 DAG.getSrcValue(NULL));
1944 case ISD::READCYCLECOUNTER: {
1945 std::vector<MVT::ValueType> Tys;
1946 Tys.push_back(MVT::Other);
1947 Tys.push_back(MVT::Flag);
1948 std::vector<SDOperand> Ops;
1949 Ops.push_back(Op.getOperand(0));
1950 SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, Ops);
1952 Ops.push_back(DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)));
1953 Ops.push_back(DAG.getCopyFromReg(Ops[0].getValue(1), X86::EDX,
1954 MVT::i32, Ops[0].getValue(2)));
1955 Ops.push_back(Ops[1].getValue(1));
1956 Tys[0] = Tys[1] = MVT::i32;
1957 Tys.push_back(MVT::Other);
1958 return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops);
1961 MVT::ValueType VT = Op.getValueType();
1962 const Type *OpNTy = MVT::getTypeForValueType(VT);
1963 std::vector<Constant*> CV;
1964 if (VT == MVT::f64) {
1965 CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(~(1ULL << 63))));
1966 CV.push_back(ConstantFP::get(OpNTy, 0.0));
1968 CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(~(1U << 31))));
1969 CV.push_back(ConstantFP::get(OpNTy, 0.0));
1970 CV.push_back(ConstantFP::get(OpNTy, 0.0));
1971 CV.push_back(ConstantFP::get(OpNTy, 0.0));
1973 Constant *CS = ConstantStruct::get(CV);
1974 SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
1976 = DAG.getNode(X86ISD::LOAD_PACK,
1977 VT, DAG.getEntryNode(), CPIdx, DAG.getSrcValue(NULL));
1978 return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask);
1981 MVT::ValueType VT = Op.getValueType();
1982 const Type *OpNTy = MVT::getTypeForValueType(VT);
1983 std::vector<Constant*> CV;
1984 if (VT == MVT::f64) {
1985 CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(1ULL << 63)));
1986 CV.push_back(ConstantFP::get(OpNTy, 0.0));
1988 CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(1U << 31)));
1989 CV.push_back(ConstantFP::get(OpNTy, 0.0));
1990 CV.push_back(ConstantFP::get(OpNTy, 0.0));
1991 CV.push_back(ConstantFP::get(OpNTy, 0.0));
1993 Constant *CS = ConstantStruct::get(CV);
1994 SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
1996 = DAG.getNode(X86ISD::LOAD_PACK,
1997 VT, DAG.getEntryNode(), CPIdx, DAG.getSrcValue(NULL));
1998 return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask);
2001 assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
2003 SDOperand CC = Op.getOperand(2);
2004 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
2005 bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType());
2008 if (translateX86CC(CC, isFP, X86CC, Flip)) {
2010 Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
2011 Op.getOperand(1), Op.getOperand(0));
2013 Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
2014 Op.getOperand(0), Op.getOperand(1));
2015 return DAG.getNode(X86ISD::SETCC, MVT::i8,
2016 DAG.getConstant(X86CC, MVT::i8), Cond);
2018 assert(isFP && "Illegal integer SetCC!");
2020 Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
2021 Op.getOperand(0), Op.getOperand(1));
2022 std::vector<MVT::ValueType> Tys;
2023 std::vector<SDOperand> Ops;
2024 switch (SetCCOpcode) {
2025 default: assert(false && "Illegal floating point SetCC!");
2026 case ISD::SETOEQ: { // !PF & ZF
2027 Tys.push_back(MVT::i8);
2028 Tys.push_back(MVT::Flag);
2029 Ops.push_back(DAG.getConstant(X86ISD::COND_NP, MVT::i8));
2030 Ops.push_back(Cond);
2031 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
2032 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
2033 DAG.getConstant(X86ISD::COND_E, MVT::i8),
2035 return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2);
2037 case ISD::SETUNE: { // PF | !ZF
2038 Tys.push_back(MVT::i8);
2039 Tys.push_back(MVT::Flag);
2040 Ops.push_back(DAG.getConstant(X86ISD::COND_P, MVT::i8));
2041 Ops.push_back(Cond);
2042 SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
2043 SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
2044 DAG.getConstant(X86ISD::COND_NE, MVT::i8),
2046 return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2);
2052 MVT::ValueType VT = Op.getValueType();
2053 bool isFP = MVT::isFloatingPoint(VT);
2054 bool isFPStack = isFP && !X86ScalarSSE;
2055 bool isFPSSE = isFP && X86ScalarSSE;
2056 bool addTest = false;
2057 SDOperand Op0 = Op.getOperand(0);
2059 if (Op0.getOpcode() == ISD::SETCC)
2060 Op0 = LowerOperation(Op0, DAG);
2062 if (Op0.getOpcode() == X86ISD::SETCC) {
2063 // If condition flag is set by a X86ISD::CMP, then make a copy of it
2064 // (since flag operand cannot be shared). If the X86ISD::SETCC does not
2065 // have another use it will be eliminated.
2066 // If the X86ISD::SETCC has more than one use, then it's probably better
2067 // to use a test instead of duplicating the X86ISD::CMP (for register
2068 // pressure reason).
2069 if (Op0.getOperand(1).getOpcode() == X86ISD::CMP) {
2070 if (!Op0.hasOneUse()) {
2071 std::vector<MVT::ValueType> Tys;
2072 for (unsigned i = 0; i < Op0.Val->getNumValues(); ++i)
2073 Tys.push_back(Op0.Val->getValueType(i));
2074 std::vector<SDOperand> Ops;
2075 for (unsigned i = 0; i < Op0.getNumOperands(); ++i)
2076 Ops.push_back(Op0.getOperand(i));
2077 Op0 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
2080 CC = Op0.getOperand(0);
2081 Cond = Op0.getOperand(1);
2082 // Make a copy as flag result cannot be used by more than one.
2083 Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
2084 Cond.getOperand(0), Cond.getOperand(1));
2086 isFPStack && !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
2093 CC = DAG.getConstant(X86ISD::COND_NE, MVT::i8);
2094 Cond = DAG.getNode(X86ISD::TEST, MVT::Flag, Op0, Op0);
2097 std::vector<MVT::ValueType> Tys;
2098 Tys.push_back(Op.getValueType());
2099 Tys.push_back(MVT::Flag);
2100 std::vector<SDOperand> Ops;
2101 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
2102 // condition is true.
2103 Ops.push_back(Op.getOperand(2));
2104 Ops.push_back(Op.getOperand(1));
2106 Ops.push_back(Cond);
2107 return DAG.getNode(X86ISD::CMOV, Tys, Ops);
2110 bool addTest = false;
2111 SDOperand Cond = Op.getOperand(1);
2112 SDOperand Dest = Op.getOperand(2);
2114 if (Cond.getOpcode() == ISD::SETCC)
2115 Cond = LowerOperation(Cond, DAG);
2117 if (Cond.getOpcode() == X86ISD::SETCC) {
2118 // If condition flag is set by a X86ISD::CMP, then make a copy of it
2119 // (since flag operand cannot be shared). If the X86ISD::SETCC does not
2120 // have another use it will be eliminated.
2121 // If the X86ISD::SETCC has more than one use, then it's probably better
2122 // to use a test instead of duplicating the X86ISD::CMP (for register
2123 // pressure reason).
2124 if (Cond.getOperand(1).getOpcode() == X86ISD::CMP) {
2125 if (!Cond.hasOneUse()) {
2126 std::vector<MVT::ValueType> Tys;
2127 for (unsigned i = 0; i < Cond.Val->getNumValues(); ++i)
2128 Tys.push_back(Cond.Val->getValueType(i));
2129 std::vector<SDOperand> Ops;
2130 for (unsigned i = 0; i < Cond.getNumOperands(); ++i)
2131 Ops.push_back(Cond.getOperand(i));
2132 Cond = DAG.getNode(X86ISD::SETCC, Tys, Ops);
2135 CC = Cond.getOperand(0);
2136 Cond = Cond.getOperand(1);
2137 // Make a copy as flag result cannot be used by more than one.
2138 Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
2139 Cond.getOperand(0), Cond.getOperand(1));
2146 CC = DAG.getConstant(X86ISD::COND_NE, MVT::i8);
2147 Cond = DAG.getNode(X86ISD::TEST, MVT::Flag, Cond, Cond);
2149 return DAG.getNode(X86ISD::BRCOND, Op.getValueType(),
2150 Op.getOperand(0), Op.getOperand(2), CC, Cond);
2153 SDOperand InFlag(0, 0);
2154 SDOperand Chain = Op.getOperand(0);
2156 (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
2157 if (Align == 0) Align = 1;
2159 ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3));
2160 // If not DWORD aligned, call memset if size is less than the threshold.
2161 // It knows how to align to the right boundary first.
2162 if ((Align & 3) != 0 ||
2163 (I && I->getValue() < Subtarget->getMinRepStrSizeThreshold())) {
2164 MVT::ValueType IntPtr = getPointerTy();
2165 const Type *IntPtrTy = getTargetData().getIntPtrType();
2166 std::vector<std::pair<SDOperand, const Type*> > Args;
2167 Args.push_back(std::make_pair(Op.getOperand(1), IntPtrTy));
2168 // Extend the ubyte argument to be an int value for the call.
2169 SDOperand Val = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op.getOperand(2));
2170 Args.push_back(std::make_pair(Val, IntPtrTy));
2171 Args.push_back(std::make_pair(Op.getOperand(3), IntPtrTy));
2172 std::pair<SDOperand,SDOperand> CallResult =
2173 LowerCallTo(Chain, Type::VoidTy, false, CallingConv::C, false,
2174 DAG.getExternalSymbol("memset", IntPtr), Args, DAG);
2175 return CallResult.second;
2180 ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2));
2181 unsigned BytesLeft = 0;
2182 bool TwoRepStos = false;
2185 unsigned Val = ValC->getValue() & 255;
2187 // If the value is a constant, then we can potentially use larger sets.
2188 switch (Align & 3) {
2189 case 2: // WORD aligned
2191 Count = DAG.getConstant(I->getValue() / 2, MVT::i32);
2192 BytesLeft = I->getValue() % 2;
2193 Val = (Val << 8) | Val;
2196 case 0: // DWORD aligned
2199 Count = DAG.getConstant(I->getValue() / 4, MVT::i32);
2200 BytesLeft = I->getValue() % 4;
2202 Count = DAG.getNode(ISD::SRL, MVT::i32, Op.getOperand(3),
2203 DAG.getConstant(2, MVT::i8));
2206 Val = (Val << 8) | Val;
2207 Val = (Val << 16) | Val;
2210 default: // Byte aligned
2212 Count = Op.getOperand(3);
2217 Chain = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT),
2219 InFlag = Chain.getValue(1);
2222 Count = Op.getOperand(3);
2223 Chain = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag);
2224 InFlag = Chain.getValue(1);
2227 Chain = DAG.getCopyToReg(Chain, X86::ECX, Count, InFlag);
2228 InFlag = Chain.getValue(1);
2229 Chain = DAG.getCopyToReg(Chain, X86::EDI, Op.getOperand(1), InFlag);
2230 InFlag = Chain.getValue(1);
2232 std::vector<MVT::ValueType> Tys;
2233 Tys.push_back(MVT::Other);
2234 Tys.push_back(MVT::Flag);
2235 std::vector<SDOperand> Ops;
2236 Ops.push_back(Chain);
2237 Ops.push_back(DAG.getValueType(AVT));
2238 Ops.push_back(InFlag);
2239 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, Ops);
2242 InFlag = Chain.getValue(1);
2243 Count = Op.getOperand(3);
2244 MVT::ValueType CVT = Count.getValueType();
2245 SDOperand Left = DAG.getNode(ISD::AND, CVT, Count,
2246 DAG.getConstant(3, CVT));
2247 Chain = DAG.getCopyToReg(Chain, X86::ECX, Left, InFlag);
2248 InFlag = Chain.getValue(1);
2250 Tys.push_back(MVT::Other);
2251 Tys.push_back(MVT::Flag);
2253 Ops.push_back(Chain);
2254 Ops.push_back(DAG.getValueType(MVT::i8));
2255 Ops.push_back(InFlag);
2256 Chain = DAG.getNode(X86ISD::REP_STOS, Tys, Ops);
2257 } else if (BytesLeft) {
2258 // Issue stores for the last 1 - 3 bytes.
2260 unsigned Val = ValC->getValue() & 255;
2261 unsigned Offset = I->getValue() - BytesLeft;
2262 SDOperand DstAddr = Op.getOperand(1);
2263 MVT::ValueType AddrVT = DstAddr.getValueType();
2264 if (BytesLeft >= 2) {
2265 Value = DAG.getConstant((Val << 8) | Val, MVT::i16);
2266 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
2267 DAG.getNode(ISD::ADD, AddrVT, DstAddr,
2268 DAG.getConstant(Offset, AddrVT)),
2269 DAG.getSrcValue(NULL));
2274 if (BytesLeft == 1) {
2275 Value = DAG.getConstant(Val, MVT::i8);
2276 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
2277 DAG.getNode(ISD::ADD, AddrVT, DstAddr,
2278 DAG.getConstant(Offset, AddrVT)),
2279 DAG.getSrcValue(NULL));
2286 SDOperand Chain = Op.getOperand(0);
2288 (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
2289 if (Align == 0) Align = 1;
2291 ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3));
2292 // If not DWORD aligned, call memcpy if size is less than the threshold.
2293 // It knows how to align to the right boundary first.
2294 if ((Align & 3) != 0 ||
2295 (I && I->getValue() < Subtarget->getMinRepStrSizeThreshold())) {
2296 MVT::ValueType IntPtr = getPointerTy();
2297 const Type *IntPtrTy = getTargetData().getIntPtrType();
2298 std::vector<std::pair<SDOperand, const Type*> > Args;
2299 Args.push_back(std::make_pair(Op.getOperand(1), IntPtrTy));
2300 Args.push_back(std::make_pair(Op.getOperand(2), IntPtrTy));
2301 Args.push_back(std::make_pair(Op.getOperand(3), IntPtrTy));
2302 std::pair<SDOperand,SDOperand> CallResult =
2303 LowerCallTo(Chain, Type::VoidTy, false, CallingConv::C, false,
2304 DAG.getExternalSymbol("memcpy", IntPtr), Args, DAG);
2305 return CallResult.second;
2310 unsigned BytesLeft = 0;
2311 bool TwoRepMovs = false;
2312 switch (Align & 3) {
2313 case 2: // WORD aligned
2315 Count = DAG.getConstant(I->getValue() / 2, MVT::i32);
2316 BytesLeft = I->getValue() % 2;
2318 case 0: // DWORD aligned
2321 Count = DAG.getConstant(I->getValue() / 4, MVT::i32);
2322 BytesLeft = I->getValue() % 4;
2324 Count = DAG.getNode(ISD::SRL, MVT::i32, Op.getOperand(3),
2325 DAG.getConstant(2, MVT::i8));
2329 default: // Byte aligned
2331 Count = Op.getOperand(3);
2335 SDOperand InFlag(0, 0);
2336 Chain = DAG.getCopyToReg(Chain, X86::ECX, Count, InFlag);
2337 InFlag = Chain.getValue(1);
2338 Chain = DAG.getCopyToReg(Chain, X86::EDI, Op.getOperand(1), InFlag);
2339 InFlag = Chain.getValue(1);
2340 Chain = DAG.getCopyToReg(Chain, X86::ESI, Op.getOperand(2), InFlag);
2341 InFlag = Chain.getValue(1);
2343 std::vector<MVT::ValueType> Tys;
2344 Tys.push_back(MVT::Other);
2345 Tys.push_back(MVT::Flag);
2346 std::vector<SDOperand> Ops;
2347 Ops.push_back(Chain);
2348 Ops.push_back(DAG.getValueType(AVT));
2349 Ops.push_back(InFlag);
2350 Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, Ops);
2353 InFlag = Chain.getValue(1);
2354 Count = Op.getOperand(3);
2355 MVT::ValueType CVT = Count.getValueType();
2356 SDOperand Left = DAG.getNode(ISD::AND, CVT, Count,
2357 DAG.getConstant(3, CVT));
2358 Chain = DAG.getCopyToReg(Chain, X86::ECX, Left, InFlag);
2359 InFlag = Chain.getValue(1);
2361 Tys.push_back(MVT::Other);
2362 Tys.push_back(MVT::Flag);
2364 Ops.push_back(Chain);
2365 Ops.push_back(DAG.getValueType(MVT::i8));
2366 Ops.push_back(InFlag);
2367 Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, Ops);
2368 } else if (BytesLeft) {
2369 // Issue loads and stores for the last 1 - 3 bytes.
2370 unsigned Offset = I->getValue() - BytesLeft;
2371 SDOperand DstAddr = Op.getOperand(1);
2372 MVT::ValueType DstVT = DstAddr.getValueType();
2373 SDOperand SrcAddr = Op.getOperand(2);
2374 MVT::ValueType SrcVT = SrcAddr.getValueType();
2376 if (BytesLeft >= 2) {
2377 Value = DAG.getLoad(MVT::i16, Chain,
2378 DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
2379 DAG.getConstant(Offset, SrcVT)),
2380 DAG.getSrcValue(NULL));
2381 Chain = Value.getValue(1);
2382 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
2383 DAG.getNode(ISD::ADD, DstVT, DstAddr,
2384 DAG.getConstant(Offset, DstVT)),
2385 DAG.getSrcValue(NULL));
2390 if (BytesLeft == 1) {
2391 Value = DAG.getLoad(MVT::i8, Chain,
2392 DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
2393 DAG.getConstant(Offset, SrcVT)),
2394 DAG.getSrcValue(NULL));
2395 Chain = Value.getValue(1);
2396 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
2397 DAG.getNode(ISD::ADD, DstVT, DstAddr,
2398 DAG.getConstant(Offset, DstVT)),
2399 DAG.getSrcValue(NULL));
2406 // ConstantPool, GlobalAddress, and ExternalSymbol are lowered as their
2407 // target countpart wrapped in the X86ISD::Wrapper node. Suppose N is
2408 // one of the above mentioned nodes. It has to be wrapped because otherwise
2409 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
2410 // be used to form addressing mode. These wrapped nodes will be selected
2412 case ISD::ConstantPool: {
2413 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2414 SDOperand Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(),
2415 DAG.getTargetConstantPool(CP->get(), getPointerTy(),
2416 CP->getAlignment()));
2417 if (Subtarget->isTargetDarwin()) {
2418 // With PIC, the address is actually $g + Offset.
2419 if (getTargetMachine().getRelocationModel() == Reloc::PIC)
2420 Result = DAG.getNode(ISD::ADD, getPointerTy(),
2421 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), Result);
2426 case ISD::GlobalAddress: {
2427 GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
2428 SDOperand Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(),
2429 DAG.getTargetGlobalAddress(GV, getPointerTy()));
2430 if (Subtarget->isTargetDarwin()) {
2431 // With PIC, the address is actually $g + Offset.
2432 if (getTargetMachine().getRelocationModel() == Reloc::PIC)
2433 Result = DAG.getNode(ISD::ADD, getPointerTy(),
2434 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), Result);
2436 // For Darwin, external and weak symbols are indirect, so we want to load
2437 // the value at address GV, not the value of GV itself. This means that
2438 // the GlobalAddress must be in the base or index register of the address,
2439 // not the GV offset field.
2440 if (getTargetMachine().getRelocationModel() != Reloc::Static &&
2441 DarwinGVRequiresExtraLoad(GV))
2442 Result = DAG.getLoad(MVT::i32, DAG.getEntryNode(),
2443 Result, DAG.getSrcValue(NULL));
2448 case ISD::ExternalSymbol: {
2449 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
2450 SDOperand Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(),
2451 DAG.getTargetExternalSymbol(Sym, getPointerTy()));
2452 if (Subtarget->isTargetDarwin()) {
2453 // With PIC, the address is actually $g + Offset.
2454 if (getTargetMachine().getRelocationModel() == Reloc::PIC)
2455 Result = DAG.getNode(ISD::ADD, getPointerTy(),
2456 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), Result);
2461 case ISD::VASTART: {
2462 // vastart just stores the address of the VarArgsFrameIndex slot into the
2463 // memory location argument.
2464 // FIXME: Replace MVT::i32 with PointerTy
2465 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32);
2466 return DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0), FR,
2467 Op.getOperand(1), Op.getOperand(2));
2472 switch(Op.getNumOperands()) {
2474 assert(0 && "Do not know how to return this many arguments!");
2477 return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Op.getOperand(0),
2478 DAG.getConstant(getBytesToPopOnReturn(), MVT::i16));
2480 MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
2481 if (MVT::isInteger(ArgVT))
2482 Copy = DAG.getCopyToReg(Op.getOperand(0), X86::EAX, Op.getOperand(1),
2484 else if (!X86ScalarSSE) {
2485 std::vector<MVT::ValueType> Tys;
2486 Tys.push_back(MVT::Other);
2487 Tys.push_back(MVT::Flag);
2488 std::vector<SDOperand> Ops;
2489 Ops.push_back(Op.getOperand(0));
2490 Ops.push_back(Op.getOperand(1));
2491 Copy = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops);
2494 SDOperand Chain = Op.getOperand(0);
2495 SDOperand Value = Op.getOperand(1);
2497 if (Value.getOpcode() == ISD::LOAD &&
2498 (Chain == Value.getValue(1) || Chain == Value.getOperand(0))) {
2499 Chain = Value.getOperand(0);
2500 MemLoc = Value.getOperand(1);
2502 // Spill the value to memory and reload it into top of stack.
2503 unsigned Size = MVT::getSizeInBits(ArgVT)/8;
2504 MachineFunction &MF = DAG.getMachineFunction();
2505 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
2506 MemLoc = DAG.getFrameIndex(SSFI, getPointerTy());
2507 Chain = DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0),
2508 Value, MemLoc, DAG.getSrcValue(0));
2510 std::vector<MVT::ValueType> Tys;
2511 Tys.push_back(MVT::f64);
2512 Tys.push_back(MVT::Other);
2513 std::vector<SDOperand> Ops;
2514 Ops.push_back(Chain);
2515 Ops.push_back(MemLoc);
2516 Ops.push_back(DAG.getValueType(ArgVT));
2517 Copy = DAG.getNode(X86ISD::FLD, Tys, Ops);
2519 Tys.push_back(MVT::Other);
2520 Tys.push_back(MVT::Flag);
2522 Ops.push_back(Copy.getValue(1));
2523 Ops.push_back(Copy);
2524 Copy = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops);
2529 Copy = DAG.getCopyToReg(Op.getOperand(0), X86::EDX, Op.getOperand(2),
2531 Copy = DAG.getCopyToReg(Copy, X86::EAX,Op.getOperand(1),Copy.getValue(1));
2534 return DAG.getNode(X86ISD::RET_FLAG, MVT::Other,
2535 Copy, DAG.getConstant(getBytesToPopOnReturn(), MVT::i16),
2538 case ISD::SCALAR_TO_VECTOR: {
2539 SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0));
2540 return DAG.getNode(X86ISD::S2VEC, Op.getValueType(), AnyExt);
2542 case ISD::VECTOR_SHUFFLE: {
2543 SDOperand V1 = Op.getOperand(0);
2544 SDOperand V2 = Op.getOperand(1);
2545 SDOperand PermMask = Op.getOperand(2);
2546 MVT::ValueType VT = Op.getValueType();
2547 unsigned NumElems = PermMask.getNumOperands();
2549 // Splat && PSHUFD's 2nd vector must be undef.
2550 if (X86::isSplatMask(PermMask.Val)) {
2551 if (V2.getOpcode() != ISD::UNDEF)
2552 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
2553 DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask);
2557 if (X86::isUNPCKLMask(PermMask.Val) ||
2558 X86::isUNPCKHMask(PermMask.Val))
2559 // Leave the VECTOR_SHUFFLE alone. It matches {P}UNPCKL*.
2563 return NormalizeVectorShuffle(V1, V2, PermMask, VT, DAG);
2565 // If VT is integer, try PSHUF* first, then SHUFP*.
2566 if (MVT::isInteger(VT)) {
2567 if (X86::isPSHUFDMask(PermMask.Val) ||
2568 X86::isPSHUFHWMask(PermMask.Val) ||
2569 X86::isPSHUFLWMask(PermMask.Val)) {
2570 if (V2.getOpcode() != ISD::UNDEF)
2571 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
2572 DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask);
2576 if (X86::isSHUFPMask(PermMask.Val))
2577 return NormalizeVectorShuffle(V1, V2, PermMask, VT, DAG);
2579 // Floating point cases in the other order.
2580 if (X86::isSHUFPMask(PermMask.Val))
2581 return NormalizeVectorShuffle(V1, V2, PermMask, VT, DAG);
2582 if (X86::isPSHUFDMask(PermMask.Val) ||
2583 X86::isPSHUFHWMask(PermMask.Val) ||
2584 X86::isPSHUFLWMask(PermMask.Val)) {
2585 if (V2.getOpcode() != ISD::UNDEF)
2586 return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
2587 DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask);
2592 assert(0 && "Unexpected VECTOR_SHUFFLE to lower");
2595 case ISD::BUILD_VECTOR: {
2596 // All one's are handled with pcmpeqd.
2597 if (ISD::isBuildVectorAllOnes(Op.Val))
2600 std::set<SDOperand> Values;
2601 SDOperand Elt0 = Op.getOperand(0);
2602 Values.insert(Elt0);
2603 bool Elt0IsZero = (isa<ConstantSDNode>(Elt0) &&
2604 cast<ConstantSDNode>(Elt0)->getValue() == 0) ||
2605 (isa<ConstantFPSDNode>(Elt0) &&
2606 cast<ConstantFPSDNode>(Elt0)->isExactlyValue(0.0));
2607 bool RestAreZero = true;
2608 unsigned NumElems = Op.getNumOperands();
2609 for (unsigned i = 1; i < NumElems; ++i) {
2610 SDOperand Elt = Op.getOperand(i);
2611 if (ConstantFPSDNode *FPC = dyn_cast<ConstantFPSDNode>(Elt)) {
2612 if (!FPC->isExactlyValue(+0.0))
2613 RestAreZero = false;
2614 } else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
2615 if (!C->isNullValue())
2616 RestAreZero = false;
2618 RestAreZero = false;
2623 if (Elt0IsZero) return Op;
2625 // Zero extend a scalar to a vector.
2626 return DAG.getNode(X86ISD::ZEXT_S2VEC, Op.getValueType(), Elt0);
2629 if (Values.size() > 2) {
2630 // Expand into a number of unpckl*.
2632 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
2633 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
2634 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
2635 MVT::ValueType VT = Op.getValueType();
2636 MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
2637 MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
2638 std::vector<SDOperand> MaskVec;
2639 for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
2640 MaskVec.push_back(DAG.getConstant(i, BaseVT));
2641 MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT));
2643 SDOperand PermMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskVec);
2644 std::vector<SDOperand> V(NumElems);
2645 for (unsigned i = 0; i < NumElems; ++i)
2646 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
2648 while (NumElems != 0) {
2649 for (unsigned i = 0; i < NumElems; ++i)
2650 V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems],
// getTargetNodeName - Map an X86-specific DAG node opcode to a human-readable
// name for SelectionDAG debug dumps.  The default case returns NULL for any
// opcode this target does not name.
2662 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
2664   default: return NULL;
2665   case X86ISD::SHLD: return "X86ISD::SHLD";
2666   case X86ISD::SHRD: return "X86ISD::SHRD";
2667   case X86ISD::FAND: return "X86ISD::FAND";
2668   case X86ISD::FXOR: return "X86ISD::FXOR";
2669   case X86ISD::FILD: return "X86ISD::FILD";
2670   case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
2671   case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
2672   case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
2673   case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
2674   case X86ISD::FLD: return "X86ISD::FLD";
2675   case X86ISD::FST: return "X86ISD::FST";
2676   case X86ISD::FP_GET_RESULT: return "X86ISD::FP_GET_RESULT";
2677   case X86ISD::FP_SET_RESULT: return "X86ISD::FP_SET_RESULT";
2678   case X86ISD::CALL: return "X86ISD::CALL";
2679   case X86ISD::TAILCALL: return "X86ISD::TAILCALL";
2680   case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
2681   case X86ISD::CMP: return "X86ISD::CMP";
2682   case X86ISD::TEST: return "X86ISD::TEST";
2683   case X86ISD::SETCC: return "X86ISD::SETCC";
2684   case X86ISD::CMOV: return "X86ISD::CMOV";
2685   case X86ISD::BRCOND: return "X86ISD::BRCOND";
2686   case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
2687   case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
2688   case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
2689   case X86ISD::LOAD_PACK: return "X86ISD::LOAD_PACK";
2690   case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
2691   case X86ISD::Wrapper: return "X86ISD::Wrapper";
2692   case X86ISD::S2VEC: return "X86ISD::S2VEC";
2693   case X86ISD::ZEXT_S2VEC: return "X86ISD::ZEXT_S2VEC";
// computeMaskedBitsForTargetNode - Report which result bits of a target-
// specific node are guaranteed zero (KnownZero) or one (KnownOne), so the
// DAG combiner can simplify uses of the node.
2697 void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
2699 uint64_t &KnownZero,
2701 unsigned Depth) const {
2703   unsigned Opc = Op.getOpcode();
// Start conservatively: claim nothing about any bit.
2704   KnownZero = KnownOne = 0;   // Don't know anything.
// This hook is only ever called for target opcodes, never generic ISD ones.
2708   assert(Opc >= ISD::BUILTIN_OP_END && "Expected a target specific node");
// All bits of the integer result except bit 0 are known zero, i.e. the node
// only produces 0 or 1.  NOTE(review): presumably this is the X86ISD::SETCC
// case (its i8 result is 0 or 1 per ZeroOrOneSetCCResult) -- confirm against
// the dispatching switch on Opc.
2711       KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
// getRegClassForInlineAsmConstraint - Translate a single-letter GCC inline-asm
// register-constraint letter into the explicit list of X86 registers that may
// satisfy it.  Returns an empty vector for constraints this target does not
// handle (the SSE letters also return empty when the required subtarget
// feature is absent).
2716 std::vector<unsigned> X86TargetLowering::
2717 getRegClassForInlineAsmConstraint(const std::string &Constraint,
2718 MVT::ValueType VT) const {
2719   if (Constraint.size() == 1) {
2720     // FIXME: not handling fp-stack yet!
2721     // FIXME: not handling MMX registers yet ('y' constraint).
2722     switch (Constraint[0]) {      // GCC X86 Constraint Letters
2723     default: break;  // Unknown constraint letter
2724     case 'r':   // GENERAL_REGS
2725     case 'R':   // LEGACY_REGS
2726       return make_vector<unsigned>(X86::EAX, X86::EBX, X86::ECX, X86::EDX,
2727 X86::ESI, X86::EDI, X86::EBP, X86::ESP, 0);
2728     case 'l':   // INDEX_REGS
// Same as 'r'/'R' minus ESP: the stack pointer cannot serve as an index.
2729       return make_vector<unsigned>(X86::EAX, X86::EBX, X86::ECX, X86::EDX,
2730 X86::ESI, X86::EDI, X86::EBP, 0);
2731     case 'q':   // Q_REGS (GENERAL_REGS in 64-bit mode)
// Registers with byte-addressable subregisters (AL/BL/CL/DL).
2733       return make_vector<unsigned>(X86::EAX, X86::EBX, X86::ECX, X86::EDX, 0);
2734     case 'x':   // SSE_REGS if SSE1 allowed
2735       if (Subtarget->hasSSE1())
2736         return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2737 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
2739       return std::vector<unsigned>();
2740     case 'Y':   // SSE_REGS if SSE2 allowed
2741       if (Subtarget->hasSSE2())
2742         return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2743 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
2745       return std::vector<unsigned>();
// Multi-character or unrecognized constraints: nothing to offer.
2749   return std::vector<unsigned>();
2752 /// isLegalAddressImmediate - Return true if the integer value or
2753 /// GlobalValue can be used as the offset of the target addressing mode.
2754 bool X86TargetLowering::isLegalAddressImmediate(int64_t V) const {
2755   // X86 allows a sign-extended 32-bit immediate field.
// NOTE(review): this admits any V with magnitude below 2^32, which is wider
// than the [-2^31, 2^31-1] range a strictly sign-extended 32-bit field holds.
// Presumably the wider range is acceptable because 32-bit displacements wrap
// on a 32-bit target -- confirm before tightening.
2756   return (V > -(1LL << 32) && V < (1LL << 32)-1);
// isLegalAddressImmediate (GlobalValue overload) - Return whether a global's
// address may be folded directly into an addressing-mode displacement.  On
// Darwin this depends on the relocation model; under DynamicNoPIC a global is
// only foldable when it does not need an extra indirection load (i.e. it is
// not an indirect external/weak symbol).
2759 bool X86TargetLowering::isLegalAddressImmediate(GlobalValue *GV) const {
2760   if (Subtarget->isTargetDarwin()) {
2761     Reloc::Model RModel = getTargetMachine().getRelocationModel();
2762     if (RModel == Reloc::Static)
2764     else if (RModel == Reloc::DynamicNoPIC)
2765       return !DarwinGVRequiresExtraLoad(GV);
2772 /// isShuffleMaskLegal - Targets can use this to indicate that they only
2773 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
2774 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
2775 /// are assumed to be legal.
2777 X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const {
2778   // Only do shuffles on 128-bit vector types for now.
// 64-bit (MMX-sized) vector shuffles are rejected outright.
2779   if (MVT::getSizeInBits(VT) == 64) return false;
// A mask is legal when it is trivially small (two elements) or matches one of
// the patterns a single X86 shuffle instruction can perform: splats, PSHUFD /
// PSHUFHW / PSHUFLW, SHUFP{S,D}, or {P}UNPCKL/{P}UNPCKH.
2780   return (Mask.Val->getNumOperands() == 2 ||
2781 X86::isSplatMask(Mask.Val) ||
2782 X86::isPSHUFDMask(Mask.Val) ||
2783 X86::isPSHUFHWMask(Mask.Val) ||
2784 X86::isPSHUFLWMask(Mask.Val) ||
2785 X86::isSHUFPMask(Mask.Val) ||
2786 X86::isUNPCKLMask(Mask.Val) ||
2787 X86::isUNPCKHMask(Mask.Val));