//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation ---*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;

static cl::opt<bool> EnableFastCC("enable-x86-fastcc", cl::Hidden,
                                  cl::desc("Enable fastcc on X86"));
X86TargetLowering::X86TargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSE = Subtarget->hasSSE2();

  // Set up the TargetLowering object.

  // X86 is weird, it always uses i8 for shift amounts and setcc results.
  setShiftAmountType(MVT::i8);
  setSetCCResultType(MVT::i8);
  setSetCCResultContents(ZeroOrOneSetCCResult);
  setSchedulingPreference(SchedulingForRegPressure);
  setShiftAmountFlavor(Mask);   // shl X, 32 == shl X, 0
  setStackPointerRegisterToSaveRestore(X86::ESP);
  if (!Subtarget->isTargetDarwin())
    // Targets other than Darwin use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmpLongJmp(true);
57 // Add legal addressing mode scale values.
58 addLegalAddressScale(8);
59 addLegalAddressScale(4);
60 addLegalAddressScale(2);
61 // Enter the ones which require both scale + index last. These are more
63 addLegalAddressScale(9);
64 addLegalAddressScale(5);
65 addLegalAddressScale(3);
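  // Note: scales 9, 5, and 3 are only reachable as base + index*8/4/2 with the
  // base and index set to the same register (e.g. lea (%eax,%eax,2) computes
  // 3*EAX), so they consume both the base and index slots of the address.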
  // Set up the register classes.
  addRegisterClass(MVT::i8, X86::R8RegisterClass);
  addRegisterClass(MVT::i16, X86::R16RegisterClass);
  addRegisterClass(MVT::i32, X86::R32RegisterClass);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i8   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i16  , Promote);
  if (X86ScalarSSE)
    // No SSE i64 SINT_TO_FP, so expand i32 UINT_TO_FP instead.
    setOperationAction(ISD::UINT_TO_FP     , MVT::i32  , Expand);
  else
    setOperationAction(ISD::UINT_TO_FP     , MVT::i32  , Promote);
  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::SINT_TO_FP       , MVT::i8   , Promote);

  // SSE has no i16 to fp conversion, only i32.
  if (X86ScalarSSE)
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Promote);
  else {
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Custom);
    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
  }
  // We can handle SINT_TO_FP and FP_TO_SINT from/to i64 even though i64
  // isn't legal.
  setOperationAction(ISD::SINT_TO_FP       , MVT::i64  , Custom);
  setOperationAction(ISD::FP_TO_SINT       , MVT::i64  , Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_SINT       , MVT::i8   , Promote);

  if (X86ScalarSSE)
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Promote);
  else {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Custom);
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
  }
  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i8   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i16  , Promote);

  if (X86ScalarSSE && !Subtarget->hasSSE3())
    // Expand FP_TO_UINT into a select.
    // FIXME: We would like to use a Custom expander here eventually to do
    // the optimal thing for SSE vs. the default expansion in the legalizer.
    setOperationAction(ISD::FP_TO_UINT     , MVT::i32  , Expand);
  else
    // With SSE3 we can use fisttpll to convert to a signed i64.
    setOperationAction(ISD::FP_TO_UINT     , MVT::i32  , Promote);
  setOperationAction(ISD::BIT_CONVERT      , MVT::f32  , Expand);
  setOperationAction(ISD::BIT_CONVERT      , MVT::i32  , Expand);

  setOperationAction(ISD::BRCOND           , MVT::Other, Custom);
  setOperationAction(ISD::BR_CC            , MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC        , MVT::Other, Expand);
  setOperationAction(ISD::MEMMOVE          , MVT::Other, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8   , Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);
  setOperationAction(ISD::FP_ROUND_INREG   , MVT::f32  , Expand);
  setOperationAction(ISD::SEXTLOAD         , MVT::i1   , Expand);
  setOperationAction(ISD::FREM             , MVT::f64  , Expand);
  setOperationAction(ISD::CTPOP            , MVT::i8   , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i8   , Expand);
  setOperationAction(ISD::CTLZ             , MVT::i8   , Expand);
  setOperationAction(ISD::CTPOP            , MVT::i16  , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i16  , Expand);
  setOperationAction(ISD::CTLZ             , MVT::i16  , Expand);
  setOperationAction(ISD::CTPOP            , MVT::i32  , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i32  , Expand);
  setOperationAction(ISD::CTLZ             , MVT::i32  , Expand);
  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64  , Custom);
  setOperationAction(ISD::BSWAP            , MVT::i16  , Expand);
  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT           , MVT::i1   , Promote);
  setOperationAction(ISD::SELECT           , MVT::i8   , Promote);

  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT           , MVT::i16  , Custom);
  setOperationAction(ISD::SELECT           , MVT::i32  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f32  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f64  , Custom);
  setOperationAction(ISD::SETCC            , MVT::i8   , Custom);
  setOperationAction(ISD::SETCC            , MVT::i16  , Custom);
  setOperationAction(ISD::SETCC            , MVT::i32  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f32  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f64  , Custom);
  // X86 ret instruction may pop stack.
  setOperationAction(ISD::RET              , MVT::Other, Custom);

  setOperationAction(ISD::ConstantPool     , MVT::i32  , Custom);
  setOperationAction(ISD::GlobalAddress    , MVT::i32  , Custom);
  setOperationAction(ISD::ExternalSymbol   , MVT::i32  , Custom);
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS        , MVT::i32  , Custom);
  setOperationAction(ISD::SRA_PARTS        , MVT::i32  , Custom);
  setOperationAction(ISD::SRL_PARTS        , MVT::i32  , Custom);
  // X86 wants to expand memset / memcpy itself.
  setOperationAction(ISD::MEMSET           , MVT::Other, Custom);
  setOperationAction(ISD::MEMCPY           , MVT::Other, Custom);
  // We don't have line number support yet.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin())
    setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART          , MVT::Other, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VAARG            , MVT::Other, Expand);
  setOperationAction(ISD::VACOPY           , MVT::Other, Expand);
  setOperationAction(ISD::VAEND            , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE        , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE     , MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand);

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  if (X86ScalarSSE) {
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // SSE has no load+extend ops.
    setOperationAction(ISD::EXTLOAD,  MVT::f32, Expand);
    setOperationAction(ISD::ZEXTLOAD, MVT::f32, Expand);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f64, Custom);
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f64, Custom);
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN , MVT::f64, Expand);
    setOperationAction(ISD::FCOS , MVT::f64, Expand);
    setOperationAction(ISD::FREM , MVT::f64, Expand);
    setOperationAction(ISD::FSIN , MVT::f32, Expand);
    setOperationAction(ISD::FCOS , MVT::f32, Expand);
    setOperationAction(ISD::FREM , MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
    addLegalFPImmediate(+0.0); // xorps / xorpd
  } else {
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFPRegisterClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN , MVT::f64 , Expand);
      setOperationAction(ISD::FCOS , MVT::f64 , Expand);
    }

    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    addLegalFPImmediate(+0.0); // FLD0
    addLegalFPImmediate(+1.0); // FLD1
    addLegalFPImmediate(-0.0); // FLD0/FCHS
    addLegalFPImmediate(-1.0); // FLD1/FCHS
  }

  // First set operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
  for (unsigned VT = (unsigned)MVT::Vector + 1;
       VT != (unsigned)MVT::LAST_VALUETYPE; VT++) {
    setOperationAction(ISD::ADD , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SUB , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  (MVT::ValueType)VT, Expand);
  }
  if (Subtarget->hasMMX()) {
    addRegisterClass(MVT::v8i8,  X86::VR64RegisterClass);
    addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
    addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);

    // FIXME: add MMX packed arithmetics
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8,  Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Expand);
  }
  if (Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD,                MVT::v4f32, Legal);
    setOperationAction(ISD::SUB,                MVT::v4f32, Legal);
    setOperationAction(ISD::MUL,                MVT::v4f32, Legal);
    setOperationAction(ISD::LOAD,               MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
  }
  if (Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD,                MVT::v2f64, Legal);
    setOperationAction(ISD::ADD,                MVT::v16i8, Legal);
    setOperationAction(ISD::ADD,                MVT::v8i16, Legal);
    setOperationAction(ISD::ADD,                MVT::v4i32, Legal);
    setOperationAction(ISD::SUB,                MVT::v2f64, Legal);
    setOperationAction(ISD::SUB,                MVT::v16i8, Legal);
    setOperationAction(ISD::SUB,                MVT::v8i16, Legal);
    setOperationAction(ISD::SUB,                MVT::v4i32, Legal);
    setOperationAction(ISD::MUL,                MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD,               MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD,               MVT::v16i8, Legal);
    setOperationAction(ISD::LOAD,               MVT::v8i16, Legal);
    setOperationAction(ISD::LOAD,               MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD,               MVT::v2i64, Legal);
    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v16i8, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v8i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2i64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v8i16, Custom);
  }
  computeRegisterProperties();

  // FIXME: These should be based on subtarget info. Plus, the values should
  // be smaller when we are in optimizing for size mode.
  maxStoresPerMemset = 16;  // For %llvm.memset -> sequence of stores
  maxStoresPerMemcpy = 16;  // For %llvm.memcpy -> sequence of stores
  maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
  allowUnalignedMemoryAccesses = true; // x86 supports it!
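  // Note: unaligned scalar loads and stores are architecturally legal on x86;
  // at worst they are slower than aligned ones, so no expansion is needed.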
}

std::vector<SDOperand>
X86TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
  if (F.getCallingConv() == CallingConv::Fast && EnableFastCC)
    return LowerFastCCArguments(F, DAG);
  return LowerCCCArguments(F, DAG);
}
std::pair<SDOperand, SDOperand>
X86TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy,
                               bool isVarArg, unsigned CallingConv,
                               bool isTailCall,
                               SDOperand Callee, ArgListTy &Args,
                               SelectionDAG &DAG) {
  assert((!isVarArg || CallingConv == CallingConv::C) &&
         "Only C takes varargs!");

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
  else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());

  if (CallingConv == CallingConv::Fast && EnableFastCC)
    return LowerFastCCCallTo(Chain, RetTy, isTailCall, Callee, Args, DAG);
  return LowerCCCCallTo(Chain, RetTy, isVarArg, isTailCall, Callee, Args, DAG);
}
//===----------------------------------------------------------------------===//
//                    C Calling Convention implementation
//===----------------------------------------------------------------------===//

std::vector<SDOperand>
X86TargetLowering::LowerCCCArguments(Function &F, SelectionDAG &DAG) {
  std::vector<SDOperand> ArgValues;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  // Add DAG nodes to load the arguments... On entry to a function on the X86,
  // the stack frame looks like this:
  //
  // [ESP]     -- return address
  // [ESP + 4] -- first argument (leftmost lexically)
  // [ESP + 8] -- second argument, if first argument is four bytes in size
  // ...
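  // For example, for "void f(int a, double b)" the caller leaves a at
  // [ESP + 4] and b at [ESP + 8] .. [ESP + 15], matching the sizes computed
  // in the switch below.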
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot.
  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
    MVT::ValueType ObjectVT = getValueType(I->getType());
    unsigned ArgIncrement = 4;
    unsigned ObjSize = 0;
    switch (ObjectVT) {
    default: assert(0 && "Unhandled argument type!");
    case MVT::i1:
    case MVT::i8:  ObjSize = 1;                break;
    case MVT::i16: ObjSize = 2;                break;
    case MVT::i32: ObjSize = 4;                break;
    case MVT::i64: ObjSize = ArgIncrement = 8; break;
    case MVT::f32: ObjSize = 4;                break;
    case MVT::f64: ObjSize = ArgIncrement = 8; break;
    }
    // Create the frame index object for this incoming parameter...
    int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);

    // Create the SelectionDAG nodes corresponding to a load from this
    // parameter.
    SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);

    // Don't codegen dead arguments (FIXME: this check should eventually be
    // removed).
    SDOperand ArgValue;
    if (!I->use_empty())
      ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
                             DAG.getSrcValue(NULL));
    else if (MVT::isInteger(ObjectVT))
      ArgValue = DAG.getConstant(0, ObjectVT);
    else
      ArgValue = DAG.getConstantFP(0, ObjectVT);
    ArgValues.push_back(ArgValue);

    ArgOffset += ArgIncrement;   // Move on to the next argument...
  }
  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (F.isVarArg())
    VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
  ReturnAddrIndex = 0;     // No return address slot generated yet.
  BytesToPopOnReturn = 0;  // Callee pops nothing.
  BytesCallerReserves = ArgOffset;
  // Finally, inform the code generator which regs we return values in.
  switch (getValueType(F.getReturnType())) {
  default: assert(0 && "Unknown type!");
  case MVT::isVoid: break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    MF.addLiveOut(X86::EAX);
    break;
  case MVT::i64:
    MF.addLiveOut(X86::EAX);
    MF.addLiveOut(X86::EDX);
    break;
  case MVT::f32:
  case MVT::f64:
    MF.addLiveOut(X86::ST0);
    break;
  }
  return ArgValues;
}
std::pair<SDOperand, SDOperand>
X86TargetLowering::LowerCCCCallTo(SDOperand Chain, const Type *RetTy,
                                  bool isVarArg, bool isTailCall,
                                  SDOperand Callee, ArgListTy &Args,
                                  SelectionDAG &DAG) {
  // Count how many bytes are to be pushed on the stack.
  unsigned NumBytes = 0;

  if (Args.empty()) {
    // Save zero bytes.
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(0, getPointerTy()));
  } else {
    for (unsigned i = 0, e = Args.size(); i != e; ++i)
      switch (getValueType(Args[i].second)) {
      default: assert(0 && "Unknown value type!");
      case MVT::i1:
      case MVT::i8:
      case MVT::i16:
      case MVT::i32:
      case MVT::f32:
        NumBytes += 4;
        break;
      case MVT::i64:
      case MVT::f64:
        NumBytes += 8;
        break;
      }

    Chain = DAG.getCALLSEQ_START(Chain,
                                 DAG.getConstant(NumBytes, getPointerTy()));
  }
  // Arguments go on the stack in reverse order, as specified by the ABI.
  unsigned ArgOffset = 0;
  SDOperand StackPtr = DAG.getRegister(X86::ESP, MVT::i32);
  std::vector<SDOperand> Stores;

  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
    SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
    PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);

    switch (getValueType(Args[i].second)) {
    default: assert(0 && "Unexpected ValueType for argument!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
      // Promote the integer to 32 bits. If the input type is signed use a
      // sign extend, otherwise use a zero extend.
      if (Args[i].second->isSigned())
        Args[i].first = DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Args[i].first);
      else
        Args[i].first = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Args[i].first);
      // Fall through.
    case MVT::i32:
    case MVT::f32:
      Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                   Args[i].first, PtrOff,
                                   DAG.getSrcValue(NULL)));
      ArgOffset += 4;
      break;
    case MVT::i64:
    case MVT::f64:
      Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                   Args[i].first, PtrOff,
                                   DAG.getSrcValue(NULL)));
      ArgOffset += 8;
      break;
    }
  }

  if (!Stores.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);
  std::vector<MVT::ValueType> RetVals;
  MVT::ValueType RetTyVT = getValueType(RetTy);
  RetVals.push_back(MVT::Other);

  // The result values produced have to be legal. Promote the result.
  switch (RetTyVT) {
  case MVT::isVoid: break;
  default:
    RetVals.push_back(RetTyVT);
    break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    RetVals.push_back(MVT::i32);
    break;
  case MVT::f32:
    if (X86ScalarSSE)
      RetVals.push_back(MVT::f32);
    else
      RetVals.push_back(MVT::f64);
    break;
  case MVT::i64:
    RetVals.push_back(MVT::i32);
    RetVals.push_back(MVT::i32);
    break;
  }
  std::vector<MVT::ValueType> NodeTys;
  NodeTys.push_back(MVT::Other);   // Returns a chain
  NodeTys.push_back(MVT::Flag);    // Returns a flag for retval copy to use.
  std::vector<SDOperand> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // FIXME: Do not generate X86ISD::TAILCALL for now.
  Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops);
  SDOperand InFlag = Chain.getValue(1);

  NodeTys.clear();
  NodeTys.push_back(MVT::Other);   // Returns a chain
  NodeTys.push_back(MVT::Flag);    // Returns a flag for retval copy to use.
  Ops.clear();
  Ops.push_back(Chain);
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(DAG.getConstant(0, getPointerTy()));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, Ops);
  InFlag = Chain.getValue(1);
  SDOperand RetVal;
  if (RetTyVT != MVT::isVoid) {
    switch (RetTyVT) {
    default: assert(0 && "Unknown value type to return!");
    case MVT::i1:
    case MVT::i8:
      RetVal = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag);
      Chain = RetVal.getValue(1);
      if (RetTyVT == MVT::i1)
        RetVal = DAG.getNode(ISD::TRUNCATE, MVT::i1, RetVal);
      break;
    case MVT::i16:
      RetVal = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag);
      Chain = RetVal.getValue(1);
      break;
    case MVT::i32:
      RetVal = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
      Chain = RetVal.getValue(1);
      break;
    case MVT::i64: {
      SDOperand Lo = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
      SDOperand Hi = DAG.getCopyFromReg(Lo.getValue(1), X86::EDX, MVT::i32,
                                        Lo.getValue(2));
      RetVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Lo, Hi);
      Chain = Hi.getValue(1);
      break;
    }
    case MVT::f32:
    case MVT::f64: {
      std::vector<MVT::ValueType> Tys;
      Tys.push_back(MVT::f64);
      Tys.push_back(MVT::Other);
      Tys.push_back(MVT::Flag);
      std::vector<SDOperand> Ops;
      Ops.push_back(Chain);
      Ops.push_back(InFlag);
      RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, Ops);
      Chain = RetVal.getValue(1);
      InFlag = RetVal.getValue(2);

      if (X86ScalarSSE) {
        // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
        // shouldn't be necessary except that RFP cannot be live across
        // multiple blocks. When stackifier is fixed, they can be uncoupled.
        MachineFunction &MF = DAG.getMachineFunction();
        int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
        SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
        Tys.clear();
        Tys.push_back(MVT::Other);
        Ops.clear();
        Ops.push_back(Chain);
        Ops.push_back(RetVal);
        Ops.push_back(StackSlot);
        Ops.push_back(DAG.getValueType(RetTyVT));
        Ops.push_back(InFlag);
        Chain = DAG.getNode(X86ISD::FST, Tys, Ops);
        RetVal = DAG.getLoad(RetTyVT, Chain, StackSlot,
                             DAG.getSrcValue(NULL));
        Chain = RetVal.getValue(1);
      }

      if (RetTyVT == MVT::f32 && !X86ScalarSSE)
        // FIXME: we would really like to remember that this FP_ROUND
        // operation is okay to eliminate if we allow excess FP precision.
        RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
      break;
    }
    }
  }

  return std::make_pair(RetVal, Chain);
}
//===----------------------------------------------------------------------===//
//                    Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
//
// The X86 'fast' calling convention passes up to two integer arguments in
// registers (an appropriate portion of EAX/EDX), passes arguments in C order,
// requires that the callee pop its arguments off the stack (allowing proper
// tail calls), and has the same return value conventions as the C calling
// convention.
//
// This calling convention always arranges for the callee pop value to be 8n+4
// bytes, which is needed for tail recursion elimination and stack alignment
// reasons.
//
// Note that this can be enhanced in the future to pass fp vals in registers
// (when we have a global fp allocator) and do other tricks.
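//
// For example, with no argument registers enabled, a call with two i32 stack
// arguments uses 8 bytes of argument space, which the lowering below rounds
// up to 12 (8*1+4) so that ESP stays 8-byte aligned after the 4-byte return
// address is pushed.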
/// AddLiveIn - This helper function adds the specified physical register to the
/// MachineFunction as a live in value. It also creates a corresponding virtual
/// register for it.
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
                          TargetRegisterClass *RC) {
  assert(RC->contains(PReg) && "Not the correct regclass!");
  unsigned VReg = MF.getSSARegMap()->createVirtualRegister(RC);
  MF.addLiveIn(PReg, VReg);
  return VReg;
}
// FASTCC_NUM_INT_ARGS_INREGS - This is the max number of integer arguments
// to pass in registers. 0 is none, 1 is "use EAX", 2 is "use EAX and
// EDX". Anything more is illegal.
//
// FIXME: The linscan register allocator currently has problems with
// coalescing. At the time of this writing, whenever it decides to coalesce
// a physreg with a virtreg, this increases the size of the physreg's live
// range, and the live range cannot ever be reduced. This causes problems if
// too many physregs are coalesced with virtregs, which can cause the register
// allocator to wedge itself.
//
// This code triggers this problem more often if we pass args in registers,
// so disable it until this is fixed.
//
// NOTE: this isn't marked const, so that GCC doesn't emit annoying warnings
// about code being dead.
//
static unsigned FASTCC_NUM_INT_ARGS_INREGS = 0;
std::vector<SDOperand>
X86TargetLowering::LowerFastCCArguments(Function &F, SelectionDAG &DAG) {
  std::vector<SDOperand> ArgValues;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  // Add DAG nodes to load the arguments... On entry to a function the stack
  // frame looks like this:
  //
  // [ESP]     -- return address
  // [ESP + 4] -- first nonreg argument (leftmost lexically)
  // [ESP + 8] -- second nonreg argument, if first argument is 4 bytes in size
  // ...
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot.

  // Keep track of the number of integer regs passed so far. This can be either
  // 0 (neither EAX or EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
  // used).
  unsigned NumIntRegs = 0;
  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
    MVT::ValueType ObjectVT = getValueType(I->getType());
    unsigned ArgIncrement = 4;
    unsigned ObjSize = 0;
    SDOperand ArgValue;

    switch (ObjectVT) {
    default: assert(0 && "Unhandled argument type!");
    case MVT::i1:
    case MVT::i8:
      if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
        if (!I->use_empty()) {
          unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::DL : X86::AL,
                                    X86::R8RegisterClass);
          ArgValue = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i8);
          DAG.setRoot(ArgValue.getValue(1));
          if (ObjectVT == MVT::i1)
            // FIXME: Should insert an assertzext here.
            ArgValue = DAG.getNode(ISD::TRUNCATE, MVT::i1, ArgValue);
        }
        ++NumIntRegs;
        break;
      }
      ObjSize = 1;
      break;
    case MVT::i16:
      if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
        if (!I->use_empty()) {
          unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::DX : X86::AX,
                                    X86::R16RegisterClass);
          ArgValue = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i16);
          DAG.setRoot(ArgValue.getValue(1));
        }
        ++NumIntRegs;
        break;
      }
      ObjSize = 2;
      break;
    case MVT::i32:
      if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
        if (!I->use_empty()) {
          unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX,
                                    X86::R32RegisterClass);
          ArgValue = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32);
          DAG.setRoot(ArgValue.getValue(1));
        }
        ++NumIntRegs;
        break;
      }
      ObjSize = 4;
      break;
    case MVT::i64:
      if (NumIntRegs+2 <= FASTCC_NUM_INT_ARGS_INREGS) {
        // Pass the whole i64 in EAX/EDX.
        if (!I->use_empty()) {
          unsigned BotReg = AddLiveIn(MF, X86::EAX, X86::R32RegisterClass);
          unsigned TopReg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);

          SDOperand Low = DAG.getCopyFromReg(DAG.getRoot(), BotReg, MVT::i32);
          SDOperand Hi = DAG.getCopyFromReg(Low.getValue(1), TopReg, MVT::i32);
          DAG.setRoot(Hi.getValue(1));

          ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Low, Hi);
        }
        NumIntRegs += 2;
        break;
      } else if (NumIntRegs+1 <= FASTCC_NUM_INT_ARGS_INREGS) {
        // Pass the low part in a register, the high part on the stack.
        if (!I->use_empty()) {
          unsigned BotReg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);
          SDOperand Low = DAG.getCopyFromReg(DAG.getRoot(), BotReg, MVT::i32);
          DAG.setRoot(Low.getValue(1));

          // Load the high part from memory.
          // Create the frame index object for this incoming parameter...
          int FI = MFI->CreateFixedObject(4, ArgOffset);
          SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
          SDOperand Hi = DAG.getLoad(MVT::i32, DAG.getEntryNode(), FIN,
                                     DAG.getSrcValue(NULL));
          ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Low, Hi);
        }
        NumIntRegs = FASTCC_NUM_INT_ARGS_INREGS;
        break;
      }
      ObjSize = ArgIncrement = 8;
      break;
    case MVT::f32: ObjSize = 4;                break;
    case MVT::f64: ObjSize = ArgIncrement = 8; break;
    }
    // Don't codegen dead arguments (FIXME: this check should eventually be
    // removed).
    if (ObjSize && !I->use_empty()) {
      // Create the frame index object for this incoming parameter...
      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);

      // Create the SelectionDAG nodes corresponding to a load from this
      // parameter.
      SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);

      ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
                             DAG.getSrcValue(NULL));
    } else if (ArgValue.Val == 0) {
      if (MVT::isInteger(ObjectVT))
        ArgValue = DAG.getConstant(0, ObjectVT);
      else
        ArgValue = DAG.getConstantFP(0, ObjectVT);
    }
    ArgValues.push_back(ArgValue);

    ArgOffset += ArgIncrement;   // Move on to the next argument.
  }
  // Make sure the instruction takes 8n+4 bytes to make sure the start of the
  // arguments and the arguments after the retaddr has been pushed are aligned.
  if ((ArgOffset & 7) == 0)
    ArgOffset += 4;

  VarArgsFrameIndex = 0xAAAAAAA;    // fastcc functions can't have varargs.
  ReturnAddrIndex = 0;              // No return address slot generated yet.
  BytesToPopOnReturn = ArgOffset;   // Callee pops all stack arguments.
  BytesCallerReserves = 0;
  // Finally, inform the code generator which regs we return values in.
  switch (getValueType(F.getReturnType())) {
  default: assert(0 && "Unknown type!");
  case MVT::isVoid: break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    MF.addLiveOut(X86::EAX);
    break;
  case MVT::i64:
    MF.addLiveOut(X86::EAX);
    MF.addLiveOut(X86::EDX);
    break;
  case MVT::f32:
  case MVT::f64:
    MF.addLiveOut(X86::ST0);
    break;
  }
  return ArgValues;
}
std::pair<SDOperand, SDOperand>
X86TargetLowering::LowerFastCCCallTo(SDOperand Chain, const Type *RetTy,
                                     bool isTailCall, SDOperand Callee,
                                     ArgListTy &Args, SelectionDAG &DAG) {
  // Count how many bytes are to be pushed on the stack.
  unsigned NumBytes = 0;

  // Keep track of the number of integer regs passed so far. This can be either
  // 0 (neither EAX or EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
  // used).
  unsigned NumIntRegs = 0;

  for (unsigned i = 0, e = Args.size(); i != e; ++i)
    switch (getValueType(Args[i].second)) {
    default: assert(0 && "Unknown value type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
        ++NumIntRegs;
        break;
      }
      // Fall through.
    case MVT::f32:
      NumBytes += 4;
      break;
    case MVT::i64:
      if (NumIntRegs+2 <= FASTCC_NUM_INT_ARGS_INREGS) {
        NumIntRegs += 2;
        break;
      } else if (NumIntRegs+1 <= FASTCC_NUM_INT_ARGS_INREGS) {
        NumIntRegs = FASTCC_NUM_INT_ARGS_INREGS;
        NumBytes += 4;   // The high part goes on the stack.
        break;
      }
      // Fall through.
    case MVT::f64:
      NumBytes += 8;
      break;
    }
  // Make sure the instruction takes 8n+4 bytes to make sure the start of the
  // arguments and the arguments after the retaddr has been pushed are aligned.
  if ((NumBytes & 7) == 0)
    NumBytes += 4;

  Chain = DAG.getCALLSEQ_START(Chain,
                               DAG.getConstant(NumBytes, getPointerTy()));

  // Arguments go on the stack in reverse order, as specified by the ABI.
  unsigned ArgOffset = 0;
  SDOperand StackPtr = DAG.getRegister(X86::ESP, MVT::i32);

  std::vector<SDOperand> Stores;
  std::vector<SDOperand> RegValuesToPass;
  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
    switch (getValueType(Args[i].second)) {
    default: assert(0 && "Unexpected ValueType for argument!");
    case MVT::i1:
      Args[i].first = DAG.getNode(ISD::ANY_EXTEND, MVT::i8, Args[i].first);
      // Fall through.
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
        RegValuesToPass.push_back(Args[i].first);
        ++NumIntRegs;
        break;
      }
      // Fall through.
    case MVT::f32: {
      SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
      Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                   Args[i].first, PtrOff,
                                   DAG.getSrcValue(NULL)));
      ArgOffset += 4;
      break;
    }
    case MVT::i64: {
      // Can pass (at least) part of it in regs?
      if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
        SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
                                   Args[i].first, DAG.getConstant(1, MVT::i32));
        SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
                                   Args[i].first, DAG.getConstant(0, MVT::i32));
        RegValuesToPass.push_back(Lo);
        ++NumIntRegs;

        // Pass both parts in regs?
        if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
          RegValuesToPass.push_back(Hi);
          ++NumIntRegs;
        } else {
          // Pass the high part in memory.
          SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
          PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
          Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                       Hi, PtrOff, DAG.getSrcValue(NULL)));
          ArgOffset += 4;
        }
        break;
      }
      // Fall through.
    }
    case MVT::f64: {
      SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
      Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                   Args[i].first, PtrOff,
                                   DAG.getSrcValue(NULL)));
      ArgOffset += 8;
      break;
    }
    }
  }

  if (!Stores.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);
  // Make sure the instruction takes 8n+4 bytes to make sure the start of the
  // arguments and the arguments after the retaddr has been pushed are aligned.
  if ((ArgOffset & 7) == 0)
    ArgOffset += 4;

  std::vector<MVT::ValueType> RetVals;
  MVT::ValueType RetTyVT = getValueType(RetTy);

  RetVals.push_back(MVT::Other);

  // The result values produced have to be legal. Promote the result.
  switch (RetTyVT) {
  case MVT::isVoid: break;
  default:
    RetVals.push_back(RetTyVT);
    break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    RetVals.push_back(MVT::i32);
    break;
  case MVT::f32:
    if (X86ScalarSSE)
      RetVals.push_back(MVT::f32);
    else
      RetVals.push_back(MVT::f64);
    break;
  case MVT::i64:
    RetVals.push_back(MVT::i32);
    RetVals.push_back(MVT::i32);
    break;
  }
  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) {
    unsigned CCReg;
    SDOperand RegToPass = RegValuesToPass[i];
    switch (RegToPass.getValueType()) {
    default: assert(0 && "Bad thing to pass in regs");
    case MVT::i8:
      CCReg = (i == 0) ? X86::AL  : X86::DL;
      break;
    case MVT::i16:
      CCReg = (i == 0) ? X86::AX  : X86::DX;
      break;
    case MVT::i32:
      CCReg = (i == 0) ? X86::EAX : X86::EDX;
      break;
    }

    Chain = DAG.getCopyToReg(Chain, CCReg, RegToPass, InFlag);
    InFlag = Chain.getValue(1);
  }
  std::vector<MVT::ValueType> NodeTys;
  NodeTys.push_back(MVT::Other);   // Returns a chain
  NodeTys.push_back(MVT::Flag);    // Returns a flag for retval copy to use.
  std::vector<SDOperand> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  if (InFlag.Val)
    Ops.push_back(InFlag);

  // FIXME: Do not generate X86ISD::TAILCALL for now.
  Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  NodeTys.clear();
  NodeTys.push_back(MVT::Other);   // Returns a chain
  NodeTys.push_back(MVT::Flag);    // Returns a flag for retval copy to use.
  Ops.clear();
  Ops.push_back(Chain);
  Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));
  Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, Ops);
  InFlag = Chain.getValue(1);
  SDOperand RetVal;
  if (RetTyVT != MVT::isVoid) {
    switch (RetTyVT) {
    default: assert(0 && "Unknown value type to return!");
    case MVT::i1:
    case MVT::i8:
      RetVal = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag);
      Chain = RetVal.getValue(1);
      if (RetTyVT == MVT::i1)
        RetVal = DAG.getNode(ISD::TRUNCATE, MVT::i1, RetVal);
      break;
    case MVT::i16:
      RetVal = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag);
      Chain = RetVal.getValue(1);
      break;
    case MVT::i32:
      RetVal = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
      Chain = RetVal.getValue(1);
      break;
    case MVT::i64: {
      SDOperand Lo = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
      SDOperand Hi = DAG.getCopyFromReg(Lo.getValue(1), X86::EDX, MVT::i32,
                                        Lo.getValue(2));
      RetVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Lo, Hi);
      Chain = Hi.getValue(1);
      break;
    }
    case MVT::f32:
    case MVT::f64: {
      std::vector<MVT::ValueType> Tys;
      Tys.push_back(MVT::f64);
      Tys.push_back(MVT::Other);
      Tys.push_back(MVT::Flag);
      std::vector<SDOperand> Ops;
      Ops.push_back(Chain);
      Ops.push_back(InFlag);
      RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, Ops);
      Chain = RetVal.getValue(1);
      InFlag = RetVal.getValue(2);

      if (X86ScalarSSE) {
        // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
        // shouldn't be necessary except that RFP cannot be live across
        // multiple blocks. When stackifier is fixed, they can be uncoupled.
        MachineFunction &MF = DAG.getMachineFunction();
        int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
        SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
        Tys.clear();
        Tys.push_back(MVT::Other);
        Ops.clear();
        Ops.push_back(Chain);
        Ops.push_back(RetVal);
        Ops.push_back(StackSlot);
        Ops.push_back(DAG.getValueType(RetTyVT));
        Ops.push_back(InFlag);
        Chain = DAG.getNode(X86ISD::FST, Tys, Ops);
        RetVal = DAG.getLoad(RetTyVT, Chain, StackSlot,
                             DAG.getSrcValue(NULL));
        Chain = RetVal.getValue(1);
      }

      if (RetTyVT == MVT::f32 && !X86ScalarSSE)
        // FIXME: we would really like to remember that this FP_ROUND
        // operation is okay to eliminate if we allow excess FP precision.
        RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
      break;
    }
    }
  }

  return std::make_pair(RetVal, Chain);
}
SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
  if (ReturnAddrIndex == 0) {
    // Set up a frame object for the return address.
    MachineFunction &MF = DAG.getMachineFunction();
    ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
  }

  return DAG.getFrameIndex(ReturnAddrIndex, MVT::i32);
}
std::pair<SDOperand, SDOperand> X86TargetLowering::
LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
                        SelectionDAG &DAG) {
  SDOperand Result;
  if (Depth)   // Depths > 0 not supported yet!
    Result = DAG.getConstant(0, getPointerTy());
  else {
    SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
    if (!isFrameAddress)
      // Just load the return address.
      Result = DAG.getLoad(MVT::i32, DAG.getEntryNode(), RetAddrFI,
                           DAG.getSrcValue(NULL));
    else
      Result = DAG.getNode(ISD::SUB, MVT::i32, RetAddrFI,
                           DAG.getConstant(4, MVT::i32));
  }
  return std::make_pair(Result, Chain);
}
/// getCondBrOpcodeForX86CC - Returns the X86 conditional branch opcode
/// which corresponds to the condition code.
static unsigned getCondBrOpcodeForX86CC(unsigned X86CC) {
  switch (X86CC) {
  default: assert(0 && "Unknown X86 conditional code!");
  case X86ISD::COND_A:  return X86::JA;
  case X86ISD::COND_AE: return X86::JAE;
  case X86ISD::COND_B:  return X86::JB;
  case X86ISD::COND_BE: return X86::JBE;
  case X86ISD::COND_E:  return X86::JE;
  case X86ISD::COND_G:  return X86::JG;
  case X86ISD::COND_GE: return X86::JGE;
  case X86ISD::COND_L:  return X86::JL;
  case X86ISD::COND_LE: return X86::JLE;
  case X86ISD::COND_NE: return X86::JNE;
  case X86ISD::COND_NO: return X86::JNO;
  case X86ISD::COND_NP: return X86::JNP;
  case X86ISD::COND_NS: return X86::JNS;
  case X86ISD::COND_O:  return X86::JO;
  case X86ISD::COND_P:  return X86::JP;
  case X86ISD::COND_S:  return X86::JS;
  }
}
/// translateX86CC - Do a one-to-one translation of an ISD::CondCode to the X86
/// specific condition code. It returns false if it cannot do a direct
/// translation. X86CC is the translated CondCode. Flip is set to true if
/// the order of comparison operands should be flipped.
static bool translateX86CC(SDOperand CC, bool isFP, unsigned &X86CC,
                           bool &Flip) {
  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  Flip = false;
  X86CC = X86ISD::COND_INVALID;
  if (!isFP) {
    switch (SetCCOpcode) {
    default: break;
    case ISD::SETEQ:  X86CC = X86ISD::COND_E;  break;
    case ISD::SETGT:  X86CC = X86ISD::COND_G;  break;
    case ISD::SETGE:  X86CC = X86ISD::COND_GE; break;
    case ISD::SETLT:  X86CC = X86ISD::COND_L;  break;
    case ISD::SETLE:  X86CC = X86ISD::COND_LE; break;
    case ISD::SETNE:  X86CC = X86ISD::COND_NE; break;
    case ISD::SETULT: X86CC = X86ISD::COND_B;  break;
    case ISD::SETUGT: X86CC = X86ISD::COND_A;  break;
    case ISD::SETULE: X86CC = X86ISD::COND_BE; break;
    case ISD::SETUGE: X86CC = X86ISD::COND_AE; break;
    }
  } else {
    // On a floating point condition, the flags are set as follows:
    // ZF  PF  CF   op
    //  0 | 0 | 0 | X > Y
    //  0 | 0 | 1 | X < Y
    //  1 | 0 | 0 | X == Y
    //  1 | 1 | 1 | unordered
    switch (SetCCOpcode) {
    default: break;
    case ISD::SETEQ:  X86CC = X86ISD::COND_E;  break;
    case ISD::SETOLE: Flip = true; // Fallthrough
    case ISD::SETGT:  X86CC = X86ISD::COND_A;  break;
    case ISD::SETOLT: Flip = true; // Fallthrough
    case ISD::SETGE:  X86CC = X86ISD::COND_AE; break;
    case ISD::SETUGE: Flip = true; // Fallthrough
    case ISD::SETLT:  X86CC = X86ISD::COND_B;  break;
    case ISD::SETUGT: Flip = true; // Fallthrough
    case ISD::SETLE:  X86CC = X86ISD::COND_BE; break;
    case ISD::SETNE:  X86CC = X86ISD::COND_NE; break;
    case ISD::SETUO:  X86CC = X86ISD::COND_P;  break;
    case ISD::SETO:   X86CC = X86ISD::COND_NP; break;
    }
  }

  return X86CC != X86ISD::COND_INVALID;
}
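// For example, an integer SETUGT maps directly to COND_A, while a floating
// point SETOLE sets Flip and falls through to the SETGT case, i.e. the
// operands are swapped and COND_A ("above") is tested on the swapped pair.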
/// hasFPCMov - is there a floating point cmov for the specific X86 condition
/// code? Current x86 isa includes the following FP cmov instructions:
/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
static bool hasFPCMov(unsigned X86CC) {
  switch (X86CC) {
  default:
    return false;
  case X86ISD::COND_B:
  case X86ISD::COND_BE:
  case X86ISD::COND_E:
  case X86ISD::COND_P:
  case X86ISD::COND_A:
  case X86ISD::COND_AE:
  case X86ISD::COND_NE:
  case X86ISD::COND_NP:
    return true;
  }
}
MachineBasicBlock *
X86TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
                                           MachineBasicBlock *BB) {
  switch (MI->getOpcode()) {
  default: assert(false && "Unexpected instr type to insert");
  case X86::CMOV_FR32:
  case X86::CMOV_FR64: {
    // To "insert" a SELECT_CC instruction, we actually have to insert the
    // diamond control-flow pattern. The incoming instruction knows the
    // destination vreg to set, the condition code register to branch on, the
    // true/false values to select between, and a branch opcode to use.
    const BasicBlock *LLVM_BB = BB->getBasicBlock();
    ilist<MachineBasicBlock>::iterator It = BB;
    ++It;

    //  thisMBB:
    //   ...
    //   TrueVal = ...
    //   cmpTY ccX, r1, r2
    //   bCC sinkMBB
    //   fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB = BB;
    MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
    unsigned Opc = getCondBrOpcodeForX86CC(MI->getOperand(3).getImmedValue());
    BuildMI(BB, Opc, 1).addMBB(sinkMBB);
    MachineFunction *F = BB->getParent();
    F->getBasicBlockList().insert(It, copy0MBB);
    F->getBasicBlockList().insert(It, sinkMBB);
    // Update machine-CFG edges by first adding all successors of the current
    // block to the new block which will contain the Phi node for the select.
    for (MachineBasicBlock::succ_iterator i = BB->succ_begin(),
         e = BB->succ_end(); i != e; ++i)
      sinkMBB->addSuccessor(*i);
    // Next, remove all successors of the current block, and add the true
    // and fallthrough blocks as its successors.
    while (!BB->succ_empty())
      BB->removeSuccessor(BB->succ_begin());
    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    //  copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges.
    BB->addSuccessor(sinkMBB);

    //  sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //   ...
    BB = sinkMBB;
    BuildMI(BB, X86::PHI, 4, MI->getOperand(0).getReg())
      .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
      .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

    delete MI;   // The pseudo instruction is gone now.
    return BB;
  }
  case X86::FP_TO_INT16_IN_MEM:
  case X86::FP_TO_INT32_IN_MEM:
  case X86::FP_TO_INT64_IN_MEM: {
    // Change the floating point control register to use "round towards zero"
    // mode when truncating to an integer value.
    MachineFunction *F = BB->getParent();
    int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
    addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);

    // Load the old value of the high byte of the control word...
    unsigned OldCW =
      F->getSSARegMap()->createVirtualRegister(X86::R16RegisterClass);
    addFrameReference(BuildMI(BB, X86::MOV16rm, 4, OldCW), CWFrameIdx);

    // Set the high part to be round to zero...
    addFrameReference(BuildMI(BB, X86::MOV16mi, 5), CWFrameIdx).addImm(0xC7F);

    // Reload the modified control word now...
    addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);

    // Restore the memory image of control word to original value.
    addFrameReference(BuildMI(BB, X86::MOV16mr, 5), CWFrameIdx).addReg(OldCW);

    // Get the X86 opcode to use.
    unsigned Opc;
    switch (MI->getOpcode()) {
    default: assert(0 && "illegal opcode!");
    case X86::FP_TO_INT16_IN_MEM: Opc = X86::FpIST16m; break;
    case X86::FP_TO_INT32_IN_MEM: Opc = X86::FpIST32m; break;
    case X86::FP_TO_INT64_IN_MEM: Opc = X86::FpIST64m; break;
    }

    X86AddressMode AM;
    MachineOperand &Op = MI->getOperand(0);
    if (Op.isRegister()) {
      AM.BaseType = X86AddressMode::RegBase;
      AM.Base.Reg = Op.getReg();
    } else {
      AM.BaseType = X86AddressMode::FrameIndexBase;
      AM.Base.FrameIndex = Op.getFrameIndex();
    }
    Op = MI->getOperand(1);
    if (Op.isImmediate())
      AM.Scale = Op.getImmedValue();
    Op = MI->getOperand(2);
    if (Op.isImmediate())
      AM.IndexReg = Op.getImmedValue();
    Op = MI->getOperand(3);
    if (Op.isGlobalAddress())
      AM.GV = Op.getGlobal();
    else
      AM.Disp = Op.getImmedValue();
    addFullAddress(BuildMI(BB, Opc, 5), AM).addReg(MI->getOperand(4).getReg());

    // Reload the original control word now.
    addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);

    delete MI;   // The pseudo instruction is gone now.
    return BB;
  }
  }
}
//===----------------------------------------------------------------------===//
//                          X86 Custom Lowering Hooks
//===----------------------------------------------------------------------===//

/// DarwinGVRequiresExtraLoad - true if accessing the GV requires an extra
/// load. For Darwin, external and weak symbols are indirect, loading the value
/// at address GV rather than the value of GV itself. This means that the
/// GlobalAddress must be in the base or index register of the address, not the
/// GV offset field.
static bool DarwinGVRequiresExtraLoad(GlobalValue *GV) {
  return (GV->hasWeakLinkage() || GV->hasLinkOnceLinkage() ||
          (GV->isExternal() && !GV->hasNotBeenReadFromBytecode()));
}
/// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFD.
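/// For example, <2, 1, 0, 3> is a valid PSHUFD mask; <0, 5, 2, 3> is not,
/// since element 5 would reference the second input vector.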
bool X86::isPSHUFDMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Check if the value doesn't reference the second vector.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (cast<ConstantSDNode>(Arg)->getValue() >= 4)
      return false;
  }

  return true;
}
/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFHW.
bool X86::isPSHUFHWMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 8)
    return false;

  // Lower quadword copied in order.
  for (unsigned i = 0; i != 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (cast<ConstantSDNode>(Arg)->getValue() != i)
      return false;
  }

  // Upper quadword shuffled.
  for (unsigned i = 4; i != 8; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < 4 || Val > 7)
      return false;
  }

  return true;
}
/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFLW.
bool X86::isPSHUFLWMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 8)
    return false;

  // Upper quadword copied in order.
  for (unsigned i = 4; i != 8; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (cast<ConstantSDNode>(Arg)->getValue() != i)
      return false;
  }

  // Lower quadword shuffled.
  for (unsigned i = 0; i != 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val > 3)
      return false;
  }

  return true;
}
/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to SHUFP*.
bool X86::isSHUFPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems == 2) {
    // The only case that ought to be handled by SHUFPD is
    // Dest { 2, 1 } <= shuffle(Dest { 1, 0 }, Src { 3, 2 })
    // Expect bit 0 == 1, bit 1 == 2.
    SDOperand Bit0 = N->getOperand(0);
    if (Bit0.getOpcode() != ISD::UNDEF) {
      assert(isa<ConstantSDNode>(Bit0) && "Invalid VECTOR_SHUFFLE mask!");
      if (cast<ConstantSDNode>(Bit0)->getValue() != 1)
        return false;
    }
    SDOperand Bit1 = N->getOperand(1);
    if (Bit1.getOpcode() != ISD::UNDEF) {
      assert(isa<ConstantSDNode>(Bit1) && "Invalid VECTOR_SHUFFLE mask!");
      if (cast<ConstantSDNode>(Bit1)->getValue() != 2)
        return false;
    }
    return true;
  }

  if (NumElems != 4) return false;

  // Each half must refer to only one of the two input vectors.
  for (unsigned i = 0; i < 2; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val >= 4) return false;
  }
  for (unsigned i = 2; i < 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < 4) return false;
  }

  return true;
}
/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
bool X86::isMOVHLPSMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3.
  SDOperand Bit0 = N->getOperand(0);
  SDOperand Bit1 = N->getOperand(1);
  SDOperand Bit2 = N->getOperand(2);
  SDOperand Bit3 = N->getOperand(3);

  if (Bit0.getOpcode() != ISD::UNDEF) {
    assert(isa<ConstantSDNode>(Bit0) && "Invalid VECTOR_SHUFFLE mask!");
    if (cast<ConstantSDNode>(Bit0)->getValue() != 6)
      return false;
  }
  if (Bit1.getOpcode() != ISD::UNDEF) {
    assert(isa<ConstantSDNode>(Bit1) && "Invalid VECTOR_SHUFFLE mask!");
    if (cast<ConstantSDNode>(Bit1)->getValue() != 7)
      return false;
  }
  if (Bit2.getOpcode() != ISD::UNDEF) {
    assert(isa<ConstantSDNode>(Bit2) && "Invalid VECTOR_SHUFFLE mask!");
    if (cast<ConstantSDNode>(Bit2)->getValue() != 2)
      return false;
  }
  if (Bit3.getOpcode() != ISD::UNDEF) {
    assert(isa<ConstantSDNode>(Bit3) && "Invalid VECTOR_SHUFFLE mask!");
    if (cast<ConstantSDNode>(Bit3)->getValue() != 3)
      return false;
  }

  return true;
}
/// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVLHPS.
bool X86::isMOVLHPSMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect bit0 == 0, bit1 == 1, bit2 == 4, bit3 == 5.
  SDOperand Bit0 = N->getOperand(0);
  SDOperand Bit1 = N->getOperand(1);
  SDOperand Bit2 = N->getOperand(2);
  SDOperand Bit3 = N->getOperand(3);

  if (Bit0.getOpcode() != ISD::UNDEF) {
    assert(isa<ConstantSDNode>(Bit0) && "Invalid VECTOR_SHUFFLE mask!");
    if (cast<ConstantSDNode>(Bit0)->getValue() != 0)
      return false;
  }
  if (Bit1.getOpcode() != ISD::UNDEF) {
    assert(isa<ConstantSDNode>(Bit1) && "Invalid VECTOR_SHUFFLE mask!");
    if (cast<ConstantSDNode>(Bit1)->getValue() != 1)
      return false;
  }
  if (Bit2.getOpcode() != ISD::UNDEF) {
    assert(isa<ConstantSDNode>(Bit2) && "Invalid VECTOR_SHUFFLE mask!");
    if (cast<ConstantSDNode>(Bit2)->getValue() != 4)
      return false;
  }
  if (Bit3.getOpcode() != ISD::UNDEF) {
    assert(isa<ConstantSDNode>(Bit3) && "Invalid VECTOR_SHUFFLE mask!");
    if (cast<ConstantSDNode>(Bit3)->getValue() != 5)
      return false;
  }

  return true;
}
/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKL.
bool X86::isUNPCKLMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
    return false;

  for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
    SDOperand BitI  = N->getOperand(i);
    SDOperand BitI1 = N->getOperand(i+1);

    if (BitI.getOpcode() != ISD::UNDEF) {
      assert(isa<ConstantSDNode>(BitI) && "Invalid VECTOR_SHUFFLE mask!");
      if (cast<ConstantSDNode>(BitI)->getValue() != j)
        return false;
    }
    if (BitI1.getOpcode() != ISD::UNDEF) {
      assert(isa<ConstantSDNode>(BitI1) && "Invalid VECTOR_SHUFFLE mask!");
      if (cast<ConstantSDNode>(BitI1)->getValue() != j + NumElems)
        return false;
    }
  }

  return true;
}
/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKH.
bool X86::isUNPCKHMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
    return false;

  for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
    SDOperand BitI  = N->getOperand(i);
    SDOperand BitI1 = N->getOperand(i+1);

    if (BitI.getOpcode() != ISD::UNDEF) {
      assert(isa<ConstantSDNode>(BitI) && "Invalid VECTOR_SHUFFLE mask!");
      if (cast<ConstantSDNode>(BitI)->getValue() != j + NumElems/2)
        return false;
    }
    if (BitI1.getOpcode() != ISD::UNDEF) {
      assert(isa<ConstantSDNode>(BitI1) && "Invalid VECTOR_SHUFFLE mask!");
      if (cast<ConstantSDNode>(BitI1)->getValue() != j + NumElems/2 + NumElems)
        return false;
    }
  }

  return true;
}
/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for the canonical form
/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
/// <0, 0, 1, 1>.
bool X86::isUNPCKL_v_undef_Mask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems != 4 && NumElems != 8 && NumElems != 16)
    return false;

  for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
    SDOperand BitI  = N->getOperand(i);
    SDOperand BitI1 = N->getOperand(i+1);

    if (BitI.getOpcode() != ISD::UNDEF) {
      assert(isa<ConstantSDNode>(BitI) && "Invalid VECTOR_SHUFFLE mask!");
      if (cast<ConstantSDNode>(BitI)->getValue() != j)
        return false;
    }
    if (BitI1.getOpcode() != ISD::UNDEF) {
      assert(isa<ConstantSDNode>(BitI1) && "Invalid VECTOR_SHUFFLE mask!");
      if (cast<ConstantSDNode>(BitI1)->getValue() != j)
        return false;
    }
  }

  return true;
}
/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
/// a splat of a single element.
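/// For example, <0, 0, 0, 0> is a splat mask, while <0, 4, 0, 4> is not,
/// since element 4 would reference the second input vector.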
bool X86::isSplatMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  // We can only splat 64-bit and 32-bit quantities.
  if (N->getNumOperands() != 4 && N->getNumOperands() != 2)
    return false;

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  SDOperand Elt = N->getOperand(0);
  assert(isa<ConstantSDNode>(Elt) && "Invalid VECTOR_SHUFFLE mask!");
  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (Arg != Elt) return false;
  }

  // Make sure it is a splat of the first vector operand.
  return cast<ConstantSDNode>(Elt)->getValue() < N->getNumOperands();
}
/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
/// instructions.
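/// For the identity mask <0, 1, 2, 3> this returns 0xE4 (binary 11'10'01'00):
/// each element index occupies two bits, with element 0 in the low bits.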
1725 unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
1726 unsigned NumOperands = N->getNumOperands();
1727 unsigned Shift = (NumOperands == 4) ? 2 : 1;
1729 for (unsigned i = 0; i < NumOperands; ++i) {
1731 SDOperand Arg = N->getOperand(NumOperands-i-1);
1732 if (Arg.getOpcode() != ISD::UNDEF)
1733 Val = cast<ConstantSDNode>(Arg)->getValue();
1734 if (Val >= NumOperands) Val -= NumOperands;
1736 if (i != NumOperands - 1)
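
// Mask element i lands in bits [2i+1:2i] of the immediate, so the v4
// reversal mask <3, 2, 1, 0> encodes as 0x1B.
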
/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW
/// instructions.
unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) {
  unsigned Mask = 0;
  // 8 nodes, but we only care about the last 4.
  for (unsigned i = 7; i >= 4; --i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    Mask |= (Val - 4);
    if (i != 4)
      Mask <<= 2;
  }

  return Mask;
}

/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW
/// instructions.
unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) {
  unsigned Mask = 0;
  // 8 nodes, but we only care about the first 4.
  for (int i = 3; i >= 0; --i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    Mask |= Val;
    if (i != 0)
      Mask <<= 2;
  }

  return Mask;
}
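
// PSHUFLW permutes only the low four words and leaves the high quadword
// alone; PSHUFHW is the mirror image, so each needs just four 2-bit fields.
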
/// NormalizeVectorShuffle - Swap vector_shuffle operands (as well as
/// the values in the permute mask) if needed. Use V1 as the second vector if
/// it is undef. Return an empty SDOperand if it is already well formed.
static SDOperand NormalizeVectorShuffle(SDOperand Op, SelectionDAG &DAG) {
  SDOperand V1 = Op.getOperand(0);
  SDOperand V2 = Op.getOperand(1);
  SDOperand Mask = Op.getOperand(2);
  MVT::ValueType VT = Op.getValueType();
  unsigned NumElems = Mask.getNumOperands();
  SDOperand Half1 = Mask.getOperand(0);
  SDOperand Half2 = Mask.getOperand(NumElems/2);
  bool V2Undef = false;
  if (V2.getOpcode() == ISD::UNDEF) {
    V2Undef = true;
    V2 = V1;
  }

  if (cast<ConstantSDNode>(Half1)->getValue() >= NumElems &&
      cast<ConstantSDNode>(Half2)->getValue() < NumElems) {
    // Swap the operands and change mask.
    std::vector<SDOperand> MaskVec;
    for (unsigned i = NumElems / 2; i != NumElems; ++i)
      MaskVec.push_back(Mask.getOperand(i));
    for (unsigned i = 0; i != NumElems / 2; ++i)
      MaskVec.push_back(Mask.getOperand(i));
    Mask =
      DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(), MaskVec);
    return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V2, V1, Mask);
  }

  if (V2Undef)
    return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask);

  return SDOperand();
}
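
// e.g. shuffle V1, V2, <4, 5, 0, 1> is rewritten as shuffle V2, V1,
// <0, 1, 4, 5>, a form the mask predicates above can recognize.
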
/// isPSHUFHW_PSHUFLWMask - true if the specified VECTOR_SHUFFLE operand
/// specifies an 8-element shuffle that can be broken into a pair of
/// PSHUFHW and PSHUFLW.
static bool isPSHUFHW_PSHUFLWMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 8)
    return false;

  // Lower quadword shuffled.
  for (unsigned i = 0; i != 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val > 3)
      return false;
  }

  // Upper quadword shuffled.
  for (unsigned i = 4; i != 8; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < 4 || Val > 7)
      return false;
  }

  return true;
}
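
// Such a mask permutes within each quadword independently, e.g.
// <1, 0, 3, 2, 5, 4, 7, 6>, and so lowers to one PSHUFLW plus one PSHUFHW.
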
/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
  switch (Op.getOpcode()) {
  default: assert(0 && "Should not custom lower this!");
  case ISD::SHL_PARTS:
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS: {
    assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 &&
           "Not an i64 shift!");
    bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
    SDOperand ShOpLo = Op.getOperand(0);
    SDOperand ShOpHi = Op.getOperand(1);
    SDOperand ShAmt  = Op.getOperand(2);
    SDOperand Tmp1 = isSRA ? DAG.getNode(ISD::SRA, MVT::i32, ShOpHi,
                                         DAG.getConstant(31, MVT::i8))
                           : DAG.getConstant(0, MVT::i32);

    SDOperand Tmp2, Tmp3;
    if (Op.getOpcode() == ISD::SHL_PARTS) {
      Tmp2 = DAG.getNode(X86ISD::SHLD, MVT::i32, ShOpHi, ShOpLo, ShAmt);
      Tmp3 = DAG.getNode(ISD::SHL, MVT::i32, ShOpLo, ShAmt);
    } else {
      Tmp2 = DAG.getNode(X86ISD::SHRD, MVT::i32, ShOpLo, ShOpHi, ShAmt);
      Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, MVT::i32, ShOpHi, ShAmt);
    }
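
    // The hardware masks the shift amount to 5 bits, so test bit 5 (i.e.
    // amount >= 32) and use CMOVs to choose between the double-shift results
    // and the "shift by 32 or more" values.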
    SDOperand InFlag = DAG.getNode(X86ISD::TEST, MVT::Flag,
                                   ShAmt, DAG.getConstant(32, MVT::i8));

    SDOperand Hi, Lo;
    SDOperand CC = DAG.getConstant(X86ISD::COND_NE, MVT::i8);

    std::vector<MVT::ValueType> Tys;
    Tys.push_back(MVT::i32);
    Tys.push_back(MVT::Flag);
    std::vector<SDOperand> Ops;
    if (Op.getOpcode() == ISD::SHL_PARTS) {
      Ops.push_back(Tmp2);
      Ops.push_back(Tmp3);
      Ops.push_back(CC);
      Ops.push_back(InFlag);
      Hi = DAG.getNode(X86ISD::CMOV, Tys, Ops);
      InFlag = Hi.getValue(1);

      Ops.clear();
      Ops.push_back(Tmp3);
      Ops.push_back(Tmp1);
      Ops.push_back(CC);
      Ops.push_back(InFlag);
      Lo = DAG.getNode(X86ISD::CMOV, Tys, Ops);
    } else {
      Ops.push_back(Tmp2);
      Ops.push_back(Tmp3);
      Ops.push_back(CC);
      Ops.push_back(InFlag);
      Lo = DAG.getNode(X86ISD::CMOV, Tys, Ops);
      InFlag = Lo.getValue(1);

      Ops.clear();
      Ops.push_back(Tmp3);
      Ops.push_back(Tmp1);
      Ops.push_back(CC);
      Ops.push_back(InFlag);
      Hi = DAG.getNode(X86ISD::CMOV, Tys, Ops);
    }

    Tys.clear();
    Tys.push_back(MVT::i32);
    Tys.push_back(MVT::i32);
    Ops.clear();
    Ops.push_back(Lo);
    Ops.push_back(Hi);
    return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops);
  }
  case ISD::SINT_TO_FP: {
    assert(Op.getOperand(0).getValueType() <= MVT::i64 &&
           Op.getOperand(0).getValueType() >= MVT::i16 &&
           "Unknown SINT_TO_FP to lower!");

    SDOperand Result;
    MVT::ValueType SrcVT = Op.getOperand(0).getValueType();
    unsigned Size = MVT::getSizeInBits(SrcVT)/8;
    MachineFunction &MF = DAG.getMachineFunction();
    int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
    SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
    SDOperand Chain = DAG.getNode(ISD::STORE, MVT::Other,
                                  DAG.getEntryNode(), Op.getOperand(0),
                                  StackSlot, DAG.getSrcValue(NULL));
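
    // Build the FILD: the integer has been spilled to the stack slot above,
    // and fild converts it to floating point directly from memory.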
    std::vector<MVT::ValueType> Tys;
    Tys.push_back(MVT::f64);
    Tys.push_back(MVT::Other);
    if (X86ScalarSSE) Tys.push_back(MVT::Flag);
    std::vector<SDOperand> Ops;
    Ops.push_back(Chain);
    Ops.push_back(StackSlot);
    Ops.push_back(DAG.getValueType(SrcVT));
    Result = DAG.getNode(X86ScalarSSE ? X86ISD::FILD_FLAG : X86ISD::FILD,
                         Tys, Ops);

    if (X86ScalarSSE) {
      Chain = Result.getValue(1);
      SDOperand InFlag = Result.getValue(2);

      // FIXME: Currently the FST is flagged to the FILD_FLAG. This
      // shouldn't be necessary except that RFP cannot be live across
      // multiple blocks. When stackifier is fixed, they can be uncoupled.
      MachineFunction &MF = DAG.getMachineFunction();
      int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
      SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
      std::vector<MVT::ValueType> Tys;
      Tys.push_back(MVT::Other);
      std::vector<SDOperand> Ops;
      Ops.push_back(Chain);
      Ops.push_back(Result);
      Ops.push_back(StackSlot);
      Ops.push_back(DAG.getValueType(Op.getValueType()));
      Ops.push_back(InFlag);
      Chain = DAG.getNode(X86ISD::FST, Tys, Ops);
      Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot,
                           DAG.getSrcValue(NULL));
    }

    return Result;
  }
  case ISD::FP_TO_SINT: {
    assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 &&
           "Unknown FP_TO_SINT to lower!");
    // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary
    // stack slot.
    MachineFunction &MF = DAG.getMachineFunction();
    unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8;
    int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
    SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());

    unsigned Opc;
    switch (Op.getValueType()) {
    default: assert(0 && "Invalid FP_TO_SINT to lower!");
    case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
    case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
    case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
    }

    SDOperand Chain = DAG.getEntryNode();
    SDOperand Value = Op.getOperand(0);
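
    // With scalar SSE the value lives in an XMM register, but the fistp
    // family only operates on the x87 stack, so spill it and reload it with
    // FLD before issuing the FIST.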
    if (X86ScalarSSE) {
      assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!");
      Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value, StackSlot,
                          DAG.getSrcValue(0));
      std::vector<MVT::ValueType> Tys;
      Tys.push_back(MVT::f64);
      Tys.push_back(MVT::Other);
      std::vector<SDOperand> Ops;
      Ops.push_back(Chain);
      Ops.push_back(StackSlot);
      Ops.push_back(DAG.getValueType(Op.getOperand(0).getValueType()));
      Value = DAG.getNode(X86ISD::FLD, Tys, Ops);
      Chain = Value.getValue(1);
      SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
      StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
    }

    // Build the FP_TO_INT*_IN_MEM
    std::vector<SDOperand> Ops;
    Ops.push_back(Chain);
    Ops.push_back(Value);
    Ops.push_back(StackSlot);
    SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops);

    // Load the result.
    return DAG.getLoad(Op.getValueType(), FIST, StackSlot,
                       DAG.getSrcValue(NULL));
  }
  case ISD::READCYCLECOUNTER: {
    std::vector<MVT::ValueType> Tys;
    Tys.push_back(MVT::Other);
    Tys.push_back(MVT::Flag);
    std::vector<SDOperand> Ops;
    Ops.push_back(Op.getOperand(0));
    SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, Ops);
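
    // rdtsc returns the 64-bit counter in EDX:EAX; copy both halves out,
    // threading the flag so the copies stay glued to the rdtsc.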
    Ops.clear();
    Ops.push_back(DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)));
    Ops.push_back(DAG.getCopyFromReg(Ops[0].getValue(1), X86::EDX,
                                     MVT::i32, Ops[0].getValue(2)));
    Ops.push_back(Ops[1].getValue(1));
    Tys[0] = Tys[1] = MVT::i32;
    Tys.push_back(MVT::Other);
    return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops);
  }
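  // FABS is lowered as an FAND with a constant-pool mask that clears only
  // the sign bit of each element.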
  case ISD::FABS: {
    MVT::ValueType VT = Op.getValueType();
    const Type *OpNTy = MVT::getTypeForValueType(VT);
    std::vector<Constant*> CV;
    if (VT == MVT::f64) {
      CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(~(1ULL << 63))));
      CV.push_back(ConstantFP::get(OpNTy, 0.0));
    } else {
      CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(~(1U << 31))));
      CV.push_back(ConstantFP::get(OpNTy, 0.0));
      CV.push_back(ConstantFP::get(OpNTy, 0.0));
      CV.push_back(ConstantFP::get(OpNTy, 0.0));
    }
    Constant *CS = ConstantStruct::get(CV);
    SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
    SDOperand Mask
      = DAG.getNode(X86ISD::LOAD_PACK,
                    VT, DAG.getEntryNode(), CPIdx, DAG.getSrcValue(NULL));
    return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask);
  }
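  // FNEG is the same trick with FXOR: flipping the sign bit negates the
  // value without touching any other bits.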
  case ISD::FNEG: {
    MVT::ValueType VT = Op.getValueType();
    const Type *OpNTy = MVT::getTypeForValueType(VT);
    std::vector<Constant*> CV;
    if (VT == MVT::f64) {
      CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(1ULL << 63)));
      CV.push_back(ConstantFP::get(OpNTy, 0.0));
    } else {
      CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(1U << 31)));
      CV.push_back(ConstantFP::get(OpNTy, 0.0));
      CV.push_back(ConstantFP::get(OpNTy, 0.0));
      CV.push_back(ConstantFP::get(OpNTy, 0.0));
    }
    Constant *CS = ConstantStruct::get(CV);
    SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
    SDOperand Mask
      = DAG.getNode(X86ISD::LOAD_PACK,
                    VT, DAG.getEntryNode(), CPIdx, DAG.getSrcValue(NULL));
    return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask);
  }
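  // SETCC is lowered to an X86 compare plus a flag-consuming SETCC node.
  // Most conditions map to a single condition code via translateX86CC;
  // ordered-equal and unordered-not-equal need two flag reads (PF and ZF).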
  case ISD::SETCC: {
    assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
    SDOperand Cond;
    SDOperand CC = Op.getOperand(2);
    ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
    bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType());
    bool Flip;
    unsigned X86CC;
    if (translateX86CC(CC, isFP, X86CC, Flip)) {
      if (Flip)
        Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
                           Op.getOperand(1), Op.getOperand(0));
      else
        Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
                           Op.getOperand(0), Op.getOperand(1));
      return DAG.getNode(X86ISD::SETCC, MVT::i8,
                         DAG.getConstant(X86CC, MVT::i8), Cond);
    } else {
      assert(isFP && "Illegal integer SetCC!");
      Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
                         Op.getOperand(0), Op.getOperand(1));
      std::vector<MVT::ValueType> Tys;
      std::vector<SDOperand> Ops;
      switch (SetCCOpcode) {
      default: assert(false && "Illegal floating point SetCC!");
      case ISD::SETOEQ: {  // !PF & ZF
        Tys.push_back(MVT::i8);
        Tys.push_back(MVT::Flag);
        Ops.push_back(DAG.getConstant(X86ISD::COND_NP, MVT::i8));
        Ops.push_back(Cond);
        SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
        SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
                                     DAG.getConstant(X86ISD::COND_E, MVT::i8),
                                     Tmp1.getValue(1));
        return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2);
      }
      case ISD::SETUNE: {  // PF | !ZF
        Tys.push_back(MVT::i8);
        Tys.push_back(MVT::Flag);
        Ops.push_back(DAG.getConstant(X86ISD::COND_P, MVT::i8));
        Ops.push_back(Cond);
        SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
        SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
                                     DAG.getConstant(X86ISD::COND_NE, MVT::i8),
                                     Tmp1.getValue(1));
        return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2);
      }
      }
    }
  }
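  // SELECT becomes an X86ISD::CMOV on the flag produced by the condition.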
  case ISD::SELECT: {
    MVT::ValueType VT = Op.getValueType();
    bool isFP      = MVT::isFloatingPoint(VT);
    bool isFPStack = isFP && !X86ScalarSSE;
    bool isFPSSE   = isFP && X86ScalarSSE;
    bool addTest   = false;
    SDOperand Op0 = Op.getOperand(0);
    SDOperand Cond, CC;
    if (Op0.getOpcode() == ISD::SETCC)
      Op0 = LowerOperation(Op0, DAG);

    if (Op0.getOpcode() == X86ISD::SETCC) {
      // If condition flag is set by a X86ISD::CMP, then make a copy of it
      // (since flag operand cannot be shared). If the X86ISD::SETCC does not
      // have another use it will be eliminated.
      // If the X86ISD::SETCC has more than one use, then it's probably better
      // to use a test instead of duplicating the X86ISD::CMP (for register
      // pressure reasons).
      if (Op0.getOperand(1).getOpcode() == X86ISD::CMP) {
        if (!Op0.hasOneUse()) {
          std::vector<MVT::ValueType> Tys;
          for (unsigned i = 0; i < Op0.Val->getNumValues(); ++i)
            Tys.push_back(Op0.Val->getValueType(i));
          std::vector<SDOperand> Ops;
          for (unsigned i = 0; i < Op0.getNumOperands(); ++i)
            Ops.push_back(Op0.getOperand(i));
          Op0 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
        }

        CC   = Op0.getOperand(0);
        Cond = Op0.getOperand(1);
        // Make a copy as flag result cannot be used by more than one.
        Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
                           Cond.getOperand(0), Cond.getOperand(1));
        addTest =
          isFPStack && !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
      } else
        addTest = true;
    } else
      addTest = true;

    if (addTest) {
      CC = DAG.getConstant(X86ISD::COND_NE, MVT::i8);
      Cond = DAG.getNode(X86ISD::TEST, MVT::Flag, Op0, Op0);
    }

    std::vector<MVT::ValueType> Tys;
    Tys.push_back(Op.getValueType());
    Tys.push_back(MVT::Flag);
    std::vector<SDOperand> Ops;
    // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
    // condition is true.
    Ops.push_back(Op.getOperand(2));
    Ops.push_back(Op.getOperand(1));
    Ops.push_back(CC);
    Ops.push_back(Cond);
    return DAG.getNode(X86ISD::CMOV, Tys, Ops);
  }
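  // BRCOND follows the same pattern as SELECT, branching on the flag.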
  case ISD::BRCOND: {
    bool addTest = false;
    SDOperand Cond = Op.getOperand(1);
    SDOperand Dest = Op.getOperand(2);
    SDOperand CC;
    if (Cond.getOpcode() == ISD::SETCC)
      Cond = LowerOperation(Cond, DAG);

    if (Cond.getOpcode() == X86ISD::SETCC) {
      // If condition flag is set by a X86ISD::CMP, then make a copy of it
      // (since flag operand cannot be shared). If the X86ISD::SETCC does not
      // have another use it will be eliminated.
      // If the X86ISD::SETCC has more than one use, then it's probably better
      // to use a test instead of duplicating the X86ISD::CMP (for register
      // pressure reasons).
      if (Cond.getOperand(1).getOpcode() == X86ISD::CMP) {
        if (!Cond.hasOneUse()) {
          std::vector<MVT::ValueType> Tys;
          for (unsigned i = 0; i < Cond.Val->getNumValues(); ++i)
            Tys.push_back(Cond.Val->getValueType(i));
          std::vector<SDOperand> Ops;
          for (unsigned i = 0; i < Cond.getNumOperands(); ++i)
            Ops.push_back(Cond.getOperand(i));
          Cond = DAG.getNode(X86ISD::SETCC, Tys, Ops);
        }

        CC = Cond.getOperand(0);
        Cond = Cond.getOperand(1);
        // Make a copy as flag result cannot be used by more than one.
        Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
                           Cond.getOperand(0), Cond.getOperand(1));
      } else
        addTest = true;
    } else
      addTest = true;

    if (addTest) {
      CC = DAG.getConstant(X86ISD::COND_NE, MVT::i8);
      Cond = DAG.getNode(X86ISD::TEST, MVT::Flag, Cond, Cond);
    }
    return DAG.getNode(X86ISD::BRCOND, Op.getValueType(),
                       Op.getOperand(0), Op.getOperand(2), CC, Cond);
  }
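  // MEMSET is lowered either to a call to the memset library function or,
  // for suitably aligned constant sizes, to an inline rep;stos sequence.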
  case ISD::MEMSET: {
    SDOperand InFlag(0, 0);
    SDOperand Chain = Op.getOperand(0);
    unsigned Align =
      (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
    if (Align == 0) Align = 1;

    ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3));
    // If not DWORD aligned, call memset if size is less than the threshold.
    // It knows how to align to the right boundary first.
    if ((Align & 3) != 0 ||
        (I && I->getValue() < Subtarget->getMinRepStrSizeThreshold())) {
      MVT::ValueType IntPtr = getPointerTy();
      const Type *IntPtrTy = getTargetData().getIntPtrType();
      std::vector<std::pair<SDOperand, const Type*> > Args;
      Args.push_back(std::make_pair(Op.getOperand(1), IntPtrTy));
      // Extend the ubyte argument to be an int value for the call.
      SDOperand Val = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op.getOperand(2));
      Args.push_back(std::make_pair(Val, IntPtrTy));
      Args.push_back(std::make_pair(Op.getOperand(3), IntPtrTy));
      std::pair<SDOperand,SDOperand> CallResult =
        LowerCallTo(Chain, Type::VoidTy, false, CallingConv::C, false,
                    DAG.getExternalSymbol("memset", IntPtr), Args, DAG);
      return CallResult.second;
    }
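
    // Otherwise emit rep;stos inline. With a constant fill value the store
    // unit can be widened: the byte is replicated into AX or EAX so each
    // iteration stores 2 or 4 bytes.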
    MVT::ValueType AVT;
    SDOperand Count;
    ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    unsigned BytesLeft = 0;
    bool TwoRepStos = false;
    if (ValC) {
      unsigned ValReg;
      unsigned Val = ValC->getValue() & 255;

      // If the value is a constant, then we can potentially use larger sets.
      switch (Align & 3) {
      case 2:   // WORD aligned
        AVT = MVT::i16;
        Count = DAG.getConstant(I->getValue() / 2, MVT::i32);
        BytesLeft = I->getValue() % 2;
        Val    = (Val << 8) | Val;
        ValReg = X86::AX;
        break;
      case 0:   // DWORD aligned
        AVT = MVT::i32;
        if (I) {
          Count = DAG.getConstant(I->getValue() / 4, MVT::i32);
          BytesLeft = I->getValue() % 4;
        } else {
          Count = DAG.getNode(ISD::SRL, MVT::i32, Op.getOperand(3),
                              DAG.getConstant(2, MVT::i8));
          TwoRepStos = true;
        }
        Val = (Val << 8)  | Val;
        Val = (Val << 16) | Val;
        ValReg = X86::EAX;
        break;
      default:  // Byte aligned
        AVT = MVT::i8;
        Count = Op.getOperand(3);
        ValReg = X86::AL;
        break;
      }

      Chain  = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT),
                                InFlag);
      InFlag = Chain.getValue(1);
    } else {
      AVT = MVT::i8;
      Count = Op.getOperand(3);
      Chain  = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag);
      InFlag = Chain.getValue(1);
    }
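
    // rep;stos implicitly takes the count in ECX, the destination in EDI,
    // and the value in AL/AX/EAX, so pin the operands to those registers.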
    Chain = DAG.getCopyToReg(Chain, X86::ECX, Count, InFlag);
    InFlag = Chain.getValue(1);
    Chain = DAG.getCopyToReg(Chain, X86::EDI, Op.getOperand(1), InFlag);
    InFlag = Chain.getValue(1);

    std::vector<MVT::ValueType> Tys;
    Tys.push_back(MVT::Other);
    Tys.push_back(MVT::Flag);
    std::vector<SDOperand> Ops;
    Ops.push_back(Chain);
    Ops.push_back(DAG.getValueType(AVT));
    Ops.push_back(InFlag);
    Chain = DAG.getNode(X86ISD::REP_STOS, Tys, Ops);

    if (TwoRepStos) {
      InFlag = Chain.getValue(1);
      Count = Op.getOperand(3);
      MVT::ValueType CVT = Count.getValueType();
      SDOperand Left = DAG.getNode(ISD::AND, CVT, Count,
                                   DAG.getConstant(3, CVT));
      Chain = DAG.getCopyToReg(Chain, X86::ECX, Left, InFlag);
      InFlag = Chain.getValue(1);
      Tys.clear();
      Tys.push_back(MVT::Other);
      Tys.push_back(MVT::Flag);
      Ops.clear();
      Ops.push_back(Chain);
      Ops.push_back(DAG.getValueType(MVT::i8));
      Ops.push_back(InFlag);
      Chain = DAG.getNode(X86ISD::REP_STOS, Tys, Ops);
    } else if (BytesLeft) {
      // Issue stores for the last 1 - 3 bytes.
      SDOperand Value;
      unsigned Val = ValC->getValue() & 255;
      unsigned Offset = I->getValue() - BytesLeft;
      SDOperand DstAddr = Op.getOperand(1);
      MVT::ValueType AddrVT = DstAddr.getValueType();
      if (BytesLeft >= 2) {
        Value = DAG.getConstant((Val << 8) | Val, MVT::i16);
        Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
                            DAG.getNode(ISD::ADD, AddrVT, DstAddr,
                                        DAG.getConstant(Offset, AddrVT)),
                            DAG.getSrcValue(NULL));
        BytesLeft -= 2;
        Offset += 2;
      }

      if (BytesLeft == 1) {
        Value = DAG.getConstant(Val, MVT::i8);
        Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
                            DAG.getNode(ISD::ADD, AddrVT, DstAddr,
                                        DAG.getConstant(Offset, AddrVT)),
                            DAG.getSrcValue(NULL));
      }
    }

    return Chain;
  }
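  // MEMCPY mirrors MEMSET: call the memcpy library function for small or
  // misaligned copies, otherwise emit rep;movs with the widest unit the
  // alignment allows.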
  case ISD::MEMCPY: {
    SDOperand Chain = Op.getOperand(0);
    unsigned Align =
      (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
    if (Align == 0) Align = 1;

    ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3));
    // If not DWORD aligned, call memcpy if size is less than the threshold.
    // It knows how to align to the right boundary first.
    if ((Align & 3) != 0 ||
        (I && I->getValue() < Subtarget->getMinRepStrSizeThreshold())) {
      MVT::ValueType IntPtr = getPointerTy();
      const Type *IntPtrTy = getTargetData().getIntPtrType();
      std::vector<std::pair<SDOperand, const Type*> > Args;
      Args.push_back(std::make_pair(Op.getOperand(1), IntPtrTy));
      Args.push_back(std::make_pair(Op.getOperand(2), IntPtrTy));
      Args.push_back(std::make_pair(Op.getOperand(3), IntPtrTy));
      std::pair<SDOperand,SDOperand> CallResult =
        LowerCallTo(Chain, Type::VoidTy, false, CallingConv::C, false,
                    DAG.getExternalSymbol("memcpy", IntPtr), Args, DAG);
      return CallResult.second;
    }

    MVT::ValueType AVT;
    SDOperand Count;
    unsigned BytesLeft = 0;
    bool TwoRepMovs = false;
    switch (Align & 3) {
    case 2:   // WORD aligned
      AVT = MVT::i16;
      Count = DAG.getConstant(I->getValue() / 2, MVT::i32);
      BytesLeft = I->getValue() % 2;
      break;
    case 0:   // DWORD aligned
      AVT = MVT::i32;
      if (I) {
        Count = DAG.getConstant(I->getValue() / 4, MVT::i32);
        BytesLeft = I->getValue() % 4;
      } else {
        Count = DAG.getNode(ISD::SRL, MVT::i32, Op.getOperand(3),
                            DAG.getConstant(2, MVT::i8));
        TwoRepMovs = true;
      }
      break;
    default:  // Byte aligned
      AVT = MVT::i8;
      Count = Op.getOperand(3);
      break;
    }
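
    // rep;movs takes the count in ECX, the destination in EDI, and the
    // source in ESI.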
    SDOperand InFlag(0, 0);
    Chain = DAG.getCopyToReg(Chain, X86::ECX, Count, InFlag);
    InFlag = Chain.getValue(1);
    Chain = DAG.getCopyToReg(Chain, X86::EDI, Op.getOperand(1), InFlag);
    InFlag = Chain.getValue(1);
    Chain = DAG.getCopyToReg(Chain, X86::ESI, Op.getOperand(2), InFlag);
    InFlag = Chain.getValue(1);

    std::vector<MVT::ValueType> Tys;
    Tys.push_back(MVT::Other);
    Tys.push_back(MVT::Flag);
    std::vector<SDOperand> Ops;
    Ops.push_back(Chain);
    Ops.push_back(DAG.getValueType(AVT));
    Ops.push_back(InFlag);
    Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, Ops);

    if (TwoRepMovs) {
      InFlag = Chain.getValue(1);
      Count = Op.getOperand(3);
      MVT::ValueType CVT = Count.getValueType();
      SDOperand Left = DAG.getNode(ISD::AND, CVT, Count,
                                   DAG.getConstant(3, CVT));
      Chain = DAG.getCopyToReg(Chain, X86::ECX, Left, InFlag);
      InFlag = Chain.getValue(1);
      Tys.clear();
      Tys.push_back(MVT::Other);
      Tys.push_back(MVT::Flag);
      Ops.clear();
      Ops.push_back(Chain);
      Ops.push_back(DAG.getValueType(MVT::i8));
      Ops.push_back(InFlag);
      Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, Ops);
    } else if (BytesLeft) {
      // Issue loads and stores for the last 1 - 3 bytes.
      unsigned Offset = I->getValue() - BytesLeft;
      SDOperand DstAddr = Op.getOperand(1);
      MVT::ValueType DstVT = DstAddr.getValueType();
      SDOperand SrcAddr = Op.getOperand(2);
      MVT::ValueType SrcVT = SrcAddr.getValueType();
      SDOperand Value;
      if (BytesLeft >= 2) {
        Value = DAG.getLoad(MVT::i16, Chain,
                            DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
                                        DAG.getConstant(Offset, SrcVT)),
                            DAG.getSrcValue(NULL));
        Chain = Value.getValue(1);
        Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
                            DAG.getNode(ISD::ADD, DstVT, DstAddr,
                                        DAG.getConstant(Offset, DstVT)),
                            DAG.getSrcValue(NULL));
        BytesLeft -= 2;
        Offset += 2;
      }

      if (BytesLeft == 1) {
        Value = DAG.getLoad(MVT::i8, Chain,
                            DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
                                        DAG.getConstant(Offset, SrcVT)),
                            DAG.getSrcValue(NULL));
        Chain = Value.getValue(1);
        Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
                            DAG.getNode(ISD::ADD, DstVT, DstAddr,
                                        DAG.getConstant(Offset, DstVT)),
                            DAG.getSrcValue(NULL));
      }
    }

    return Chain;
  }

  // ConstantPool, GlobalAddress, and ExternalSymbol are lowered as their
  // target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
  // one of the above mentioned nodes. It has to be wrapped because otherwise
  // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
  // be used to form addressing mode. These wrapped nodes will be selected
  // into MOV32ri.
  case ISD::ConstantPool: {
    ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
    SDOperand Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(),
                         DAG.getTargetConstantPool(CP->get(), getPointerTy(),
                                                   CP->getAlignment()));
    if (Subtarget->isTargetDarwin()) {
      // With PIC, the address is actually $g + Offset.
      if (getTargetMachine().getRelocationModel() == Reloc::PIC)
        Result = DAG.getNode(ISD::ADD, getPointerTy(),
                 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), Result);
    }

    return Result;
  }
  case ISD::GlobalAddress: {
    GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
    SDOperand Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(),
                         DAG.getTargetGlobalAddress(GV, getPointerTy()));
    if (Subtarget->isTargetDarwin()) {
      // With PIC, the address is actually $g + Offset.
      if (getTargetMachine().getRelocationModel() == Reloc::PIC)
        Result = DAG.getNode(ISD::ADD, getPointerTy(),
                 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), Result);

      // For Darwin, external and weak symbols are indirect, so we want to load
      // the value at address GV, not the value of GV itself. This means that
      // the GlobalAddress must be in the base or index register of the address,
      // not the GV offset field.
      if (getTargetMachine().getRelocationModel() != Reloc::Static &&
          DarwinGVRequiresExtraLoad(GV))
        Result = DAG.getLoad(MVT::i32, DAG.getEntryNode(),
                             Result, DAG.getSrcValue(NULL));
    }

    return Result;
  }
  case ISD::ExternalSymbol: {
    const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
    SDOperand Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(),
                         DAG.getTargetExternalSymbol(Sym, getPointerTy()));
    if (Subtarget->isTargetDarwin()) {
      // With PIC, the address is actually $g + Offset.
      if (getTargetMachine().getRelocationModel() == Reloc::PIC)
        Result = DAG.getNode(ISD::ADD, getPointerTy(),
                 DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), Result);
    }

    return Result;
  }
  case ISD::VASTART: {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
    // FIXME: Replace MVT::i32 with PointerTy
    SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32);
    return DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0), FR,
                       Op.getOperand(1), Op.getOperand(2));
  }
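  // RET is lowered to X86ISD::RET_FLAG. Integer results are returned in
  // EAX (or EDX:EAX for two-register values); FP results are returned on
  // the top of the x87 stack.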
  case ISD::RET: {
    SDOperand Copy;

    switch(Op.getNumOperands()) {
    default:
      assert(0 && "Do not know how to return this many arguments!");
      abort();
    case 1:
      return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Op.getOperand(0),
                         DAG.getConstant(getBytesToPopOnReturn(), MVT::i16));
    case 2: {
      MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
      if (MVT::isInteger(ArgVT))
        Copy = DAG.getCopyToReg(Op.getOperand(0), X86::EAX, Op.getOperand(1),
                                SDOperand());
      else if (!X86ScalarSSE) {
        std::vector<MVT::ValueType> Tys;
        Tys.push_back(MVT::Other);
        Tys.push_back(MVT::Flag);
        std::vector<SDOperand> Ops;
        Ops.push_back(Op.getOperand(0));
        Ops.push_back(Op.getOperand(1));
        Copy = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops);
      } else {
        SDOperand MemLoc;
        SDOperand Chain = Op.getOperand(0);
        SDOperand Value = Op.getOperand(1);

        if (Value.getOpcode() == ISD::LOAD &&
            (Chain == Value.getValue(1) || Chain == Value.getOperand(0))) {
          Chain  = Value.getOperand(0);
          MemLoc = Value.getOperand(1);
        } else {
          // Spill the value to memory and reload it into top of stack.
          unsigned Size = MVT::getSizeInBits(ArgVT)/8;
          MachineFunction &MF = DAG.getMachineFunction();
          int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
          MemLoc = DAG.getFrameIndex(SSFI, getPointerTy());
          Chain = DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0),
                              Value, MemLoc, DAG.getSrcValue(0));
        }
        std::vector<MVT::ValueType> Tys;
        Tys.push_back(MVT::f64);
        Tys.push_back(MVT::Other);
        std::vector<SDOperand> Ops;
        Ops.push_back(Chain);
        Ops.push_back(MemLoc);
        Ops.push_back(DAG.getValueType(ArgVT));
        Copy = DAG.getNode(X86ISD::FLD, Tys, Ops);
        Tys.clear();
        Tys.push_back(MVT::Other);
        Tys.push_back(MVT::Flag);
        Ops.clear();
        Ops.push_back(Copy.getValue(1));
        Ops.push_back(Copy);
        Copy = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops);
      }
      break;
    }
    case 3:
      Copy = DAG.getCopyToReg(Op.getOperand(0), X86::EDX, Op.getOperand(2),
                              SDOperand());
      Copy = DAG.getCopyToReg(Copy, X86::EAX,Op.getOperand(1),Copy.getValue(1));
      break;
    }
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other,
                       Copy, DAG.getConstant(getBytesToPopOnReturn(), MVT::i16),
                       Copy.getValue(1));
  }
  case ISD::SCALAR_TO_VECTOR: {
    SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0));
    return DAG.getNode(X86ISD::S2VEC, Op.getValueType(), AnyExt);
  }
  case ISD::VECTOR_SHUFFLE: {
    SDOperand V1 = Op.getOperand(0);
    SDOperand V2 = Op.getOperand(1);
    SDOperand PermMask = Op.getOperand(2);
    MVT::ValueType VT = Op.getValueType();
    unsigned NumElems = PermMask.getNumOperands();

    // Splat && PSHUFD's 2nd vector must be undef.
    if (X86::isSplatMask(PermMask.Val)) {
      if (V2.getOpcode() != ISD::UNDEF)
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
                           DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask);
      return Op;
    }

    if (X86::isUNPCKLMask(PermMask.Val) ||
        X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
        X86::isUNPCKHMask(PermMask.Val))
      // Leave the VECTOR_SHUFFLE alone. It matches {P}UNPCKL*.
      return Op;

    if (NumElems == 2)
      return NormalizeVectorShuffle(Op, DAG);

    // If VT is integer, try PSHUF* first, then SHUFP*.
    if (MVT::isInteger(VT)) {
      if (X86::isPSHUFDMask(PermMask.Val) ||
          X86::isPSHUFHWMask(PermMask.Val) ||
          X86::isPSHUFLWMask(PermMask.Val)) {
        if (V2.getOpcode() != ISD::UNDEF)
          return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
                             DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask);
        return Op;
      }

      if (X86::isSHUFPMask(PermMask.Val))
        return NormalizeVectorShuffle(Op, DAG);

      // Handle v8i16 shuffle high / low shuffle node pair.
      if (VT == MVT::v8i16 && isPSHUFHW_PSHUFLWMask(PermMask.Val)) {
        MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
        MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
        std::vector<SDOperand> MaskVec;
        for (unsigned i = 0; i != 4; ++i)
          MaskVec.push_back(PermMask.getOperand(i));
        for (unsigned i = 4; i != 8; ++i)
          MaskVec.push_back(DAG.getConstant(i, BaseVT));
        SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskVec);
        V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
        MaskVec.clear();
        for (unsigned i = 0; i != 4; ++i)
          MaskVec.push_back(DAG.getConstant(i, BaseVT));
        for (unsigned i = 4; i != 8; ++i)
          MaskVec.push_back(PermMask.getOperand(i));
        Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskVec);
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
      }
    } else {
      // Floating point cases in the other order.
      if (X86::isSHUFPMask(PermMask.Val))
        return NormalizeVectorShuffle(Op, DAG);
      if (X86::isPSHUFDMask(PermMask.Val) ||
          X86::isPSHUFHWMask(PermMask.Val) ||
          X86::isPSHUFLWMask(PermMask.Val)) {
        if (V2.getOpcode() != ISD::UNDEF)
          return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
                             DAG.getNode(ISD::UNDEF, V1.getValueType()),PermMask);
        return Op;
      }
    }

    return SDOperand();
  }
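  // BUILD_VECTOR is left alone for the all-ones pattern (pcmpeqd), turned
  // into a zero-extending scalar-to-vector when only element 0 is nonzero,
  // and otherwise expanded into a tree of unpckl shuffles.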
  case ISD::BUILD_VECTOR: {
    // All-ones vectors are handled with pcmpeqd.
    if (ISD::isBuildVectorAllOnes(Op.Val))
      return Op;

    std::set<SDOperand> Values;
    SDOperand Elt0 = Op.getOperand(0);
    Values.insert(Elt0);
    bool Elt0IsZero = (isa<ConstantSDNode>(Elt0) &&
                       cast<ConstantSDNode>(Elt0)->getValue() == 0) ||
                      (isa<ConstantFPSDNode>(Elt0) &&
                       cast<ConstantFPSDNode>(Elt0)->isExactlyValue(0.0));
    bool RestAreZero = true;
    unsigned NumElems = Op.getNumOperands();
    for (unsigned i = 1; i < NumElems; ++i) {
      SDOperand Elt = Op.getOperand(i);
      if (ConstantFPSDNode *FPC = dyn_cast<ConstantFPSDNode>(Elt)) {
        if (!FPC->isExactlyValue(+0.0))
          RestAreZero = false;
      } else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
        if (!C->isNullValue())
          RestAreZero = false;
      } else
        RestAreZero = false;
      Values.insert(Elt);
    }

    if (RestAreZero) {
      if (Elt0IsZero) return Op;

      // Zero extend a scalar to a vector.
      return DAG.getNode(X86ISD::ZEXT_S2VEC, Op.getValueType(), Elt0);
    }

    if (Values.size() > 2) {
      // Expand into a number of unpckl*.
      // e.g. for v4f32
      //   Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
      //         : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
      //   Step 2: unpcklps X, Y ==>    <3, 2, 1, 0>
      MVT::ValueType VT = Op.getValueType();
      MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
      MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
      std::vector<SDOperand> MaskVec;
      for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
        MaskVec.push_back(DAG.getConstant(i, BaseVT));
        MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT));
      }
      SDOperand PermMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskVec);
      std::vector<SDOperand> V(NumElems);
      for (unsigned i = 0; i < NumElems; ++i)
        V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
      NumElems >>= 1;
      while (NumElems != 0) {
        for (unsigned i = 0; i < NumElems; ++i)
          V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems],
                             PermMask);
        NumElems >>= 1;
      }
      return V[0];
    }

    return SDOperand();
  }
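  // Extracting element 0 is free; any other element is first shuffled into
  // slot 0 and then extracted from there.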
  case ISD::EXTRACT_VECTOR_ELT: {
    if (!isa<ConstantSDNode>(Op.getOperand(1)))
      return SDOperand();

    MVT::ValueType VT = Op.getValueType();
    if (MVT::getSizeInBits(VT) == 16) {
      // Transform it so it matches pextrw which produces a 32-bit result.
      MVT::ValueType EVT = (MVT::ValueType)(VT+1);
      SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT,
                                      Op.getOperand(0), Op.getOperand(1));
      SDOperand Assert  = DAG.getNode(ISD::AssertZext, EVT, Extract,
                                      DAG.getValueType(VT));
      return DAG.getNode(ISD::TRUNCATE, VT, Assert);
    } else if (MVT::getSizeInBits(VT) == 32) {
      SDOperand Vec = Op.getOperand(0);
      unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
      if (Idx == 0)
        return Op;

      // TODO: if Idx == 2, we can use unpckhps
      // SHUFPS the element to the lowest double word, then movss.
      MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
      SDOperand IdxNode = DAG.getConstant((Idx < 2) ? Idx : Idx+4,
                                          MVT::getVectorBaseType(MaskVT));
      std::vector<SDOperand> IdxVec;
      IdxVec.push_back(DAG.getConstant(Idx, MVT::getVectorBaseType(MaskVT)));
      IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
      IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
      IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
      SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, IdxVec);
      Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
                        Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
      return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
                         DAG.getConstant(0, MVT::i32));
    } else if (MVT::getSizeInBits(VT) == 64) {
      SDOperand Vec = Op.getOperand(0);
      unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
      if (Idx == 0)
        return Op;

      // UNPCKHPD the element to the lowest double word, then movsd.
      // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
      // to a f64mem, the whole operation is folded into a single MOVHPDmr.
      MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
      std::vector<SDOperand> IdxVec;
      IdxVec.push_back(DAG.getConstant(1, MVT::getVectorBaseType(MaskVT)));
      IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
      SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, IdxVec);
      Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
                        Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
      return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
                         DAG.getConstant(0, MVT::i32));
    }

    return SDOperand();
  }
  case ISD::INSERT_VECTOR_ELT: {
    // Transform it so it matches pinsrw which expects a 16-bit value in an
    // R32 register as its second argument.
    MVT::ValueType VT = Op.getValueType();
    MVT::ValueType BaseVT = MVT::getVectorBaseType(VT);
    if (MVT::getSizeInBits(BaseVT) == 16) {
      SDOperand N1 = Op.getOperand(1);
      SDOperand N2 = Op.getOperand(2);
      if (N1.getValueType() != MVT::i32)
        N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1);
      if (N2.getValueType() != MVT::i32)
        N2 = DAG.getConstant(cast<ConstantSDNode>(N2)->getValue(), MVT::i32);
      return DAG.getNode(X86ISD::PINSRW, VT, Op.getOperand(0), N1, N2);
    }

    return SDOperand();
  }
  }
}

const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return NULL;
  case X86ISD::SHLD:               return "X86ISD::SHLD";
  case X86ISD::SHRD:               return "X86ISD::SHRD";
  case X86ISD::FAND:               return "X86ISD::FAND";
  case X86ISD::FXOR:               return "X86ISD::FXOR";
  case X86ISD::FILD:               return "X86ISD::FILD";
  case X86ISD::FILD_FLAG:          return "X86ISD::FILD_FLAG";
  case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
  case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
  case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
  case X86ISD::FLD:                return "X86ISD::FLD";
  case X86ISD::FST:                return "X86ISD::FST";
  case X86ISD::FP_GET_RESULT:      return "X86ISD::FP_GET_RESULT";
  case X86ISD::FP_SET_RESULT:      return "X86ISD::FP_SET_RESULT";
  case X86ISD::CALL:               return "X86ISD::CALL";
  case X86ISD::TAILCALL:           return "X86ISD::TAILCALL";
  case X86ISD::RDTSC_DAG:          return "X86ISD::RDTSC_DAG";
  case X86ISD::CMP:                return "X86ISD::CMP";
  case X86ISD::TEST:               return "X86ISD::TEST";
  case X86ISD::SETCC:              return "X86ISD::SETCC";
  case X86ISD::CMOV:               return "X86ISD::CMOV";
  case X86ISD::BRCOND:             return "X86ISD::BRCOND";
  case X86ISD::RET_FLAG:           return "X86ISD::RET_FLAG";
  case X86ISD::REP_STOS:           return "X86ISD::REP_STOS";
  case X86ISD::REP_MOVS:           return "X86ISD::REP_MOVS";
  case X86ISD::LOAD_PACK:          return "X86ISD::LOAD_PACK";
  case X86ISD::GlobalBaseReg:      return "X86ISD::GlobalBaseReg";
  case X86ISD::Wrapper:            return "X86ISD::Wrapper";
  case X86ISD::S2VEC:              return "X86ISD::S2VEC";
  case X86ISD::ZEXT_S2VEC:         return "X86ISD::ZEXT_S2VEC";
  case X86ISD::PEXTRW:             return "X86ISD::PEXTRW";
  case X86ISD::PINSRW:             return "X86ISD::PINSRW";
  }
}

void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                       uint64_t Mask,
                                                       uint64_t &KnownZero,
                                                       uint64_t &KnownOne,
                                                       unsigned Depth) const {
  unsigned Opc = Op.getOpcode();
  assert((Opc >= ISD::BUILTIN_OP_END ||
          Opc == ISD::INTRINSIC_WO_CHAIN ||
          Opc == ISD::INTRINSIC_W_CHAIN ||
          Opc == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");

  KnownZero = KnownOne = 0;   // Don't know anything.
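  // X86ISD::SETCC produces a 0/1 value in an i8 register, so every bit
  // above bit zero is known to be zero.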
  switch (Opc) {
  default: break;
  case X86ISD::SETCC:
    KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
    break;
  }
}

std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    // FIXME: not handling fp-stack yet!
    // FIXME: not handling MMX registers yet ('y' constraint).
    switch (Constraint[0]) {      // GCC X86 Constraint Letters
    default: break;  // Unknown constraint letter
    case 'r':   // GENERAL_REGS
    case 'R':   // LEGACY_REGS
      return make_vector<unsigned>(X86::EAX, X86::EBX, X86::ECX, X86::EDX,
                                   X86::ESI, X86::EDI, X86::EBP, X86::ESP, 0);
    case 'l':   // INDEX_REGS
      return make_vector<unsigned>(X86::EAX, X86::EBX, X86::ECX, X86::EDX,
                                   X86::ESI, X86::EDI, X86::EBP, 0);
    case 'q':   // Q_REGS (GENERAL_REGS in 64-bit mode)
    case 'Q':   // Q_REGS
      return make_vector<unsigned>(X86::EAX, X86::EBX, X86::ECX, X86::EDX, 0);
    case 'x':   // SSE_REGS if SSE1 allowed
      if (Subtarget->hasSSE1())
        return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
                                     X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
                                     0);
      return std::vector<unsigned>();
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (Subtarget->hasSSE2())
        return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
                                     X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
                                     0);
      return std::vector<unsigned>();
    }
  }

  return std::vector<unsigned>();
}

/// isLegalAddressImmediate - Return true if the integer value or
/// GlobalValue can be used as the offset of the target addressing mode.
bool X86TargetLowering::isLegalAddressImmediate(int64_t V) const {
  // X86 allows a sign-extended 32-bit immediate field.
  return (V > -(1LL << 32) && V < (1LL << 32)-1);
}

bool X86TargetLowering::isLegalAddressImmediate(GlobalValue *GV) const {
  if (Subtarget->isTargetDarwin()) {
    Reloc::Model RModel = getTargetMachine().getRelocationModel();
    if (RModel == Reloc::Static)
      return true;
    else if (RModel == Reloc::DynamicNoPIC)
      return !DarwinGVRequiresExtraLoad(GV);
    else
      return false;
  } else
    return true;
}

/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool
X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const {
  // Only do shuffles on 128-bit vector types for now.
  if (MVT::getSizeInBits(VT) == 64) return false;
  return (Mask.Val->getNumOperands() == 2 ||
          X86::isSplatMask(Mask.Val) ||
          X86::isPSHUFDMask(Mask.Val) ||
          isPSHUFHW_PSHUFLWMask(Mask.Val) ||
          X86::isSHUFPMask(Mask.Val) ||
          X86::isUNPCKLMask(Mask.Val) ||
          X86::isUNPCKL_v_undef_Mask(Mask.Val) ||
          X86::isUNPCKHMask(Mask.Val));
}