//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
#include "llvm/Support/CommandLine.h"
static cl::opt<bool> EnableFastCC("enable-x86-fastcc", cl::Hidden,
                                  cl::desc("Enable fastcc on X86"));
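// Note: a cl::opt<bool> without cl::init defaults to false, so the fastcc
// paths guarded by EnableFastCC below are only exercised when
// -enable-x86-fastcc is passed on the command line (e.g. to llc).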
X86TargetLowering::X86TargetLowering(TargetMachine &TM)
  : TargetLowering(TM) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSE = Subtarget->hasSSE2();

  // Set up the TargetLowering object.

  // X86 is weird, it always uses i8 for shift amounts and setcc results.
  setShiftAmountType(MVT::i8);
  setSetCCResultType(MVT::i8);
  setSetCCResultContents(ZeroOrOneSetCCResult);
  setSchedulingPreference(SchedulingForRegPressure);
  setShiftAmountFlavor(Mask);   // shl X, 32 == shl X, 0
  setStackPointerRegisterToSaveRestore(X86::ESP);

  if (!Subtarget->isTargetDarwin())
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmpLongJmp(true);
  // Add legal addressing mode scale values.
  addLegalAddressScale(8);
  addLegalAddressScale(4);
  addLegalAddressScale(2);
  // Enter the ones which require both scale + index last. These are more
  // expensive.
  addLegalAddressScale(9);
  addLegalAddressScale(5);
  addLegalAddressScale(3);
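  // For reference: the x86 SIB byte encodes address = base + index*scale +
  // disp with scale in {1, 2, 4, 8}. Scales of 3, 5, and 9 are only reachable
  // by reusing the same value as the base register, e.g.
  //   lea (%eax,%eax,2), %ecx   ; ecx = eax*3
  // which is why they are registered last as the forms that need both a
  // scale and an index.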
  // Set up the register classes.
  addRegisterClass(MVT::i8, X86::R8RegisterClass);
  addRegisterClass(MVT::i16, X86::R16RegisterClass);
  addRegisterClass(MVT::i32, X86::R32RegisterClass);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i8   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i16  , Promote);

  if (X86ScalarSSE)
    // No SSE i64 SINT_TO_FP, so expand i32 UINT_TO_FP instead.
    setOperationAction(ISD::UINT_TO_FP     , MVT::i32  , Expand);
  else
    setOperationAction(ISD::UINT_TO_FP     , MVT::i32  , Promote);

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::SINT_TO_FP       , MVT::i8   , Promote);
  // SSE has no i16 to fp conversion, only i32.
  if (X86ScalarSSE)
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Promote);
  else {
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Custom);
    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
  }

  // We can handle SINT_TO_FP and FP_TO_SINT from/to i64 even though i64
  // isn't legal.
  setOperationAction(ISD::SINT_TO_FP       , MVT::i64  , Custom);
  setOperationAction(ISD::FP_TO_SINT       , MVT::i64  , Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_SINT       , MVT::i8   , Promote);

  if (X86ScalarSSE)
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Promote);
  else {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Custom);
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i8   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i16  , Promote);

  if (X86ScalarSSE && !Subtarget->hasSSE3())
    // Expand FP_TO_UINT into a select.
    // FIXME: We would like to use a Custom expander here eventually to do
    // the optimal thing for SSE vs. the default expansion in the legalizer.
    setOperationAction(ISD::FP_TO_UINT     , MVT::i32  , Expand);
  else
    // With SSE3 we can use fisttpll to convert to a signed i64.
    setOperationAction(ISD::FP_TO_UINT     , MVT::i32  , Promote);
  setOperationAction(ISD::BIT_CONVERT      , MVT::f32  , Expand);
  setOperationAction(ISD::BIT_CONVERT      , MVT::i32  , Expand);

  setOperationAction(ISD::BRCOND           , MVT::Other, Custom);
  setOperationAction(ISD::BR_CC            , MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC        , MVT::Other, Expand);
  setOperationAction(ISD::MEMMOVE          , MVT::Other, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8   , Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);
  setOperationAction(ISD::FP_ROUND_INREG   , MVT::f32  , Expand);
  setOperationAction(ISD::SEXTLOAD         , MVT::i1   , Expand);
  setOperationAction(ISD::FREM             , MVT::f64  , Expand);
  setOperationAction(ISD::CTPOP            , MVT::i8   , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i8   , Expand);
  setOperationAction(ISD::CTLZ             , MVT::i8   , Expand);
  setOperationAction(ISD::CTPOP            , MVT::i16  , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i16  , Expand);
  setOperationAction(ISD::CTLZ             , MVT::i16  , Expand);
  setOperationAction(ISD::CTPOP            , MVT::i32  , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i32  , Expand);
  setOperationAction(ISD::CTLZ             , MVT::i32  , Expand);
  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64  , Custom);
  setOperationAction(ISD::BSWAP            , MVT::i16  , Expand);
  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT           , MVT::i1   , Promote);
  setOperationAction(ISD::SELECT           , MVT::i8   , Promote);

  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT           , MVT::i16  , Custom);
  setOperationAction(ISD::SELECT           , MVT::i32  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f32  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f64  , Custom);
  setOperationAction(ISD::SETCC            , MVT::i8   , Custom);
  setOperationAction(ISD::SETCC            , MVT::i16  , Custom);
  setOperationAction(ISD::SETCC            , MVT::i32  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f32  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f64  , Custom);
  // X86 ret instruction may pop stack.
  setOperationAction(ISD::RET              , MVT::Other, Custom);

  setOperationAction(ISD::ConstantPool     , MVT::i32  , Custom);
  setOperationAction(ISD::JumpTable        , MVT::i32  , Custom);
  setOperationAction(ISD::GlobalAddress    , MVT::i32  , Custom);
  setOperationAction(ISD::ExternalSymbol   , MVT::i32  , Custom);
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS        , MVT::i32  , Custom);
  setOperationAction(ISD::SRA_PARTS        , MVT::i32  , Custom);
  setOperationAction(ISD::SRL_PARTS        , MVT::i32  , Custom);
  // X86 wants to expand memset / memcpy itself.
  setOperationAction(ISD::MEMSET           , MVT::Other, Custom);
  setOperationAction(ISD::MEMCPY           , MVT::Other, Custom);

  // We don't have line number support yet.
  setOperationAction(ISD::LOCATION, MVT::Other, Expand);
  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin())
    setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART          , MVT::Other, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VAARG            , MVT::Other, Expand);
  setOperationAction(ISD::VACOPY           , MVT::Other, Expand);
  setOperationAction(ISD::VAEND            , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE,         MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE,      MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand);

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  if (X86ScalarSSE) {
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f64, Custom);
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f64, Custom);
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN , MVT::f64, Expand);
    setOperationAction(ISD::FCOS , MVT::f64, Expand);
    setOperationAction(ISD::FREM , MVT::f64, Expand);
    setOperationAction(ISD::FSIN , MVT::f32, Expand);
    setOperationAction(ISD::FCOS , MVT::f32, Expand);
    setOperationAction(ISD::FREM , MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
    addLegalFPImmediate(+0.0); // xorps / xorpd
  } else {
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFPRegisterClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN           , MVT::f64  , Expand);
      setOperationAction(ISD::FCOS           , MVT::f64  , Expand);
    }

    setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
    addLegalFPImmediate(+0.0); // FLD0
    addLegalFPImmediate(+1.0); // FLD1
    addLegalFPImmediate(-0.0); // FLD0/FCHS
    addLegalFPImmediate(-1.0); // FLD1/FCHS
  }
  // First set operation action for all vector types to expand. Then we
  // will selectively turn on ones that can be effectively codegen'd.
  for (unsigned VT = (unsigned)MVT::Vector + 1;
       VT != (unsigned)MVT::LAST_VALUETYPE; VT++) {
    setOperationAction(ISD::ADD , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::SUB , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  (MVT::ValueType)VT, Expand);
  }
  if (Subtarget->hasMMX()) {
    addRegisterClass(MVT::v8i8,  X86::VR64RegisterClass);
    addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
    addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);

    // FIXME: add MMX packed arithmetics
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8,  Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Expand);
  }
  if (Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::AND,                MVT::v4f32, Legal);
    setOperationAction(ISD::OR,                 MVT::v4f32, Legal);
    setOperationAction(ISD::XOR,                MVT::v4f32, Legal);
    setOperationAction(ISD::ADD,                MVT::v4f32, Legal);
    setOperationAction(ISD::SUB,                MVT::v4f32, Legal);
    setOperationAction(ISD::MUL,                MVT::v4f32, Legal);
    setOperationAction(ISD::LOAD,               MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT,             MVT::v4f32, Custom);
  }
  if (Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD,                MVT::v2f64, Legal);
    setOperationAction(ISD::ADD,                MVT::v16i8, Legal);
    setOperationAction(ISD::ADD,                MVT::v8i16, Legal);
    setOperationAction(ISD::ADD,                MVT::v4i32, Legal);
    setOperationAction(ISD::SUB,                MVT::v2f64, Legal);
    setOperationAction(ISD::SUB,                MVT::v16i8, Legal);
    setOperationAction(ISD::SUB,                MVT::v8i16, Legal);
    setOperationAction(ISD::SUB,                MVT::v4i32, Legal);
    setOperationAction(ISD::MUL,                MVT::v8i16, Legal);
    setOperationAction(ISD::MUL,                MVT::v2f64, Legal);

    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4i32, Custom);
    // Implement v4f32 insert_vector_elt in terms of SSE2 v8i16 ones.
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4f32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::BUILD_VECTOR,       (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,     (MVT::ValueType)VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom);
    }
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2i64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::AND,    (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::AND,    (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::OR,     (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::OR,     (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::XOR,    (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::XOR,    (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::LOAD,   (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD,   (MVT::ValueType)VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
    }

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD,   MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD,   MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
  }
  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  computeRegisterProperties();

  // FIXME: These should be based on subtarget info. Plus, the values should
  // be smaller when we are optimizing for size.
  maxStoresPerMemset = 16;  // For %llvm.memset -> sequence of stores
  maxStoresPerMemcpy = 16;  // For %llvm.memcpy -> sequence of stores
  maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
  allowUnalignedMemoryAccesses = true; // x86 supports it!
}
std::vector<SDOperand>
X86TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
  std::vector<SDOperand> Args = TargetLowering::LowerArguments(F, DAG);

  FormalArgs.clear();
  FormalArgLocs.clear();

  // This sets BytesToPopOnReturn, BytesCallerReserves, etc. which have to be
  // set before the rest of the function can be lowered.
  if (F.getCallingConv() == CallingConv::Fast && EnableFastCC)
    PreprocessFastCCArguments(Args, F, DAG);
  else
    PreprocessCCCArguments(Args, F, DAG);
  return Args;
}
std::pair<SDOperand, SDOperand>
X86TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy,
                               bool isVarArg, unsigned CallingConv,
                               bool isTailCall,
                               SDOperand Callee, ArgListTy &Args,
                               SelectionDAG &DAG) {
  assert((!isVarArg || CallingConv == CallingConv::C) &&
         "Only C takes varargs!");

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
  else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());

  if (CallingConv == CallingConv::Fast && EnableFastCC)
    return LowerFastCCCallTo(Chain, RetTy, isTailCall, Callee, Args, DAG);
  return LowerCCCCallTo(Chain, RetTy, isVarArg, isTailCall, Callee, Args, DAG);
}
//===----------------------------------------------------------------------===//
// C Calling Convention implementation
//===----------------------------------------------------------------------===//
/// AddLiveIn - This helper function adds the specified physical register to the
/// MachineFunction as a live in value. It also creates a corresponding virtual
/// register for it.
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
                          TargetRegisterClass *RC) {
  assert(RC->contains(PReg) && "Not the correct regclass!");
  unsigned VReg = MF.getSSARegMap()->createVirtualRegister(RC);
  MF.addLiveIn(PReg, VReg);
  return VReg;
}
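// For example, the argument-lowering code below uses this as
//   unsigned Reg = AddLiveIn(MF, X86::XMM0, X86::VR128RegisterClass);
// to mark an incoming register argument live-in and obtain a virtual
// register that can be copied from at function entry.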
/// HowToPassCCCArgument - Returns how a formal argument of the specified type
/// should be passed. If it is through stack, returns the size of the stack
/// slot; if it is through an XMM register, returns the number of XMM registers
/// needed.
static void
HowToPassCCCArgument(MVT::ValueType ObjectVT, unsigned NumXMMRegs,
                     unsigned &ObjSize, unsigned &ObjXMMRegs) {
  switch (ObjectVT) {
  default: assert(0 && "Unhandled argument type!");
  case MVT::i1:
  case MVT::i8:  ObjSize = 1; break;
  case MVT::i16: ObjSize = 2; break;
  case MVT::i32: ObjSize = 4; break;
  case MVT::i64: ObjSize = 8; break;
  case MVT::f32: ObjSize = 4; break;
  case MVT::f64: ObjSize = 8; break;
  case MVT::v16i8:
  case MVT::v8i16:
  case MVT::v4i32:
  case MVT::v2i64:
  case MVT::v4f32:
  case MVT::v2f64:
    // The first three packed arguments are passed in XMM registers; the rest
    // go on the stack.
    if (NumXMMRegs < 3)
      ObjXMMRegs = 1;
    else
      ObjSize = 16;
    break;
  }
}
/// getFormalArgObjects - Returns itself if Op is a FORMAL_ARGUMENTS, otherwise
/// returns the FORMAL_ARGUMENTS node(s) that made up parts of the node.
static std::vector<SDOperand> getFormalArgObjects(SDOperand Op) {
  unsigned Opc = Op.getOpcode();
  std::vector<SDOperand> Objs;
  if (Opc == ISD::TRUNCATE) {
    Op = Op.getOperand(0);
    assert(Op.getOpcode() == ISD::AssertSext ||
           Op.getOpcode() == ISD::AssertZext);
    Objs.push_back(Op.getOperand(0));
  } else if (Opc == ISD::FP_ROUND || Opc == ISD::VBIT_CONVERT) {
    Objs.push_back(Op.getOperand(0));
  } else if (Opc == ISD::BUILD_PAIR) {
    Objs.push_back(Op.getOperand(0));
    Objs.push_back(Op.getOperand(1));
  } else {
    Objs.push_back(Op);
  }
  return Objs;
}
void X86TargetLowering::PreprocessCCCArguments(std::vector<SDOperand> Args,
                                               Function &F, SelectionDAG &DAG) {
  unsigned NumArgs = Args.size();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  // Add DAG nodes to load the arguments... On entry to a function on the X86,
  // the stack frame looks like this:
  //
  // [ESP] -- return address
  // [ESP + 4] -- first argument (leftmost lexically)
  // [ESP + 8] -- second argument, if first argument is four bytes in size
  //    ...
  //
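  // As a sketch of the layout the ArgOffset accounting below produces, for
  // f(int a, double b, int c) the incoming frame is:
  //   [ESP +  0] -- return address
  //   [ESP +  4] -- a (4 bytes)
  //   [ESP +  8] -- b (8 bytes)
  //   [ESP + 16] -- c (4 bytes)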
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot.
  unsigned NumXMMRegs = 0;  // XMM regs used for parameter passing.
  unsigned XMMArgRegs[] = { X86::XMM0, X86::XMM1, X86::XMM2 };
  for (unsigned i = 0; i < NumArgs; ++i) {
    SDOperand Op = Args[i];
    std::vector<SDOperand> Objs = getFormalArgObjects(Op);
    for (std::vector<SDOperand>::iterator I = Objs.begin(), E = Objs.end();
         I != E; ++I) {
      SDOperand Obj = *I;
      MVT::ValueType ObjectVT = Obj.getValueType();
      unsigned ArgIncrement = 4;
      unsigned ObjSize = 0;
      unsigned ObjXMMRegs = 0;
      HowToPassCCCArgument(ObjectVT, NumXMMRegs, ObjSize, ObjXMMRegs);
      if (ObjSize > 4)
        ArgIncrement = ObjSize;

      if (ObjXMMRegs) {
        // Passed in an XMM register.
        unsigned Reg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
                                 X86::VR128RegisterClass);
        std::pair<FALocInfo, FALocInfo> Loc =
          std::make_pair(FALocInfo(FALocInfo::LiveInRegLoc, Reg, ObjectVT),
                         FALocInfo());
        FormalArgLocs.push_back(Loc);
        NumXMMRegs += ObjXMMRegs;
      } else {
        // Create the frame index object for this incoming parameter...
        int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
        std::pair<FALocInfo, FALocInfo> Loc =
          std::make_pair(FALocInfo(FALocInfo::StackFrameLoc, FI), FALocInfo());
        FormalArgLocs.push_back(Loc);
        ArgOffset += ArgIncrement;   // Move on to the next argument...
      }
    }
  }
  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (F.isVarArg())
    VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
  ReturnAddrIndex = 0;     // No return address slot generated yet.
  BytesToPopOnReturn = 0;  // Callee pops nothing.
  BytesCallerReserves = ArgOffset;
}
void X86TargetLowering::LowerCCCArguments(SDOperand Op, SelectionDAG &DAG) {
  unsigned NumArgs = Op.Val->getNumValues();
  MachineFunction &MF = DAG.getMachineFunction();

  for (unsigned i = 0; i < NumArgs; ++i) {
    std::pair<FALocInfo, FALocInfo> Loc = FormalArgLocs[i];
    SDOperand ArgValue;
    if (Loc.first.Kind == FALocInfo::StackFrameLoc) {
      // Create the SelectionDAG nodes corresponding to a load from this
      // parameter.
      unsigned FI = FormalArgLocs[i].first.Loc;
      SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
      ArgValue = DAG.getLoad(Op.Val->getValueType(i),
                             DAG.getEntryNode(), FIN, DAG.getSrcValue(NULL));
    } else {
      // Must be a CopyFromReg.
      ArgValue = DAG.getCopyFromReg(DAG.getEntryNode(), Loc.first.Loc,
                                    Loc.first.Typ);
    }
    FormalArgs.push_back(ArgValue);
  }
}
std::pair<SDOperand, SDOperand>
X86TargetLowering::LowerCCCCallTo(SDOperand Chain, const Type *RetTy,
                                  bool isVarArg, bool isTailCall,
                                  SDOperand Callee, ArgListTy &Args,
                                  SelectionDAG &DAG) {
  // Count how many bytes are to be pushed on the stack.
  unsigned NumBytes = 0;

  // Keep track of the number of XMM regs passed so far.
  unsigned NumXMMRegs = 0;
  unsigned XMMArgRegs[] = { X86::XMM0, X86::XMM1, X86::XMM2 };

  std::vector<SDOperand> RegValuesToPass;
  if (Args.empty())
    // Save zero bytes.
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(0, getPointerTy()));
  else {
    for (unsigned i = 0, e = Args.size(); i != e; ++i)
      switch (getValueType(Args[i].second)) {
      default: assert(0 && "Unknown value type!");
      case MVT::i1:
      case MVT::i8:
      case MVT::i16:
      case MVT::i32:
      case MVT::f32:
        NumBytes += 4;
        break;
      case MVT::i64:
      case MVT::f64:
        NumBytes += 8;
        break;
      case MVT::Vector:
        // The first three packed values travel in XMM registers; the rest
        // take 16 bytes of stack each.
        if (NumXMMRegs < 3)
          ++NumXMMRegs;
        else
          NumBytes += 16;
        break;
      }

    Chain = DAG.getCALLSEQ_START(Chain,
                                 DAG.getConstant(NumBytes, getPointerTy()));
    // Arguments go on the stack in reverse order, as specified by the ABI.
    unsigned ArgOffset = 0;
    NumXMMRegs = 0;
    SDOperand StackPtr = DAG.getRegister(X86::ESP, MVT::i32);
    std::vector<SDOperand> Stores;
    for (unsigned i = 0, e = Args.size(); i != e; ++i) {
      switch (getValueType(Args[i].second)) {
      default: assert(0 && "Unexpected ValueType for argument!");
      case MVT::i1:
      case MVT::i8:
      case MVT::i16:
        // Promote the integer to 32 bits. If the input type is signed use a
        // sign extend, otherwise use a zero extend.
        if (Args[i].second->isSigned())
          Args[i].first = DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Args[i].first);
        else
          Args[i].first = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Args[i].first);
        // Fall through.
      case MVT::i32:
      case MVT::f32: {
        SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
        PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
        Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                     Args[i].first, PtrOff,
                                     DAG.getSrcValue(NULL)));
        ArgOffset += 4;
        break;
      }
      case MVT::i64:
      case MVT::f64: {
        SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
        PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
        Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                     Args[i].first, PtrOff,
                                     DAG.getSrcValue(NULL)));
        ArgOffset += 8;
        break;
      }
      case MVT::Vector:
        if (NumXMMRegs < 3) {
          // Pass this packed argument in an XMM register.
          RegValuesToPass.push_back(Args[i].first);
          ++NumXMMRegs;
        } else {
          SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
          PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
          Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                       Args[i].first, PtrOff,
                                       DAG.getSrcValue(NULL)));
          ArgOffset += 16;
        }
        break;
      }
    }

    if (!Stores.empty())
      Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);
  }
  std::vector<MVT::ValueType> RetVals;
  MVT::ValueType RetTyVT = getValueType(RetTy);
  RetVals.push_back(MVT::Other);

  // The result values produced have to be legal. Promote the result.
  switch (RetTyVT) {
  case MVT::isVoid: break;
  default:
    RetVals.push_back(RetTyVT);
    break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    RetVals.push_back(MVT::i32);
    break;
  case MVT::f32:
    if (X86ScalarSSE)
      RetVals.push_back(MVT::f32);
    else
      RetVals.push_back(MVT::f64);
    break;
  case MVT::i64:
    RetVals.push_back(MVT::i32);
    RetVals.push_back(MVT::i32);
    break;
  }
  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) {
    unsigned CCReg = XMMArgRegs[i];
    SDOperand RegToPass = RegValuesToPass[i];
    assert(RegToPass.getValueType() == MVT::Vector);
    unsigned NumElems =
      cast<ConstantSDNode>(*(RegToPass.Val->op_end()-2))->getValue();
    MVT::ValueType EVT = cast<VTSDNode>(*(RegToPass.Val->op_end()-1))->getVT();
    MVT::ValueType PVT = getVectorType(EVT, NumElems);
    SDOperand CCRegNode = DAG.getRegister(CCReg, PVT);
    RegToPass = DAG.getNode(ISD::VBIT_CONVERT, PVT, RegToPass);
    Chain = DAG.getCopyToReg(Chain, CCRegNode, RegToPass, InFlag);
    InFlag = Chain.getValue(1);
  }
  std::vector<MVT::ValueType> NodeTys;
  NodeTys.push_back(MVT::Other);   // Returns a chain
  NodeTys.push_back(MVT::Flag);    // Returns a flag for retval copy to use.
  std::vector<SDOperand> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  if (InFlag.Val)
    Ops.push_back(InFlag);

  // FIXME: Do not generate X86ISD::TAILCALL for now.
  Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops);
  InFlag = Chain.getValue(1);
  NodeTys.clear();
  NodeTys.push_back(MVT::Other);   // Returns a chain
  NodeTys.push_back(MVT::Flag);    // Returns a flag for retval copy to use.
  Ops.clear();
  Ops.push_back(Chain);
  Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
  Ops.push_back(DAG.getConstant(0, getPointerTy()));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, Ops);
  InFlag = Chain.getValue(1);
  SDOperand RetVal;
  if (RetTyVT != MVT::isVoid) {
    switch (RetTyVT) {
    default: assert(0 && "Unknown value type to return!");
    case MVT::i1:
    case MVT::i8:
      RetVal = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag);
      Chain = RetVal.getValue(1);
      if (RetTyVT == MVT::i1)
        RetVal = DAG.getNode(ISD::TRUNCATE, MVT::i1, RetVal);
      break;
    case MVT::i16:
      RetVal = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag);
      Chain = RetVal.getValue(1);
      break;
    case MVT::i32:
      RetVal = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
      Chain = RetVal.getValue(1);
      break;
    case MVT::i64: {
      SDOperand Lo = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
      SDOperand Hi = DAG.getCopyFromReg(Lo.getValue(1), X86::EDX, MVT::i32,
                                        Lo.getValue(2));
      RetVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Lo, Hi);
      Chain = Hi.getValue(1);
      break;
    }
    case MVT::f32:
    case MVT::f64: {
      std::vector<MVT::ValueType> Tys;
      Tys.push_back(MVT::f64);
      Tys.push_back(MVT::Other);
      Tys.push_back(MVT::Flag);
      std::vector<SDOperand> Ops;
      Ops.push_back(Chain);
      Ops.push_back(InFlag);
      RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, Ops);
      Chain  = RetVal.getValue(1);
      InFlag = RetVal.getValue(2);

      if (X86ScalarSSE) {
        // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
        // shouldn't be necessary except that RFP cannot be live across
        // multiple blocks. When stackifier is fixed, they can be uncoupled.
        MachineFunction &MF = DAG.getMachineFunction();
        int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
        SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
        Tys.clear();
        Tys.push_back(MVT::Other);
        Ops.clear();
        Ops.push_back(Chain);
        Ops.push_back(RetVal);
        Ops.push_back(StackSlot);
        Ops.push_back(DAG.getValueType(RetTyVT));
        Ops.push_back(InFlag);
        Chain = DAG.getNode(X86ISD::FST, Tys, Ops);
        RetVal = DAG.getLoad(RetTyVT, Chain, StackSlot,
                             DAG.getSrcValue(NULL));
        Chain = RetVal.getValue(1);
      }

      if (RetTyVT == MVT::f32 && !X86ScalarSSE)
        // FIXME: we would really like to remember that this FP_ROUND
        // operation is okay to eliminate if we allow excess FP precision.
        RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
      break;
    }
    case MVT::Vector: {
      const PackedType *PTy = cast<PackedType>(RetTy);
      MVT::ValueType EVT;
      MVT::ValueType LVT;
      unsigned NumRegs = getPackedTypeBreakdown(PTy, EVT, LVT);
      assert(NumRegs == 1 && "Unsupported type!");
      RetVal = DAG.getCopyFromReg(Chain, X86::XMM0, EVT, InFlag);
      Chain = RetVal.getValue(1);
      break;
    }
    }
  }

  return std::make_pair(RetVal, Chain);
}
//===----------------------------------------------------------------------===//
// Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
// The X86 'fast' calling convention passes up to two integer arguments in
// registers (an appropriate portion of EAX/EDX), passes arguments in C order,
// requires that the callee pop its arguments off the stack (allowing proper
// tail calls), and has the same return value conventions as C calling convs.
//
// This calling convention always arranges for the callee pop value to be 8n+4
// bytes, which is needed for tail recursion elimination and stack alignment
// reasons.
//
// Note that this can be enhanced in the future to pass fp vals in registers
// (when we have a global fp allocator) and do other tricks.
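// Worked example of the 8n+4 rule: a fastcc function taking two i32 stack
// arguments has ArgOffset == 8, which the rounding below bumps to 12
// (8*1 + 4). With the 4-byte return address pushed on top, the total is 16,
// so ESP stays 8-byte aligned across the call; a single i32 argument
// (ArgOffset == 4) already satisfies the rule and is left alone.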
// FASTCC_NUM_INT_ARGS_INREGS - This is the max number of integer arguments
// to pass in registers. 0 is none, 1 is "use EAX", 2 is "use EAX and
// EDX". Anything more is illegal.
//
// FIXME: The linscan register allocator currently has a problem with
// coalescing. At the time of this writing, whenever it decides to coalesce
// a physreg with a virtreg, this increases the size of the physreg's live
// range, and the live range cannot ever be reduced. This causes problems if
// too many physregs are coalesced with virtregs, which can cause the register
// allocator to wedge itself.
//
// This code triggers this problem more often if we pass args in registers,
// so disable it until this is fixed.
//
// NOTE: this isn't marked const, so that GCC doesn't emit annoying warnings
// about code being dead.
static unsigned FASTCC_NUM_INT_ARGS_INREGS = 0;
/// HowToPassFastCCArgument - Returns how a formal argument of the specified
/// type should be passed. If it is through stack, returns the size of the
/// stack slot; if it is through integer or XMM register, returns the number of
/// integer or XMM registers needed.
static void
HowToPassFastCCArgument(MVT::ValueType ObjectVT,
                        unsigned NumIntRegs, unsigned NumXMMRegs,
                        unsigned &ObjSize, unsigned &ObjIntRegs,
                        unsigned &ObjXMMRegs) {
  ObjSize = 0;
  ObjIntRegs = 0;
  ObjXMMRegs = 0;

  switch (ObjectVT) {
  default: assert(0 && "Unhandled argument type!");
  case MVT::i1:
  case MVT::i8:
    if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS)
      ObjIntRegs = 1;
    else
      ObjSize = 1;
    break;
  case MVT::i16:
    if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS)
      ObjIntRegs = 1;
    else
      ObjSize = 2;
    break;
  case MVT::i32:
    if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS)
      ObjIntRegs = 1;
    else
      ObjSize = 4;
    break;
  case MVT::i64:
    if (NumIntRegs+2 <= FASTCC_NUM_INT_ARGS_INREGS) {
      ObjIntRegs = 2;
    } else if (NumIntRegs+1 <= FASTCC_NUM_INT_ARGS_INREGS) {
      // Pass the low half in a register, the high half on the stack.
      ObjIntRegs = 1;
      ObjSize = 4;
    } else
      ObjSize = 8;
    break;
  case MVT::f32:
    ObjSize = 4;
    break;
  case MVT::f64:
    ObjSize = 8;
    break;
  case MVT::v16i8:
  case MVT::v8i16:
  case MVT::v4i32:
  case MVT::v2i64:
  case MVT::v4f32:
  case MVT::v2f64:
    // Packed values go in XMM registers when available, otherwise on the
    // stack.
    if (NumXMMRegs < 3)
      ObjXMMRegs = 1;
    else
      ObjSize = 16;
    break;
  }
}
void
X86TargetLowering::PreprocessFastCCArguments(std::vector<SDOperand> Args,
                                             Function &F, SelectionDAG &DAG) {
  unsigned NumArgs = Args.size();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  // Add DAG nodes to load the arguments... On entry to a function the stack
  // frame looks like this:
  //
  // [ESP] -- return address
  // [ESP + 4] -- first nonreg argument (leftmost lexically)
  // [ESP + 8] -- second nonreg argument, if first argument is 4 bytes in size
  //    ...
  //
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot.

  // Keep track of the number of integer regs passed so far. This can be
  // either 0 (neither EAX nor EDX used), 1 (EAX is used) or 2 (EAX and EDX
  // are both used).
  unsigned NumIntRegs = 0;
  unsigned NumXMMRegs = 0;  // XMM regs used for parameter passing.
  unsigned XMMArgRegs[] = { X86::XMM0, X86::XMM1, X86::XMM2 };
  for (unsigned i = 0; i < NumArgs; ++i) {
    SDOperand Op = Args[i];
    std::vector<SDOperand> Objs = getFormalArgObjects(Op);
    for (std::vector<SDOperand>::iterator I = Objs.begin(), E = Objs.end();
         I != E; ++I) {
      SDOperand Obj = *I;
      MVT::ValueType ObjectVT = Obj.getValueType();
      unsigned ArgIncrement = 4;
      unsigned ObjSize = 0;
      unsigned ObjIntRegs = 0;
      unsigned ObjXMMRegs = 0;

      HowToPassFastCCArgument(ObjectVT, NumIntRegs, NumXMMRegs,
                              ObjSize, ObjIntRegs, ObjXMMRegs);
      if (ObjSize > 4)
        ArgIncrement = ObjSize;

      unsigned Reg;
      std::pair<FALocInfo,FALocInfo> Loc = std::make_pair(FALocInfo(),
                                                          FALocInfo());
      if (ObjIntRegs || ObjXMMRegs) {
        switch (ObjectVT) {
        default: assert(0 && "Unhandled argument type!");
        case MVT::i1:
        case MVT::i8:
          Reg = AddLiveIn(MF, NumIntRegs ? X86::DL : X86::AL,
                          X86::R8RegisterClass);
          Loc.first.Kind = FALocInfo::LiveInRegLoc;
          Loc.first.Loc = Reg;
          Loc.first.Typ = MVT::i8;
          break;
        case MVT::i16:
          Reg = AddLiveIn(MF, NumIntRegs ? X86::DX : X86::AX,
                          X86::R16RegisterClass);
          Loc.first.Kind = FALocInfo::LiveInRegLoc;
          Loc.first.Loc = Reg;
          Loc.first.Typ = MVT::i16;
          break;
        case MVT::i32:
          Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX,
                          X86::R32RegisterClass);
          Loc.first.Kind = FALocInfo::LiveInRegLoc;
          Loc.first.Loc = Reg;
          Loc.first.Typ = MVT::i32;
          break;
        case MVT::i64:
          Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX,
                          X86::R32RegisterClass);
          Loc.first.Kind = FALocInfo::LiveInRegLoc;
          Loc.first.Loc = Reg;
          Loc.first.Typ = MVT::i32;
          if (ObjIntRegs == 2) {
            Reg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);
            Loc.second.Kind = FALocInfo::LiveInRegLoc;
            Loc.second.Loc = Reg;
            Loc.second.Typ = MVT::i32;
          }
          break;
        case MVT::v16i8:
        case MVT::v8i16:
        case MVT::v4i32:
        case MVT::v2i64:
        case MVT::v4f32:
        case MVT::v2f64:
          Reg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs], X86::VR128RegisterClass);
          Loc.first.Kind = FALocInfo::LiveInRegLoc;
          Loc.first.Loc = Reg;
          Loc.first.Typ = ObjectVT;
          break;
        }
        NumIntRegs += ObjIntRegs;
        NumXMMRegs += ObjXMMRegs;
      }
      if (ObjSize) {
        int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
        if (ObjectVT == MVT::i64 && ObjIntRegs) {
          Loc.second.Kind = FALocInfo::StackFrameLoc;
          Loc.second.Loc = FI;
        } else {
          Loc.first.Kind = FALocInfo::StackFrameLoc;
          Loc.first.Loc = FI;
        }
        ArgOffset += ArgIncrement;   // Move on to the next argument.
      }

      FormalArgLocs.push_back(Loc);
    }
  }
  // Round the byte count up to 8n+4 so that both the start of the arguments
  // and the stack after the retaddr has been pushed stay 8-byte aligned.
  if ((ArgOffset & 7) == 0)
    ArgOffset += 4;

  VarArgsFrameIndex = 0xAAAAAAA;   // fastcc functions can't have varargs.
  ReturnAddrIndex = 0;             // No return address slot generated yet.
  BytesToPopOnReturn = ArgOffset;  // Callee pops all stack arguments.
  BytesCallerReserves = 0;
  // Finally, inform the code generator which regs we return values in.
  switch (getValueType(F.getReturnType())) {
  default: assert(0 && "Unknown type!");
  case MVT::isVoid: break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    MF.addLiveOut(X86::EAX);
    break;
  case MVT::i64:
    MF.addLiveOut(X86::EAX);
    MF.addLiveOut(X86::EDX);
    break;
  case MVT::f32:
  case MVT::f64:
    MF.addLiveOut(X86::ST0);
    break;
  case MVT::Vector: {
    const PackedType *PTy = cast<PackedType>(F.getReturnType());
    MVT::ValueType EVT;
    MVT::ValueType LVT;
    unsigned NumRegs = getPackedTypeBreakdown(PTy, EVT, LVT);
    assert(NumRegs == 1 && "Unsupported type!");
    MF.addLiveOut(X86::XMM0);
    break;
  }
  }
}
void
X86TargetLowering::LowerFastCCArguments(SDOperand Op, SelectionDAG &DAG) {
  unsigned NumArgs = Op.Val->getNumValues();
  MachineFunction &MF = DAG.getMachineFunction();

  for (unsigned i = 0; i < NumArgs; ++i) {
    MVT::ValueType VT = Op.Val->getValueType(i);
    std::pair<FALocInfo, FALocInfo> Loc = FormalArgLocs[i];
    SDOperand ArgValue;
    if (Loc.first.Kind == FALocInfo::StackFrameLoc) {
      // Create the SelectionDAG nodes corresponding to a load from this
      // parameter.
      SDOperand FIN = DAG.getFrameIndex(Loc.first.Loc, MVT::i32);
      ArgValue = DAG.getLoad(Op.Val->getValueType(i), DAG.getEntryNode(), FIN,
                             DAG.getSrcValue(NULL));
    } else {
      // Must be a CopyFromReg.
      ArgValue = DAG.getCopyFromReg(DAG.getEntryNode(), Loc.first.Loc,
                                    Loc.first.Typ);
    }

    if (Loc.second.Kind != FALocInfo::None) {
      SDOperand ArgValue2;
      if (Loc.second.Kind == FALocInfo::StackFrameLoc) {
        // Create the SelectionDAG nodes corresponding to a load from this
        // parameter.
        SDOperand FIN = DAG.getFrameIndex(Loc.second.Loc, MVT::i32);
        ArgValue2 = DAG.getLoad(Op.Val->getValueType(i), DAG.getEntryNode(),
                                FIN, DAG.getSrcValue(NULL));
      } else {
        // Must be a CopyFromReg.
        ArgValue2 = DAG.getCopyFromReg(DAG.getEntryNode(),
                                       Loc.second.Loc, Loc.second.Typ);
      }
      ArgValue = DAG.getNode(ISD::BUILD_PAIR, VT, ArgValue, ArgValue2);
    }
    FormalArgs.push_back(ArgValue);
  }
}
std::pair<SDOperand, SDOperand>
X86TargetLowering::LowerFastCCCallTo(SDOperand Chain, const Type *RetTy,
                                     bool isTailCall, SDOperand Callee,
                                     ArgListTy &Args, SelectionDAG &DAG) {
  // Count how many bytes are to be pushed on the stack.
  unsigned NumBytes = 0;

  // Keep track of the number of integer regs passed so far. This can be
  // either 0 (neither EAX nor EDX used), 1 (EAX is used) or 2 (EAX and EDX
  // are both used).
  unsigned NumIntRegs = 0;

  for (unsigned i = 0, e = Args.size(); i != e; ++i)
    switch (getValueType(Args[i].second)) {
    default: assert(0 && "Unknown value type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
        ++NumIntRegs;
        break;
      }
      // Fall through.
    case MVT::f32:
      NumBytes += 4;
      break;
    case MVT::i64:
      if (NumIntRegs+2 <= FASTCC_NUM_INT_ARGS_INREGS) {
        NumIntRegs += 2;
        break;
      } else if (NumIntRegs+1 <= FASTCC_NUM_INT_ARGS_INREGS) {
        NumIntRegs = FASTCC_NUM_INT_ARGS_INREGS;
        NumBytes += 4;
        break;
      }
      // Fall through.
    case MVT::f64:
      NumBytes += 8;
      break;
    }

  // Round the byte count up to 8n+4 so that both the start of the arguments
  // and the stack after the retaddr has been pushed stay 8-byte aligned.
  if ((NumBytes & 7) == 0)
    NumBytes += 4;
  Chain = DAG.getCALLSEQ_START(Chain,
                               DAG.getConstant(NumBytes, getPointerTy()));

  // Arguments go on the stack in reverse order, as specified by the ABI.
  unsigned ArgOffset = 0;
  SDOperand StackPtr = DAG.getRegister(X86::ESP, MVT::i32);
  NumIntRegs = 0;
  std::vector<SDOperand> Stores;
  std::vector<SDOperand> RegValuesToPass;
  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
    switch (getValueType(Args[i].second)) {
    default: assert(0 && "Unexpected ValueType for argument!");
    case MVT::i1:
      Args[i].first = DAG.getNode(ISD::ANY_EXTEND, MVT::i8, Args[i].first);
      // Fall through.
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
        RegValuesToPass.push_back(Args[i].first);
        ++NumIntRegs;
        break;
      }
      // Fall through.
    case MVT::f32: {
      SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
      Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                   Args[i].first, PtrOff,
                                   DAG.getSrcValue(NULL)));
      ArgOffset += 4;
      break;
    }
    case MVT::i64:
      // Can pass (at least) part of it in regs?
      if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
        SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
                                   Args[i].first, DAG.getConstant(1, MVT::i32));
        SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
                                   Args[i].first, DAG.getConstant(0, MVT::i32));
        RegValuesToPass.push_back(Lo);
        ++NumIntRegs;

        // Pass both parts in regs?
        if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
          RegValuesToPass.push_back(Hi);
          ++NumIntRegs;
        } else {
          // Pass the high part in memory.
          SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
          PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
          Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                       Hi, PtrOff, DAG.getSrcValue(NULL)));
          ArgOffset += 4;
        }
        break;
      }
      // Fall through.
    case MVT::f64: {
      SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
      PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
      Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
                                   Args[i].first, PtrOff,
                                   DAG.getSrcValue(NULL)));
      ArgOffset += 8;
      break;
    }
    }
  }

  if (!Stores.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);

  // Round the byte count up to 8n+4 so that both the start of the arguments
  // and the stack after the retaddr has been pushed stay 8-byte aligned.
  if ((ArgOffset & 7) == 0)
    ArgOffset += 4;
  std::vector<MVT::ValueType> RetVals;
  MVT::ValueType RetTyVT = getValueType(RetTy);

  RetVals.push_back(MVT::Other);

  // The result values produced have to be legal. Promote the result.
  switch (RetTyVT) {
  case MVT::isVoid: break;
  default:
    RetVals.push_back(RetTyVT);
    break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    RetVals.push_back(MVT::i32);
    break;
  case MVT::f32:
    if (X86ScalarSSE)
      RetVals.push_back(MVT::f32);
    else
      RetVals.push_back(MVT::f64);
    break;
  case MVT::i64:
    RetVals.push_back(MVT::i32);
    RetVals.push_back(MVT::i32);
    break;
  }
  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.
  SDOperand InFlag;
  for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) {
    unsigned CCReg;
    SDOperand RegToPass = RegValuesToPass[i];
    switch (RegToPass.getValueType()) {
    default: assert(0 && "Bad thing to pass in regs");
    case MVT::i8:
      CCReg = (i == 0) ? X86::AL  : X86::DL;
      break;
    case MVT::i16:
      CCReg = (i == 0) ? X86::AX  : X86::DX;
      break;
    case MVT::i32:
      CCReg = (i == 0) ? X86::EAX : X86::EDX;
      break;
    }

    Chain = DAG.getCopyToReg(Chain, CCReg, RegToPass, InFlag);
    InFlag = Chain.getValue(1);
  }
  std::vector<MVT::ValueType> NodeTys;
  NodeTys.push_back(MVT::Other);   // Returns a chain
  NodeTys.push_back(MVT::Flag);    // Returns a flag for retval copy to use.
  std::vector<SDOperand> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);
  if (InFlag.Val)
    Ops.push_back(InFlag);

  // FIXME: Do not generate X86ISD::TAILCALL for now.
  Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
                      NodeTys, Ops);
  InFlag = Chain.getValue(1);
  NodeTys.clear();
  NodeTys.push_back(MVT::Other);   // Returns a chain
  NodeTys.push_back(MVT::Flag);    // Returns a flag for retval copy to use.
  Ops.clear();
  Ops.push_back(Chain);
  Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));
  Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, Ops);
  InFlag = Chain.getValue(1);
  SDOperand RetVal;
  if (RetTyVT != MVT::isVoid) {
    switch (RetTyVT) {
    default: assert(0 && "Unknown value type to return!");
    case MVT::i1:
    case MVT::i8:
      RetVal = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag);
      Chain = RetVal.getValue(1);
      if (RetTyVT == MVT::i1)
        RetVal = DAG.getNode(ISD::TRUNCATE, MVT::i1, RetVal);
      break;
    case MVT::i16:
      RetVal = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag);
      Chain = RetVal.getValue(1);
      break;
    case MVT::i32:
      RetVal = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
      Chain = RetVal.getValue(1);
      break;
    case MVT::i64: {
      SDOperand Lo = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
      SDOperand Hi = DAG.getCopyFromReg(Lo.getValue(1), X86::EDX, MVT::i32,
                                        Lo.getValue(2));
      RetVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Lo, Hi);
      Chain = Hi.getValue(1);
      break;
    }
    case MVT::f32:
    case MVT::f64: {
      std::vector<MVT::ValueType> Tys;
      Tys.push_back(MVT::f64);
      Tys.push_back(MVT::Other);
      Tys.push_back(MVT::Flag);
      std::vector<SDOperand> Ops;
      Ops.push_back(Chain);
      Ops.push_back(InFlag);
      RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, Ops);
      Chain  = RetVal.getValue(1);
      InFlag = RetVal.getValue(2);

      if (X86ScalarSSE) {
        // FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
        // shouldn't be necessary except that RFP cannot be live across
        // multiple blocks. When stackifier is fixed, they can be uncoupled.
        MachineFunction &MF = DAG.getMachineFunction();
        int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
        SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
        Tys.clear();
        Tys.push_back(MVT::Other);
        Ops.clear();
        Ops.push_back(Chain);
        Ops.push_back(RetVal);
        Ops.push_back(StackSlot);
        Ops.push_back(DAG.getValueType(RetTyVT));
        Ops.push_back(InFlag);
        Chain = DAG.getNode(X86ISD::FST, Tys, Ops);
        RetVal = DAG.getLoad(RetTyVT, Chain, StackSlot,
                             DAG.getSrcValue(NULL));
        Chain = RetVal.getValue(1);
      }

      if (RetTyVT == MVT::f32 && !X86ScalarSSE)
        // FIXME: we would really like to remember that this FP_ROUND
        // operation is okay to eliminate if we allow excess FP precision.
        RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
      break;
    }
    }
  }

  return std::make_pair(RetVal, Chain);
}
SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
  if (ReturnAddrIndex == 0) {
    // Set up a frame object for the return address.
    MachineFunction &MF = DAG.getMachineFunction();
    ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
  }

  return DAG.getFrameIndex(ReturnAddrIndex, MVT::i32);
}
std::pair<SDOperand, SDOperand> X86TargetLowering::
LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
                        SelectionDAG &DAG) {
  SDOperand Result;
  if (Depth)   // Depths > 0 not supported yet!
    Result = DAG.getConstant(0, getPointerTy());
  else {
    SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
    if (!isFrameAddress)
      // Just load the return address.
      Result = DAG.getLoad(MVT::i32, DAG.getEntryNode(), RetAddrFI,
                           DAG.getSrcValue(NULL));
    else
      Result = DAG.getNode(ISD::SUB, MVT::i32, RetAddrFI,
                           DAG.getConstant(4, MVT::i32));
  }
  return std::make_pair(Result, Chain);
}
/// getCondBrOpcodeForX86CC - Returns the X86 conditional branch opcode
/// which corresponds to the condition code.
static unsigned getCondBrOpcodeForX86CC(unsigned X86CC) {
  switch (X86CC) {
  default: assert(0 && "Unknown X86 conditional code!");
  case X86ISD::COND_A:  return X86::JA;
  case X86ISD::COND_AE: return X86::JAE;
  case X86ISD::COND_B:  return X86::JB;
  case X86ISD::COND_BE: return X86::JBE;
  case X86ISD::COND_E:  return X86::JE;
  case X86ISD::COND_G:  return X86::JG;
  case X86ISD::COND_GE: return X86::JGE;
  case X86ISD::COND_L:  return X86::JL;
  case X86ISD::COND_LE: return X86::JLE;
  case X86ISD::COND_NE: return X86::JNE;
  case X86ISD::COND_NO: return X86::JNO;
  case X86ISD::COND_NP: return X86::JNP;
  case X86ISD::COND_NS: return X86::JNS;
  case X86ISD::COND_O:  return X86::JO;
  case X86ISD::COND_P:  return X86::JP;
  case X86ISD::COND_S:  return X86::JS;
  }
}
/// translateX86CC - Do a one-to-one translation of an ISD::CondCode to the
/// X86-specific condition code. It returns false if it cannot do a direct
/// translation. X86CC is the translated CondCode. Flip is set to true if the
/// order of the comparison operands should be flipped.
static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
                           unsigned &X86CC, bool &Flip) {
  Flip = false;
  X86CC = X86ISD::COND_INVALID;
  if (!isFP) {
    switch (SetCCOpcode) {
    default: break;
    case ISD::SETEQ:  X86CC = X86ISD::COND_E;  break;
    case ISD::SETGT:  X86CC = X86ISD::COND_G;  break;
    case ISD::SETGE:  X86CC = X86ISD::COND_GE; break;
    case ISD::SETLT:  X86CC = X86ISD::COND_L;  break;
    case ISD::SETLE:  X86CC = X86ISD::COND_LE; break;
    case ISD::SETNE:  X86CC = X86ISD::COND_NE; break;
    case ISD::SETULT: X86CC = X86ISD::COND_B;  break;
    case ISD::SETUGT: X86CC = X86ISD::COND_A;  break;
    case ISD::SETULE: X86CC = X86ISD::COND_BE; break;
    case ISD::SETUGE: X86CC = X86ISD::COND_AE; break;
    }
  } else {
    // On a floating point condition, the flags are set as follows:
    //  ZF  PF  CF   op
    //   0 | 0 | 0 | X > Y
    //   0 | 0 | 1 | X < Y
    //   1 | 0 | 0 | X == Y
    //   1 | 1 | 1 | unordered
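    // Because these are the same flags an unsigned integer compare produces,
    // FP conditions map onto the unsigned condition codes below: e.g. plain
    // SETLT becomes COND_B (CF set), while the ordered SETOLT flips the
    // operands and reuses the SETGT/COND_A mapping.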
    switch (SetCCOpcode) {
    default: break;
    case ISD::SETUEQ:
    case ISD::SETEQ:  X86CC = X86ISD::COND_E;  break;
    case ISD::SETOLT: Flip = true; // Fallthrough
    case ISD::SETOGT:
    case ISD::SETGT:  X86CC = X86ISD::COND_A;  break;
    case ISD::SETOLE: Flip = true; // Fallthrough
    case ISD::SETOGE:
    case ISD::SETGE:  X86CC = X86ISD::COND_AE; break;
    case ISD::SETUGT: Flip = true; // Fallthrough
    case ISD::SETULT:
    case ISD::SETLT:  X86CC = X86ISD::COND_B;  break;
    case ISD::SETUGE: Flip = true; // Fallthrough
    case ISD::SETULE:
    case ISD::SETLE:  X86CC = X86ISD::COND_BE; break;
    case ISD::SETONE:
    case ISD::SETNE:  X86CC = X86ISD::COND_NE; break;
    case ISD::SETUO:  X86CC = X86ISD::COND_P;  break;
    case ISD::SETO:   X86CC = X86ISD::COND_NP; break;
    }
  }

  return X86CC != X86ISD::COND_INVALID;
}
static bool translateX86CC(SDOperand CC, bool isFP, unsigned &X86CC,
                           bool &Flip) {
  return translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC, Flip);
}
/// hasFPCMov - Is there a floating point cmov for the specific X86 condition
/// code? Current x86 isa includes the following FP cmov instructions:
/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
static bool hasFPCMov(unsigned X86CC) {
  switch (X86CC) {
  default:
    return false;
  case X86ISD::COND_B:
  case X86ISD::COND_BE:
  case X86ISD::COND_E:
  case X86ISD::COND_P:
  case X86ISD::COND_A:
  case X86ISD::COND_AE:
  case X86ISD::COND_NE:
  case X86ISD::COND_NP:
    return true;
  }
}
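// A note on the rationale, inferred from the ISA: fcmov can only test CF,
// ZF, and PF (the flags EFLAGS-based FP compares produce), so only these
// eight unsigned-flavored conditions have fcmov forms; the signed conditions
// (COND_G, COND_L, etc.) would need SF/OF and are rejected above.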
MachineBasicBlock *
X86TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
                                           MachineBasicBlock *BB) {
  switch (MI->getOpcode()) {
  default: assert(false && "Unexpected instr type to insert");
  case X86::CMOV_FR32:
  case X86::CMOV_FR64:
  case X86::CMOV_V4F32:
  case X86::CMOV_V2F64:
  case X86::CMOV_V2I64: {
    // To "insert" a SELECT_CC instruction, we actually have to insert the
    // diamond control-flow pattern. The incoming instruction knows the
    // destination vreg to set, the condition code register to branch on, the
    // true/false values to select between, and a branch opcode to use.
    const BasicBlock *LLVM_BB = BB->getBasicBlock();
    ilist<MachineBasicBlock>::iterator It = BB;
    ++It;

    //  thisMBB:
    //  ...
    //   TrueVal = ...
    //   cmpTY ccX, r1, r2
    //   bCC sinkMBB
    //   fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB = BB;
    MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
    unsigned Opc = getCondBrOpcodeForX86CC(MI->getOperand(3).getImmedValue());
    BuildMI(BB, Opc, 1).addMBB(sinkMBB);
    MachineFunction *F = BB->getParent();
    F->getBasicBlockList().insert(It, copy0MBB);
    F->getBasicBlockList().insert(It, sinkMBB);
    // Update machine-CFG edges by first adding all successors of the current
    // block to the new block which will contain the Phi node for the select.
    for (MachineBasicBlock::succ_iterator i = BB->succ_begin(),
         e = BB->succ_end(); i != e; ++i)
      sinkMBB->addSuccessor(*i);
    // Next, remove all successors of the current block, and add the true
    // and fallthrough blocks as its successors.
    while (!BB->succ_empty())
      BB->removeSuccessor(BB->succ_begin());
    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    //  copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges.
    BB->addSuccessor(sinkMBB);

    //  sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //  ...
    BB = sinkMBB;
    BuildMI(BB, X86::PHI, 4, MI->getOperand(0).getReg())
      .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
      .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

    delete MI;   // The pseudo instruction is gone now.
    return BB;
  }
  case X86::FP_TO_INT16_IN_MEM:
  case X86::FP_TO_INT32_IN_MEM:
  case X86::FP_TO_INT64_IN_MEM: {
    // Change the floating point control register to use "round towards zero"
    // mode when truncating to an integer value.
    MachineFunction *F = BB->getParent();
    int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
    addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);

    // Load the old value of the high byte of the control word...
    unsigned OldCW =
      F->getSSARegMap()->createVirtualRegister(X86::R16RegisterClass);
    addFrameReference(BuildMI(BB, X86::MOV16rm, 4, OldCW), CWFrameIdx);

    // Set the high part to be round to zero...
    addFrameReference(BuildMI(BB, X86::MOV16mi, 5), CWFrameIdx).addImm(0xC7F);
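    // A note on the 0xC7F immediate: in the x87 control word, bits 10-11 are
    // the rounding-control field, and 11b selects round-toward-zero; the low
    // byte (0x7F) keeps all FP exceptions masked. This is what implements the
    // "round towards zero" behavior described above.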
    // Reload the modified control word now...
    addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);

    // Restore the memory image of control word to original value.
    addFrameReference(BuildMI(BB, X86::MOV16mr, 5), CWFrameIdx).addReg(OldCW);

    // Get the X86 opcode to use.
    unsigned Opc;
    switch (MI->getOpcode()) {
    default: assert(0 && "illegal opcode!");
    case X86::FP_TO_INT16_IN_MEM: Opc = X86::FpIST16m; break;
    case X86::FP_TO_INT32_IN_MEM: Opc = X86::FpIST32m; break;
    case X86::FP_TO_INT64_IN_MEM: Opc = X86::FpIST64m; break;
    }

    X86AddressMode AM;
    MachineOperand &Op = MI->getOperand(0);
    if (Op.isRegister()) {
      AM.BaseType = X86AddressMode::RegBase;
      AM.Base.Reg = Op.getReg();
    } else {
      AM.BaseType = X86AddressMode::FrameIndexBase;
      AM.Base.FrameIndex = Op.getFrameIndex();
    }
    Op = MI->getOperand(1);
    if (Op.isImmediate())
      AM.Scale = Op.getImmedValue();
    Op = MI->getOperand(2);
    if (Op.isImmediate())
      AM.IndexReg = Op.getImmedValue();
    Op = MI->getOperand(3);
    if (Op.isGlobalAddress()) {
      AM.GV = Op.getGlobal();
    } else {
      AM.Disp = Op.getImmedValue();
    }
    addFullAddress(BuildMI(BB, Opc, 5), AM).addReg(MI->getOperand(4).getReg());

    // Reload the original control word now.
    addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);

    delete MI;   // The pseudo instruction is gone now.
    return BB;
  }
  }
}
//===----------------------------------------------------------------------===//
// X86 Custom Lowering Hooks
//===----------------------------------------------------------------------===//
/// DarwinGVRequiresExtraLoad - true if accessing the GV requires an extra
/// load. For Darwin, external and weak symbols are indirect, loading the value
/// at address GV rather than the value of GV itself. This means that the
/// GlobalAddress must be in the base or index register of the address, not the
/// GV offset field.
static bool DarwinGVRequiresExtraLoad(GlobalValue *GV) {
  return (GV->hasWeakLinkage() || GV->hasLinkOnceLinkage() ||
          (GV->isExternal() && !GV->hasNotBeenReadFromBytecode()));
}
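// (On Darwin/Mach-O such symbols are reached through an indirect "non-lazy
// pointer" entry, which is where the extra load comes from.)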
/// isUndefOrInRange - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if its value falls within the range [Low, Hi).
static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) {
  if (Op.getOpcode() == ISD::UNDEF)
    return true;

  unsigned Val = cast<ConstantSDNode>(Op)->getValue();
  return (Val >= Low && Val < Hi);
}
/// isUndefOrEqual - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if its value equals the specified value.
static bool isUndefOrEqual(SDOperand Op, unsigned Val) {
  if (Op.getOpcode() == ISD::UNDEF)
    return true;

  return cast<ConstantSDNode>(Op)->getValue() == Val;
}
/// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFD.
bool X86::isPSHUFDMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Check if the value doesn't reference the second vector.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (cast<ConstantSDNode>(Arg)->getValue() >= 4)
      return false;
  }

  return true;
}
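// Example: for v4i32, the mask <1, 0, 3, 2> passes this test (every index is
// < 4, i.e. only the first input vector is referenced), so the shuffle can be
// emitted as a single PSHUFD with the corresponding immediate.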
/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to PSHUFHW.
bool X86::isPSHUFHWMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 8)
    return false;

  // Lower quadword copied in order.
  for (unsigned i = 0; i != 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (cast<ConstantSDNode>(Arg)->getValue() != i)
      return false;
  }

  // Upper quadword shuffled.
  for (unsigned i = 4; i != 8; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < 4 || Val > 7)
      return false;
  }

  return true;
}
1698 /// specifies a shuffle of elements that is suitable for input to PSHUFLW.
1699 bool X86::isPSHUFLWMask(SDNode *N) {
1700 assert(N->getOpcode() == ISD::BUILD_VECTOR);
1702 if (N->getNumOperands() != 8)
1705 // Upper quadword copied in order.
1706 for (unsigned i = 4; i != 8; ++i)
1707 if (!isUndefOrEqual(N->getOperand(i), i))
1710 // Lower quadword shuffled.
1711 for (unsigned i = 0; i != 4; ++i)
1712 if (!isUndefOrInRange(N->getOperand(i), 0, 4))
/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to SHUFP*.
static bool isSHUFPMask(std::vector<SDOperand> &N) {
  unsigned NumElems = N.size();
  if (NumElems != 2 && NumElems != 4) return false;

  unsigned Half = NumElems / 2;
  for (unsigned i = 0; i < Half; ++i)
    if (!isUndefOrInRange(N[i], 0, NumElems))
      return false;
  for (unsigned i = Half; i < NumElems; ++i)
    if (!isUndefOrInRange(N[i], NumElems, NumElems*2))
      return false;

  return true;
}
bool X86::isSHUFPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  std::vector<SDOperand> Ops(N->op_begin(), N->op_end());
  return ::isSHUFPMask(Ops);
}
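// Example: with 4 elements, the mask <1, 0, 6, 7> is accepted: the low two
// indices select from the first vector ([0,4)) and the high two from the
// second ([4,8)), matching what SHUFPS can encode.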
/// isCommutedSHUFP - Returns true if the shuffle mask is exactly
/// the reverse of what x86 shuffles want. x86 shuffles require the lower
/// half elements to come from vector 1 (which would equal the dest.) and
/// the upper half to come from vector 2.
static bool isCommutedSHUFP(std::vector<SDOperand> &Ops) {
  unsigned NumElems = Ops.size();
  if (NumElems != 2 && NumElems != 4) return false;

  unsigned Half = NumElems / 2;
  for (unsigned i = 0; i < Half; ++i)
    if (!isUndefOrInRange(Ops[i], NumElems, NumElems*2))
      return false;
  for (unsigned i = Half; i < NumElems; ++i)
    if (!isUndefOrInRange(Ops[i], 0, NumElems))
      return false;
  return true;
}

static bool isCommutedSHUFP(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  std::vector<SDOperand> Ops(N->op_begin(), N->op_end());
  return isCommutedSHUFP(Ops);
}
/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
bool X86::isMOVHLPSMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect element 0 == 6, element 1 == 7, element 2 == 2, element 3 == 3.
  return isUndefOrEqual(N->getOperand(0), 6) &&
         isUndefOrEqual(N->getOperand(1), 7) &&
         isUndefOrEqual(N->getOperand(2), 2) &&
         isUndefOrEqual(N->getOperand(3), 3);
}
/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
bool X86::isMOVLPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems != 2 && NumElems != 4)
    return false;

  for (unsigned i = 0; i < NumElems/2; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i + NumElems))
      return false;

  for (unsigned i = NumElems/2; i < NumElems; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i))
      return false;

  return true;
}
/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D}
/// and MOVLHPS.
bool X86::isMOVHPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems != 2 && NumElems != 4)
    return false;

  for (unsigned i = 0; i < NumElems/2; ++i)
    if (!isUndefOrEqual(N->getOperand(i), i))
      return false;

  for (unsigned i = 0; i < NumElems/2; ++i) {
    SDOperand Arg = N->getOperand(i + NumElems/2);
    if (!isUndefOrEqual(Arg, i + NumElems))
      return false;
  }

  return true;
}
/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKL.
static bool isUNPCKLMask(std::vector<SDOperand> &N, bool V2IsSplat = false) {
  unsigned NumElems = N.size();
  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
    return false;

  for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
    SDOperand BitI = N[i];
    SDOperand BitI1 = N[i+1];
    if (!isUndefOrEqual(BitI, j))
      return false;
    if (V2IsSplat) {
      // V2 is a splat, so any reference into it must name its first element.
      if (!isUndefOrEqual(BitI1, NumElems))
        return false;
    } else {
      if (!isUndefOrEqual(BitI1, j + NumElems))
        return false;
    }
  }

  return true;
}

bool X86::isUNPCKLMask(SDNode *N, bool V2IsSplat) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  std::vector<SDOperand> Ops(N->op_begin(), N->op_end());
  return ::isUNPCKLMask(Ops, V2IsSplat);
}
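
// For illustration: the canonical unpckl masks interleave the low halves of
// the two inputs, e.g. <0,4,1,5> for 4 elements or <0,8,1,9,2,10,3,11> for
// 8. With V2IsSplat, the odd positions may instead all name V2's first
// element, as in <0,4,1,4>.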
/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKH.
static bool isUNPCKHMask(std::vector<SDOperand> &N, bool V2IsSplat = false) {
  unsigned NumElems = N.size();
  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
    return false;

  for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
    SDOperand BitI = N[i];
    SDOperand BitI1 = N[i+1];
    if (!isUndefOrEqual(BitI, j + NumElems/2))
      return false;
    if (V2IsSplat) {
      // V2 is a splat, so any reference into it must name its first element.
      if (!isUndefOrEqual(BitI1, NumElems))
        return false;
    } else {
      if (!isUndefOrEqual(BitI1, j + NumElems/2 + NumElems))
        return false;
    }
  }

  return true;
}

bool X86::isUNPCKHMask(SDNode *N, bool V2IsSplat) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  std::vector<SDOperand> Ops(N->op_begin(), N->op_end());
  return ::isUNPCKHMask(Ops, V2IsSplat);
}
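
// For illustration: the canonical unpckh masks interleave the high halves,
// e.g. <2,6,3,7> for 4 elements.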
/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
/// <0, 0, 1, 1>.
bool X86::isUNPCKL_v_undef_Mask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  unsigned NumElems = N->getNumOperands();
  if (NumElems != 4 && NumElems != 8 && NumElems != 16)
    return false;

  for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
    SDOperand BitI = N->getOperand(i);
    SDOperand BitI1 = N->getOperand(i+1);
    if (!isUndefOrEqual(BitI, j))
      return false;
    if (!isUndefOrEqual(BitI1, j))
      return false;
  }

  return true;
}
/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSS,
/// MOVSD, and MOVD, i.e. setting the lowest element.
static bool isMOVLMask(std::vector<SDOperand> &N) {
  unsigned NumElems = N.size();
  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
    return false;

  if (!isUndefOrEqual(N[0], NumElems))
    return false;

  for (unsigned i = 1; i < NumElems; ++i) {
    SDOperand Arg = N[i];
    if (!isUndefOrEqual(Arg, i))
      return false;
  }

  return true;
}

bool X86::isMOVLMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  std::vector<SDOperand> Ops(N->op_begin(), N->op_end());
  return ::isMOVLMask(Ops);
}
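
// For illustration: the v4 movl mask is <4,1,2,3>; lane 0 is taken from the
// second vector's first element and the remaining lanes pass the first
// vector through unchanged, which is exactly what movss does.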
/// isCommutedMOVL - Returns true if the shuffle mask is exactly the reverse
/// of what x86 movss wants: x86 movs requires the lowest element to be the
/// lowest element of vector 2 and the other elements to come from vector 1
/// in order.
static bool isCommutedMOVL(std::vector<SDOperand> &Ops, bool V2IsSplat = false) {
  unsigned NumElems = Ops.size();
  if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
    return false;

  if (!isUndefOrEqual(Ops[0], 0))
    return false;

  for (unsigned i = 1; i < NumElems; ++i) {
    SDOperand Arg = Ops[i];
    if (V2IsSplat) {
      if (!isUndefOrEqual(Arg, NumElems))
        return false;
    } else {
      if (!isUndefOrEqual(Arg, i+NumElems))
        return false;
    }
  }

  return true;
}

static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);
  std::vector<SDOperand> Ops(N->op_begin(), N->op_end());
  return isCommutedMOVL(Ops, V2IsSplat);
}
/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
bool X86::isMOVSHDUPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect 1, 1, 3, 3
  for (unsigned i = 0; i < 2; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 1) return false;
  }

  bool HasHi = false;
  for (unsigned i = 2; i < 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 3) return false;
    HasHi = true;
  }

  // Don't use movshdup if it can be done with a shufps.
  return HasHi;
}
/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
bool X86::isMOVSLDUPMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 4)
    return false;

  // Expect 0, 0, 2, 2
  for (unsigned i = 0; i < 2; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 0) return false;
  }

  bool HasHi = false;
  for (unsigned i = 2; i < 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val != 2) return false;
    HasHi = true;
  }

  // Don't use movsldup if it can be done with a shufps.
  return HasHi;
}
/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
/// a splat of a single element.
static bool isSplatMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned NumElems = N->getNumOperands();
  SDOperand ElementBase;
  unsigned i = 0;
  for (; i != NumElems; ++i) {
    SDOperand Elt = N->getOperand(i);
    if (isa<ConstantSDNode>(Elt)) {
      ElementBase = Elt;
      break;
    }
  }

  if (!ElementBase.Val)
    return false;

  for (; i != NumElems; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    if (Arg != ElementBase) return false;
  }

  // Make sure it is a splat of the first vector operand.
  return cast<ConstantSDNode>(ElementBase)->getValue() < NumElems;
}
/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
/// a splat of a single element and it's a 2 or 4 element mask.
bool X86::isSplatMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  // We can only splat 64-bit and 32-bit quantities with a single instruction.
  if (N->getNumOperands() != 4 && N->getNumOperands() != 2)
    return false;
  return ::isSplatMask(N);
}
/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
/// instructions.
unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
  unsigned NumOperands = N->getNumOperands();
  unsigned Shift = (NumOperands == 4) ? 2 : 1;
  unsigned Mask = 0;
  for (unsigned i = 0; i < NumOperands; ++i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(NumOperands-i-1);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val >= NumOperands) Val -= NumOperands;
    Mask |= Val;
    if (i != NumOperands - 1)
      Mask <<= Shift;
  }

  return Mask;
}
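
// Worked example: elements are encoded Shift bits apiece, highest element
// first. For the v4 identity mask <0,1,2,3> this yields 11100100b = 0xE4,
// and for the reversal <3,2,1,0> it yields 00011011b = 0x1B, matching the
// usual pshufd/shufps immediates.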
/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with the PSHUFHW
/// instruction.
unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) {
  unsigned Mask = 0;
  // 8 nodes, but we only care about the last 4.
  for (unsigned i = 7; i >= 4; --i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val >= 4) Val -= 4;
    Mask |= Val;
    if (i != 4)
      Mask <<= 2;
  }

  return Mask;
}
/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
/// the specified isShuffleMask VECTOR_SHUFFLE mask with the PSHUFLW
/// instruction.
unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) {
  unsigned Mask = 0;
  // 8 nodes, but we only care about the first 4.
  for (int i = 3; i >= 0; --i) {
    unsigned Val = 0;
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF)
      Val = cast<ConstantSDNode>(Arg)->getValue();
    Mask |= Val;
    if (i != 0)
      Mask <<= 2;
  }

  return Mask;
}
/// isPSHUFHW_PSHUFLWMask - true if the specified VECTOR_SHUFFLE operand
/// specifies an 8 element shuffle that can be broken into a pair of
/// PSHUFHW and PSHUFLW.
static bool isPSHUFHW_PSHUFLWMask(SDNode *N) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR);

  if (N->getNumOperands() != 8)
    return false;

  // Lower quadword shuffled.
  for (unsigned i = 0; i != 4; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val > 3)
      return false;
  }

  // Upper quadword shuffled.
  for (unsigned i = 4; i != 8; ++i) {
    SDOperand Arg = N->getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) continue;
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < 4 || Val > 7)
      return false;
  }

  return true;
}
/// CommuteVectorShuffle - Swap vector_shuffle operands as well as the
/// values in the permute mask.
static SDOperand CommuteVectorShuffle(SDOperand Op, SelectionDAG &DAG) {
  SDOperand V1 = Op.getOperand(0);
  SDOperand V2 = Op.getOperand(1);
  SDOperand Mask = Op.getOperand(2);
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType MaskVT = Mask.getValueType();
  MVT::ValueType EltVT = MVT::getVectorBaseType(MaskVT);
  unsigned NumElems = Mask.getNumOperands();
  std::vector<SDOperand> MaskVec;

  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) {
      MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT));
      continue;
    }
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
    if (Val < NumElems)
      MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT));
    else
      MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT));
  }

  Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskVec);
  return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V2, V1, Mask);
}
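
// For illustration: shuffle(V1, V2, <0,1,4,5>) commutes to
// shuffle(V2, V1, <4,5,0,1>); every mask entry is moved across the NumElems
// boundary, so the result still selects the same values.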
/// ShouldXformToMOVHLPS - Return true if the node should be transformed to
/// match movhlps. The lower half elements should come from upper half of
/// V1 (and in order), and the upper half elements should come from the upper
/// half of V2 (and in order).
static bool ShouldXformToMOVHLPS(SDNode *Mask) {
  unsigned NumElems = Mask->getNumOperands();
  if (NumElems != 4)
    return false;
  for (unsigned i = 0, e = 2; i != e; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i+2))
      return false;
  for (unsigned i = 2; i != 4; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i+4))
      return false;
  return true;
}
/// isScalarLoadToVector - Returns true if the node is a scalar load that
/// is promoted to a vector.
static inline bool isScalarLoadToVector(SDNode *N) {
  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) {
    N = N->getOperand(0).Val;
    return (N->getOpcode() == ISD::LOAD);
  }
  return false;
}
/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
/// match movlp{s|d}. The lower half elements should come from lower half of
/// V1 (and in order), and the upper half elements should come from the upper
/// half of V2 (and in order). And since V1 will become the source of the
/// MOVLP, it must be either a vector load or a scalar load to vector.
static bool ShouldXformToMOVLP(SDNode *V1, SDNode *Mask) {
  if (V1->getOpcode() != ISD::LOAD && !isScalarLoadToVector(V1))
    return false;

  unsigned NumElems = Mask->getNumOperands();
  if (NumElems != 2 && NumElems != 4)
    return false;
  for (unsigned i = 0, e = NumElems/2; i != e; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i))
      return false;
  for (unsigned i = NumElems/2; i != NumElems; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i+NumElems))
      return false;
  return true;
}
/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements
/// are all the same.
static bool isSplatVector(SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  SDOperand SplatValue = N->getOperand(0);
  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
    if (N->getOperand(i) != SplatValue)
      return false;
  return true;
}
/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
/// that point to V2 point to its first element.
static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) {
  assert(Mask.getOpcode() == ISD::BUILD_VECTOR);

  bool Changed = false;
  std::vector<SDOperand> MaskVec;
  unsigned NumElems = Mask.getNumOperands();
  for (unsigned i = 0; i != NumElems; ++i) {
    SDOperand Arg = Mask.getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF) {
      unsigned Val = cast<ConstantSDNode>(Arg)->getValue();
      if (Val > NumElems) {
        Arg = DAG.getConstant(NumElems, Arg.getValueType());
        Changed = true;
      }
    }
    MaskVec.push_back(Arg);
  }

  if (Changed)
    Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(), MaskVec);
  return Mask;
}
/// getMOVLMask - Returns a vector_shuffle mask for a movs{s|d}, movd
/// operation of specified width.
static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) {
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);

  std::vector<SDOperand> MaskVec;
  MaskVec.push_back(DAG.getConstant(NumElems, BaseVT));
  for (unsigned i = 1; i != NumElems; ++i)
    MaskVec.push_back(DAG.getConstant(i, BaseVT));
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskVec);
}
/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation
/// of specified width.
static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) {
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
  std::vector<SDOperand> MaskVec;
  for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
    MaskVec.push_back(DAG.getConstant(i, BaseVT));
    MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskVec);
}
/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation
/// of specified width.
static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) {
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
  unsigned Half = NumElems/2;
  std::vector<SDOperand> MaskVec;
  for (unsigned i = 0; i != Half; ++i) {
    MaskVec.push_back(DAG.getConstant(i + Half, BaseVT));
    MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskVec);
}
/// getZeroVector - Returns a vector of specified type with all zero elements.
///
static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) {
  assert(MVT::isVector(VT) && "Expected a vector type");
  unsigned NumElems = getVectorNumElements(VT);
  MVT::ValueType EVT = MVT::getVectorBaseType(VT);
  bool isFP = MVT::isFloatingPoint(EVT);
  SDOperand Zero = isFP ? DAG.getConstantFP(0.0, EVT) : DAG.getConstant(0, EVT);
  std::vector<SDOperand> ZeroVec(NumElems, Zero);
  return DAG.getNode(ISD::BUILD_VECTOR, VT, ZeroVec);
}
/// PromoteSplat - Promote a splat of v8i16 or v16i8 to v4i32.
///
static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG) {
  SDOperand V1 = Op.getOperand(0);
  SDOperand Mask = Op.getOperand(2);
  MVT::ValueType VT = Op.getValueType();
  unsigned NumElems = Mask.getNumOperands();
  Mask = getUnpacklMask(NumElems, DAG);
  while (NumElems != 4) {
    // Each self-unpack doubles the width of the splatted group.
    V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask);
    NumElems >>= 1;
  }
  V1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, V1);

  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
  Mask = getZeroVector(MaskVT, DAG);
  SDOperand Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v4i32, V1,
                                  DAG.getNode(ISD::UNDEF, MVT::v4i32), Mask);
  return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle);
}
/// isZeroNode - Returns true if Elt is a constant zero or a floating point
/// constant with a value of exactly 0.0.
static inline bool isZeroNode(SDOperand Elt) {
  return ((isa<ConstantSDNode>(Elt) &&
           cast<ConstantSDNode>(Elt)->getValue() == 0) ||
          (isa<ConstantFPSDNode>(Elt) &&
           cast<ConstantFPSDNode>(Elt)->isExactlyValue(0.0)));
}
/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
/// vector and a zero or undef vector.
static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, MVT::ValueType VT,
                                             unsigned NumElems, unsigned Idx,
                                             bool isZero, SelectionDAG &DAG) {
  SDOperand V1 = isZero ? getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT);
  MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT::ValueType EVT = MVT::getVectorBaseType(MaskVT);
  SDOperand Zero = DAG.getConstant(0, EVT);
  std::vector<SDOperand> MaskVec(NumElems, Zero);
  MaskVec[Idx] = DAG.getConstant(NumElems, EVT);
  SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskVec);
  return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
}
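
// For illustration: with NumElems == 4, Idx == 0 and isZero set, this builds
// shuffle(zero, V2, <4,0,0,0>): V2's first element lands in lane 0 and every
// other lane reads lane 0 of the zero vector, i.e. a zero-extending movss.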
/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
///
static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros,
                                       unsigned NumNonZero, unsigned NumZero,
                                       SelectionDAG &DAG) {
  if (NumNonZero > 8)
    return SDOperand();

  SDOperand V;
  bool First = true;
  for (unsigned i = 0; i < 16; ++i) {
    bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
    if (ThisIsNonZero && First) {
      if (NumZero)
        V = getZeroVector(MVT::v8i16, DAG);
      else
        V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
      First = false;
    }

    if ((i & 1) != 0) {
      SDOperand ThisElt(0, 0), LastElt(0, 0);
      bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
      if (LastIsNonZero) {
        LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1));
      }
      if (ThisIsNonZero) {
        ThisElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i));
        ThisElt = DAG.getNode(ISD::SHL, MVT::i16,
                              ThisElt, DAG.getConstant(8, MVT::i8));
        if (LastIsNonZero)
          ThisElt = DAG.getNode(ISD::OR, MVT::i16, ThisElt, LastElt);
      } else
        ThisElt = LastElt;

      if (ThisElt.Val)
        V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt,
                        DAG.getConstant(i/2, MVT::i32));
    }
  }

  return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V);
}
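
// For illustration: adjacent bytes 2k and 2k+1 are zero-extended to i16 and
// merged as (byte[2k+1] << 8) | byte[2k], then inserted as word k, so a
// v16i8 build_vector needs at most eight word insertions instead of sixteen
// byte inserts.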
/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
///
static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros,
                                       unsigned NumNonZero, unsigned NumZero,
                                       SelectionDAG &DAG) {
  if (NumNonZero > 4)
    return SDOperand();

  SDOperand V;
  bool First = true;
  for (unsigned i = 0; i < 8; ++i) {
    bool isNonZero = (NonZeros & (1 << i)) != 0;
    if (isNonZero) {
      if (First) {
        if (NumZero)
          V = getZeroVector(MVT::v8i16, DAG);
        else
          V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
        First = false;
      }
      V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i),
                      DAG.getConstant(i, MVT::i32));
    }
  }

  return V;
}

SDOperand
X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
  // All zero's are handled with pxor.
  if (ISD::isBuildVectorAllZeros(Op.Val))
    return Op;

  // All one's are handled with pcmpeqd.
  if (ISD::isBuildVectorAllOnes(Op.Val))
    return Op;

  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType EVT = MVT::getVectorBaseType(VT);
  unsigned EVTBits = MVT::getSizeInBits(EVT);

  unsigned NumElems = Op.getNumOperands();
  unsigned NumZero  = 0;
  unsigned NumNonZero = 0;
  unsigned NonZeros = 0;
  std::set<SDOperand> Values;
  for (unsigned i = 0; i < NumElems; ++i) {
    SDOperand Elt = Op.getOperand(i);
    if (Elt.getOpcode() != ISD::UNDEF) {
      Values.insert(Elt);
      if (isZeroNode(Elt))
        NumZero++;
      else {
        NonZeros |= (1 << i);
        NumNonZero++;
      }
    }
  }

  if (NumNonZero == 0)
    // Must be a mix of zero and undef. Return a zero vector.
    return getZeroVector(VT, DAG);

  // Splat is obviously ok. Let the legalizer expand it to a shuffle.
  if (Values.size() == 1)
    return SDOperand();

  // Special case for single non-zero element.
  if (NumNonZero == 1) {
    unsigned Idx = CountTrailingZeros_32(NonZeros);
    SDOperand Item = Op.getOperand(Idx);
    Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item);
    if (Idx == 0)
      // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
      return getShuffleVectorZeroOrUndef(Item, VT, NumElems, Idx,
                                         NumZero > 0, DAG);

    if (EVTBits == 32) {
      // Turn it into a shuffle of zero and zero-extended scalar to vector.
      Item = getShuffleVectorZeroOrUndef(Item, VT, NumElems, 0, NumZero > 0,
                                         DAG);
      MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
      MVT::ValueType MaskEVT = MVT::getVectorBaseType(MaskVT);
      std::vector<SDOperand> MaskVec;
      for (unsigned i = 0; i < NumElems; i++)
        MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT));
      SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskVec);
      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item,
                         DAG.getNode(ISD::UNDEF, VT), Mask);
    }
  }
  // Let the legalizer expand 2-wide build_vectors.
  if (EVTBits == 64)
    return SDOperand();

  // If element VT is < 32 bits, convert it to inserts into a zero vector.
  if (EVTBits == 8) {
    SDOperand V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG);
    if (V.Val) return V;
  }

  if (EVTBits == 16) {
    SDOperand V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG);
    if (V.Val) return V;
  }
  // If element VT is == 32 bits, turn it into a number of shuffles.
  std::vector<SDOperand> V(NumElems);
  if (NumElems == 4 && NumZero > 0) {
    for (unsigned i = 0; i < 4; ++i) {
      bool isZero = !(NonZeros & (1 << i));
      if (isZero)
        V[i] = getZeroVector(VT, DAG);
      else
        V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
    }

    for (unsigned i = 0; i < 2; ++i) {
      switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
        default: break;
        case 0:
          V[i] = V[i*2];  // Must be a zero vector.
          break;
        case 1:
          V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2+1], V[i*2],
                             getMOVLMask(NumElems, DAG));
          break;
        case 2:
          V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1],
                             getMOVLMask(NumElems, DAG));
          break;
        case 3:
          V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1],
                             getUnpacklMask(NumElems, DAG));
          break;
      }
    }
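
    // For illustration: the two NonZeros bits for pair i tell which of
    // V[2i] and V[2i+1] are non-zero: 0 keeps the zero vector, 1 or 2 merges
    // the single non-zero half in with a movl-style mask, and 3 interleaves
    // both halves with unpckl.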
    // Take advantage of the fact R32 to VR128 scalar_to_vector (i.e. movd)
    // clears the upper bits.
    // FIXME: we can do the same for v4f32 case when we know both parts of
    // the lower half come from scalar_to_vector (loadf32). We should do
    // that in post legalizer dag combiner with target specific hooks.
    if (MVT::isInteger(EVT) && (NonZeros & (0x3 << 2)) == 0)
      return V[0];

    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
    MVT::ValueType MaskEVT = MVT::getVectorBaseType(MaskVT);
    std::vector<SDOperand> MaskVec;
    bool Reverse = (NonZeros & 0x3) == 2;
    for (unsigned i = 0; i < 2; ++i)
      if (Reverse)
        MaskVec.push_back(DAG.getConstant(1-i, MaskEVT));
      else
        MaskVec.push_back(DAG.getConstant(i, MaskEVT));
    Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2;
    for (unsigned i = 0; i < 2; ++i)
      if (Reverse)
        MaskVec.push_back(DAG.getConstant(1-i+NumElems, MaskEVT));
      else
        MaskVec.push_back(DAG.getConstant(i+NumElems, MaskEVT));
    SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskVec);
    return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask);
  }
  if (Values.size() > 2) {
    // Expand into a number of unpckl*, e.g. for v4f32:
    //   Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
    //         : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
    //   Step 2: unpcklps X, Y ==>    <3, 2, 1, 0>
    SDOperand UnpckMask = getUnpacklMask(NumElems, DAG);
    for (unsigned i = 0; i < NumElems; ++i)
      V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
    NumElems >>= 1;
    while (NumElems != 0) {
      for (unsigned i = 0; i < NumElems; ++i)
        V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems],
                           UnpckMask);
      NumElems >>= 1;
    }
    return V[0];
  }

  return SDOperand();
}

SDOperand
X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) {
  SDOperand V1 = Op.getOperand(0);
  SDOperand V2 = Op.getOperand(1);
  SDOperand PermMask = Op.getOperand(2);
  MVT::ValueType VT = Op.getValueType();
  unsigned NumElems = PermMask.getNumOperands();
  bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;

  if (isSplatMask(PermMask.Val)) {
    if (NumElems <= 4) return Op;
    // Promote it to a v4i32 splat.
    return PromoteSplat(Op, DAG);
  }
  if (X86::isMOVLMask(PermMask.Val))
    return (V1IsUndef) ? V2 : Op;

  if (X86::isMOVSHDUPMask(PermMask.Val) ||
      X86::isMOVSLDUPMask(PermMask.Val) ||
      X86::isMOVHLPSMask(PermMask.Val) ||
      X86::isMOVHPMask(PermMask.Val) ||
      X86::isMOVLPMask(PermMask.Val))
    return Op;

  if (ShouldXformToMOVHLPS(PermMask.Val) ||
      ShouldXformToMOVLP(V1.Val, PermMask.Val))
    return CommuteVectorShuffle(Op, DAG);

  bool V1IsSplat = isSplatVector(V1.Val) || V1.getOpcode() == ISD::UNDEF;
  bool V2IsSplat = isSplatVector(V2.Val) || V2.getOpcode() == ISD::UNDEF;
  if (V1IsSplat && !V2IsSplat) {
    Op = CommuteVectorShuffle(Op, DAG);
    V1 = Op.getOperand(0);
    V2 = Op.getOperand(1);
    PermMask = Op.getOperand(2);
    V2IsSplat = true;
  }
  if (isCommutedMOVL(PermMask.Val, V2IsSplat)) {
    if (V2IsUndef) return V1;
    Op = CommuteVectorShuffle(Op, DAG);
    V1 = Op.getOperand(0);
    V2 = Op.getOperand(1);
    PermMask = Op.getOperand(2);
    if (V2IsSplat) {
      // V2 is a splat, so the mask may be malformed. That is, it may point
      // to any V2 element. The instruction selector won't like this. Get
      // a corrected mask and commute to form a proper MOVS{S|D}.
      SDOperand NewMask = getMOVLMask(NumElems, DAG);
      if (NewMask.Val != PermMask.Val)
        Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
    }
    return Op;
  }
  if (X86::isUNPCKL_v_undef_Mask(PermMask.Val) ||
      X86::isUNPCKLMask(PermMask.Val) ||
      X86::isUNPCKHMask(PermMask.Val))
    return Op;

  if (V2IsSplat) {
    // Normalize the mask so all entries that point to V2 point to its first
    // element, then try to match unpck{h|l} again. If a match is found,
    // return a new vector_shuffle with the corrected mask.
    SDOperand NewMask = NormalizeMask(PermMask, DAG);
    if (NewMask.Val != PermMask.Val) {
      if (X86::isUNPCKLMask(PermMask.Val, true)) {
        SDOperand NewMask = getUnpacklMask(NumElems, DAG);
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
      } else if (X86::isUNPCKHMask(PermMask.Val, true)) {
        SDOperand NewMask = getUnpackhMask(NumElems, DAG);
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
      }
    }
  }
  // Normalize the node to match x86 shuffle ops if needed.
  if (V2.getOpcode() != ISD::UNDEF)
    if (isCommutedSHUFP(PermMask.Val)) {
      Op = CommuteVectorShuffle(Op, DAG);
      V1 = Op.getOperand(0);
      V2 = Op.getOperand(1);
      PermMask = Op.getOperand(2);
    }

  // If VT is integer, try PSHUF* first, then SHUFP*.
  if (MVT::isInteger(VT)) {
    if (X86::isPSHUFDMask(PermMask.Val) ||
        X86::isPSHUFHWMask(PermMask.Val) ||
        X86::isPSHUFLWMask(PermMask.Val)) {
      if (V2.getOpcode() != ISD::UNDEF)
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
                           DAG.getNode(ISD::UNDEF, V1.getValueType()), PermMask);
      return Op;
    }

    if (X86::isSHUFPMask(PermMask.Val))
      return Op;

    // Handle the v8i16 shuffle-high / shuffle-low node pair.
    if (VT == MVT::v8i16 && isPSHUFHW_PSHUFLWMask(PermMask.Val)) {
      MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
      MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
      std::vector<SDOperand> MaskVec;
      for (unsigned i = 0; i != 4; ++i)
        MaskVec.push_back(PermMask.getOperand(i));
      for (unsigned i = 4; i != 8; ++i)
        MaskVec.push_back(DAG.getConstant(i, BaseVT));
      SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskVec);
      V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
      MaskVec.clear();
      for (unsigned i = 0; i != 4; ++i)
        MaskVec.push_back(DAG.getConstant(i, BaseVT));
      for (unsigned i = 4; i != 8; ++i)
        MaskVec.push_back(PermMask.getOperand(i));
      Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskVec);
      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
    }
  } else {
    // Floating point cases in the other order.
    if (X86::isSHUFPMask(PermMask.Val))
      return Op;
    if (X86::isPSHUFDMask(PermMask.Val) ||
        X86::isPSHUFHWMask(PermMask.Val) ||
        X86::isPSHUFLWMask(PermMask.Val)) {
      if (V2.getOpcode() != ISD::UNDEF)
        return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
                           DAG.getNode(ISD::UNDEF, V1.getValueType()), PermMask);
      return Op;
    }
  }
  if (NumElems == 4) {
    MVT::ValueType MaskVT = PermMask.getValueType();
    MVT::ValueType MaskEVT = MVT::getVectorBaseType(MaskVT);
    std::vector<std::pair<int, int> > Locs;
    Locs.resize(NumElems);
    std::vector<SDOperand> Mask1(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
    std::vector<SDOperand> Mask2(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
    unsigned NumLo = 0;
    unsigned NumHi = 0;
    // If no more than two elements come from either vector, this can be
    // implemented with two shuffles. The first shuffle gathers the elements;
    // the second shuffle, which takes the first shuffle as both of its
    // vector operands, puts the elements into the right order.
    for (unsigned i = 0; i != NumElems; ++i) {
      SDOperand Elt = PermMask.getOperand(i);
      if (Elt.getOpcode() == ISD::UNDEF) {
        Locs[i] = std::make_pair(-1, -1);
      } else {
        unsigned Val = cast<ConstantSDNode>(Elt)->getValue();
        if (Val < NumElems) {
          Locs[i] = std::make_pair(0, NumLo);
          Mask1[NumLo] = Elt;
          NumLo++;
        } else {
          Locs[i] = std::make_pair(1, NumHi);
          if (2+NumHi < NumElems)
            Mask1[2+NumHi] = Elt;
          NumHi++;
        }
      }
    }
    if (NumLo <= 2 && NumHi <= 2) {
      V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
                       DAG.getNode(ISD::BUILD_VECTOR, MaskVT, Mask1));
      for (unsigned i = 0; i != NumElems; ++i) {
        if (Locs[i].first == -1)
          continue;
        else {
          unsigned Idx = (i < NumElems/2) ? 0 : NumElems;
          Idx += Locs[i].first * (NumElems/2) + Locs[i].second;
          Mask2[i] = DAG.getConstant(Idx, MaskEVT);
        }
      }

      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1,
                         DAG.getNode(ISD::BUILD_VECTOR, MaskVT, Mask2));
    }
    // Break it into (shuffle shuffle_hi, shuffle_lo).
    Locs.clear();
    Locs.resize(NumElems);
    std::vector<SDOperand> LoMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
    std::vector<SDOperand> HiMask(NumElems, DAG.getNode(ISD::UNDEF, MaskEVT));
    std::vector<SDOperand> *MaskPtr = &LoMask;
    unsigned MaskIdx = 0;
    unsigned LoIdx = 0;
    unsigned HiIdx = NumElems/2;
    for (unsigned i = 0; i != NumElems; ++i) {
      if (i == NumElems/2) {
        MaskPtr = &HiMask;
        MaskIdx = 1;
        LoIdx = 0;
        HiIdx = NumElems/2;
      }
      SDOperand Elt = PermMask.getOperand(i);
      if (Elt.getOpcode() == ISD::UNDEF) {
        Locs[i] = std::make_pair(-1, -1);
      } else if (cast<ConstantSDNode>(Elt)->getValue() < NumElems) {
        Locs[i] = std::make_pair(MaskIdx, LoIdx);
        (*MaskPtr)[LoIdx] = Elt;
        LoIdx++;
      } else {
        Locs[i] = std::make_pair(MaskIdx, HiIdx);
        (*MaskPtr)[HiIdx] = Elt;
        HiIdx++;
      }
    }

    SDOperand LoShuffle =
      DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
                  DAG.getNode(ISD::BUILD_VECTOR, MaskVT, LoMask));
    SDOperand HiShuffle =
      DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
                  DAG.getNode(ISD::BUILD_VECTOR, MaskVT, HiMask));
    std::vector<SDOperand> MaskOps;
    for (unsigned i = 0; i != NumElems; ++i) {
      if (Locs[i].first == -1) {
        MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
      } else {
        unsigned Idx = Locs[i].first * NumElems + Locs[i].second;
        MaskOps.push_back(DAG.getConstant(Idx, MaskEVT));
      }
    }
    return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle,
                       DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskOps));
  }

  return SDOperand();
}

SDOperand
X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
  if (!isa<ConstantSDNode>(Op.getOperand(1)))
    return SDOperand();

  MVT::ValueType VT = Op.getValueType();
  // TODO: handle v16i8.
  if (MVT::getSizeInBits(VT) == 16) {
    // Transform it so it matches pextrw, which produces a 32-bit result.
    MVT::ValueType EVT = (MVT::ValueType)(VT+1);
    SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT,
                                    Op.getOperand(0), Op.getOperand(1));
    SDOperand Assert = DAG.getNode(ISD::AssertZext, EVT, Extract,
                                   DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, VT, Assert);
  } else if (MVT::getSizeInBits(VT) == 32) {
    SDOperand Vec = Op.getOperand(0);
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
    if (Idx == 0)
      return Op;

    // SHUFPS the element to the lowest double word, then movss.
    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
    std::vector<SDOperand> IdxVec;
    IdxVec.push_back(DAG.getConstant(Idx, MVT::getVectorBaseType(MaskVT)));
    IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
    IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
    IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
    SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, IdxVec);
    Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
                      Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
                       DAG.getConstant(0, MVT::i32));
  } else if (MVT::getSizeInBits(VT) == 64) {
    SDOperand Vec = Op.getOperand(0);
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
    if (Idx == 0)
      return Op;

    // UNPCKHPD the element to the lowest double word, then movsd.
    // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
    // to a f64mem, the whole operation is folded into a single MOVHPDmr.
    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(2);
    std::vector<SDOperand> IdxVec;
    IdxVec.push_back(DAG.getConstant(1, MVT::getVectorBaseType(MaskVT)));
    IdxVec.push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorBaseType(MaskVT)));
    SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, IdxVec);
    Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
                      Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
                       DAG.getConstant(0, MVT::i32));
  }

  return SDOperand();
}

SDOperand
X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
  // Transform it so it matches pinsrw, which expects a 16-bit value in a R32
  // as its second argument.
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType BaseVT = MVT::getVectorBaseType(VT);
  SDOperand N0 = Op.getOperand(0);
  SDOperand N1 = Op.getOperand(1);
  SDOperand N2 = Op.getOperand(2);
  if (MVT::getSizeInBits(BaseVT) == 16) {
    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getConstant(cast<ConstantSDNode>(N2)->getValue(), MVT::i32);
    return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2);
  } else if (MVT::getSizeInBits(BaseVT) == 32) {
    unsigned Idx = cast<ConstantSDNode>(N2)->getValue();
    if (Idx == 0) {
      // Use a movss to insert into the low element.
      N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, N1);
      MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
      MVT::ValueType BaseVT = MVT::getVectorBaseType(MaskVT);
      std::vector<SDOperand> MaskVec;
      MaskVec.push_back(DAG.getConstant(4, BaseVT));
      for (unsigned i = 1; i <= 3; ++i)
        MaskVec.push_back(DAG.getConstant(i, BaseVT));
      return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, N0, N1,
                         DAG.getNode(ISD::BUILD_VECTOR, MaskVT, MaskVec));
    } else {
      // Use two pinsrw instructions to insert a 32 bit value.
      Idx <<= 1;
      if (MVT::isFloatingPoint(N1.getValueType())) {
        if (N1.getOpcode() == ISD::LOAD) {
          // Just load directly from f32mem to R32.
          N1 = DAG.getLoad(MVT::i32, N1.getOperand(0), N1.getOperand(1),
                           N1.getOperand(2));
        } else {
          N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v4f32, N1);
          N1 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, N1);
          N1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, N1,
                           DAG.getConstant(0, MVT::i32));
        }
      }
      N0 = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, N0);
      N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1,
                       DAG.getConstant(Idx, MVT::i32));
      N1 = DAG.getNode(ISD::SRL, MVT::i32, N1, DAG.getConstant(16, MVT::i8));
      N0 = DAG.getNode(X86ISD::PINSRW, MVT::v8i16, N0, N1,
                       DAG.getConstant(Idx+1, MVT::i32));
      return DAG.getNode(ISD::BIT_CONVERT, VT, N0);
    }
  }

  return SDOperand();
}

SDOperand
X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) {
  SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0));
  return DAG.getNode(X86ISD::S2VEC, Op.getValueType(), AnyExt);
}
// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form addressing mode. These wrapped nodes will be selected
// into the appropriate immediate-operand instructions.
SDOperand
X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  SDOperand Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(),
                                 DAG.getTargetConstantPool(CP->get(),
                                                           getPointerTy(),
                                                           CP->getAlignment()));
  if (Subtarget->isTargetDarwin()) {
    // With PIC, the address is actually $g + Offset.
    if (getTargetMachine().getRelocationModel() == Reloc::PIC)
      Result = DAG.getNode(ISD::ADD, getPointerTy(),
                           DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                           Result);
  }

  return Result;
}

SDOperand
X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) {
  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  SDOperand Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(),
                                 DAG.getTargetGlobalAddress(GV,
                                                            getPointerTy()));
  if (Subtarget->isTargetDarwin()) {
    // With PIC, the address is actually $g + Offset.
    if (getTargetMachine().getRelocationModel() == Reloc::PIC)
      Result = DAG.getNode(ISD::ADD, getPointerTy(),
                           DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                           Result);

    // For Darwin, external and weak symbols are indirect, so we want to load
    // the value at address GV, not the value of GV itself. This means that
    // the GlobalAddress must be in the base or index register of the address,
    // not the GV offset field.
    if (getTargetMachine().getRelocationModel() != Reloc::Static &&
        DarwinGVRequiresExtraLoad(GV))
      Result = DAG.getLoad(MVT::i32, DAG.getEntryNode(),
                           Result, DAG.getSrcValue(NULL));
  }

  return Result;
}

SDOperand
X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) {
  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
  SDOperand Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(),
                                 DAG.getTargetExternalSymbol(Sym,
                                                             getPointerTy()));
  if (Subtarget->isTargetDarwin()) {
    // With PIC, the address is actually $g + Offset.
    if (getTargetMachine().getRelocationModel() == Reloc::PIC)
      Result = DAG.getNode(ISD::ADD, getPointerTy(),
                           DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                           Result);
  }

  return Result;
}
SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getNumOperands() == 3 && Op.getValueType() == MVT::i32 &&
         "Not an i64 shift!");
  bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
  SDOperand ShOpLo = Op.getOperand(0);
  SDOperand ShOpHi = Op.getOperand(1);
  SDOperand ShAmt  = Op.getOperand(2);
  SDOperand Tmp1 = isSRA ? DAG.getNode(ISD::SRA, MVT::i32, ShOpHi,
                                       DAG.getConstant(31, MVT::i8))
                         : DAG.getConstant(0, MVT::i32);

  SDOperand Tmp2, Tmp3;
  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Tmp2 = DAG.getNode(X86ISD::SHLD, MVT::i32, ShOpHi, ShOpLo, ShAmt);
    Tmp3 = DAG.getNode(ISD::SHL, MVT::i32, ShOpLo, ShAmt);
  } else {
    Tmp2 = DAG.getNode(X86ISD::SHRD, MVT::i32, ShOpLo, ShOpHi, ShAmt);
    Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, MVT::i32, ShOpHi, ShAmt);
  }

  SDOperand InFlag = DAG.getNode(X86ISD::TEST, MVT::Flag,
                                 ShAmt, DAG.getConstant(32, MVT::i8));

  SDOperand Hi, Lo;
  SDOperand CC = DAG.getConstant(X86ISD::COND_NE, MVT::i8);

  std::vector<MVT::ValueType> Tys;
  Tys.push_back(MVT::i32);
  Tys.push_back(MVT::Flag);
  std::vector<SDOperand> Ops;
  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Ops.push_back(Tmp2);
    Ops.push_back(Tmp3);
    Ops.push_back(CC);
    Ops.push_back(InFlag);
    Hi = DAG.getNode(X86ISD::CMOV, Tys, Ops);
    InFlag = Hi.getValue(1);

    Ops.clear();
    Ops.push_back(Tmp3);
    Ops.push_back(Tmp1);
    Ops.push_back(CC);
    Ops.push_back(InFlag);
    Lo = DAG.getNode(X86ISD::CMOV, Tys, Ops);
  } else {
    Ops.push_back(Tmp2);
    Ops.push_back(Tmp3);
    Ops.push_back(CC);
    Ops.push_back(InFlag);
    Lo = DAG.getNode(X86ISD::CMOV, Tys, Ops);
    InFlag = Lo.getValue(1);

    Ops.clear();
    Ops.push_back(Tmp3);
    Ops.push_back(Tmp1);
    Ops.push_back(CC);
    Ops.push_back(InFlag);
    Hi = DAG.getNode(X86ISD::CMOV, Tys, Ops);
  }

  Tys.clear();
  Tys.push_back(MVT::i32);
  Tys.push_back(MVT::i32);
  Ops.clear();
  Ops.push_back(Lo);
  Ops.push_back(Hi);
  return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops);
}
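
// For illustration, for shl_parts with amt < 32: Hi = shld(Hi, Lo, amt) and
// Lo = shl(Lo, amt). The TEST checks bit 5 of the amount (the hardware masks
// shift counts to 5 bits), and the CMOVs patch up the amt >= 32 case by
// moving the single-shift result into the far half and filling the vacated
// half with Tmp1 (zero, or the sign extension for sra_parts).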
SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getOperand(0).getValueType() <= MVT::i64 &&
         Op.getOperand(0).getValueType() >= MVT::i16 &&
         "Unknown SINT_TO_FP to lower!");

  SDOperand Result;
  MVT::ValueType SrcVT = Op.getOperand(0).getValueType();
  unsigned Size = MVT::getSizeInBits(SrcVT)/8;
  MachineFunction &MF = DAG.getMachineFunction();
  int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
  SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  SDOperand Chain = DAG.getNode(ISD::STORE, MVT::Other,
                                DAG.getEntryNode(), Op.getOperand(0),
                                StackSlot, DAG.getSrcValue(NULL));

  // Build the FILD.
  std::vector<MVT::ValueType> Tys;
  Tys.push_back(MVT::f64);
  Tys.push_back(MVT::Other);
  if (X86ScalarSSE) Tys.push_back(MVT::Flag);
  std::vector<SDOperand> Ops;
  Ops.push_back(Chain);
  Ops.push_back(StackSlot);
  Ops.push_back(DAG.getValueType(SrcVT));
  Result = DAG.getNode(X86ScalarSSE ? X86ISD::FILD_FLAG : X86ISD::FILD,
                       Tys, Ops);

  if (X86ScalarSSE) {
    Chain = Result.getValue(1);
    SDOperand InFlag = Result.getValue(2);

    // FIXME: Currently the FST is flagged to the FILD_FLAG. This
    // shouldn't be necessary except that RFP cannot be live across
    // multiple blocks. When stackifier is fixed, they can be uncoupled.
    MachineFunction &MF = DAG.getMachineFunction();
    int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
    SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
    std::vector<MVT::ValueType> Tys;
    Tys.push_back(MVT::Other);
    std::vector<SDOperand> Ops;
    Ops.push_back(Chain);
    Ops.push_back(Result);
    Ops.push_back(StackSlot);
    Ops.push_back(DAG.getValueType(Op.getValueType()));
    Ops.push_back(InFlag);
    Chain = DAG.getNode(X86ISD::FST, Tys, Ops);
    Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot,
                         DAG.getSrcValue(NULL));
  }

  return Result;
}
SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 &&
         "Unknown FP_TO_SINT to lower!");
  // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary
  // stack slot.
  MachineFunction &MF = DAG.getMachineFunction();
  unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8;
  int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
  SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());

  unsigned Opc;
  switch (Op.getValueType()) {
  default: assert(0 && "Invalid FP_TO_SINT to lower!");
  case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
  case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
  case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
  }

  SDOperand Chain = DAG.getEntryNode();
  SDOperand Value = Op.getOperand(0);
  if (X86ScalarSSE) {
    assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!");
    Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value, StackSlot,
                        DAG.getSrcValue(0));
    std::vector<MVT::ValueType> Tys;
    Tys.push_back(MVT::f64);
    Tys.push_back(MVT::Other);
    std::vector<SDOperand> Ops;
    Ops.push_back(Chain);
    Ops.push_back(StackSlot);
    Ops.push_back(DAG.getValueType(Op.getOperand(0).getValueType()));
    Value = DAG.getNode(X86ISD::FLD, Tys, Ops);
    Chain = Value.getValue(1);
    SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
    StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  }

  // Build the FP_TO_INT*_IN_MEM.
  std::vector<SDOperand> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Value);
  Ops.push_back(StackSlot);
  SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops);

  // Load the result.
  return DAG.getLoad(Op.getValueType(), FIST, StackSlot,
                     DAG.getSrcValue(NULL));
}
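
// Note: x87 fistp rounds according to the current FP control word, so the
// FP_TO_INT*_IN_MEM pseudo instructions also save the control word, force
// round-toward-zero, store the integer, and restore the old control word,
// giving C's truncating conversion semantics.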
SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  const Type *OpNTy = MVT::getTypeForValueType(VT);
  std::vector<Constant*> CV;
  if (VT == MVT::f64) {
    CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(~(1ULL << 63))));
    CV.push_back(ConstantFP::get(OpNTy, 0.0));
  } else {
    CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(~(1U << 31))));
    CV.push_back(ConstantFP::get(OpNTy, 0.0));
    CV.push_back(ConstantFP::get(OpNTy, 0.0));
    CV.push_back(ConstantFP::get(OpNTy, 0.0));
  }
  Constant *CS = ConstantStruct::get(CV);
  SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
  SDOperand Mask
    = DAG.getNode(X86ISD::LOAD_PACK,
                  VT, DAG.getEntryNode(), CPIdx, DAG.getSrcValue(NULL));
  return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask);
}
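
// For illustration: fabs is a plain bit operation here. For f32 the mask is
// 0x7FFFFFFF, so bits(-1.5f) & 0x7FFFFFFF == bits(1.5f); only the sign bit
// is cleared, with the mask materialized from the constant pool and the
// "and" done as a packed operation on the value's register.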
SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  const Type *OpNTy = MVT::getTypeForValueType(VT);
  std::vector<Constant*> CV;
  if (VT == MVT::f64) {
    CV.push_back(ConstantFP::get(OpNTy, BitsToDouble(1ULL << 63)));
    CV.push_back(ConstantFP::get(OpNTy, 0.0));
  } else {
    CV.push_back(ConstantFP::get(OpNTy, BitsToFloat(1U << 31)));
    CV.push_back(ConstantFP::get(OpNTy, 0.0));
    CV.push_back(ConstantFP::get(OpNTy, 0.0));
    CV.push_back(ConstantFP::get(OpNTy, 0.0));
  }
  Constant *CS = ConstantStruct::get(CV);
  SDOperand CPIdx = DAG.getConstantPool(CS, getPointerTy(), 4);
  SDOperand Mask = DAG.getNode(X86ISD::LOAD_PACK,
                               VT, DAG.getEntryNode(), CPIdx,
                               DAG.getSrcValue(NULL));
  return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask);
}
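
// For illustration: fneg only flips the sign bit. For f64 the mask is
// 1ULL << 63, and bits(x) ^ (1ULL << 63) == bits(-x) for every x, including
// zeros, infinities and NaNs.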
SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) {
  assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
  SDOperand Cond;
  SDOperand CC = Op.getOperand(2);
  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType());
  bool Flip;
  unsigned X86CC;
  if (translateX86CC(CC, isFP, X86CC, Flip)) {
    if (Flip)
      Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
                         Op.getOperand(1), Op.getOperand(0));
    else
      Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
                         Op.getOperand(0), Op.getOperand(1));
    return DAG.getNode(X86ISD::SETCC, MVT::i8,
                       DAG.getConstant(X86CC, MVT::i8), Cond);
  } else {
    assert(isFP && "Illegal integer SetCC!");

    Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
                       Op.getOperand(0), Op.getOperand(1));
    std::vector<MVT::ValueType> Tys;
    std::vector<SDOperand> Ops;
    switch (SetCCOpcode) {
    default: assert(false && "Illegal floating point SetCC!");
    case ISD::SETOEQ: {  // !PF & ZF
      Tys.push_back(MVT::i8);
      Tys.push_back(MVT::Flag);
      Ops.push_back(DAG.getConstant(X86ISD::COND_NP, MVT::i8));
      Ops.push_back(Cond);
      SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
      SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
                                   DAG.getConstant(X86ISD::COND_E, MVT::i8),
                                   Tmp1.getValue(1));
      return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2);
    }
    case ISD::SETUNE: {  // PF | !ZF
      Tys.push_back(MVT::i8);
      Tys.push_back(MVT::Flag);
      Ops.push_back(DAG.getConstant(X86ISD::COND_P, MVT::i8));
      Ops.push_back(Cond);
      SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
      SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
                                   DAG.getConstant(X86ISD::COND_NE, MVT::i8),
                                   Tmp1.getValue(1));
      return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2);
    }
    }
  }
}
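
// For illustration: after a ucomiss/ucomisd, "unordered" sets ZF, PF and CF,
// while "equal" sets only ZF. Hence SETOEQ needs !PF && ZF (setnp and sete,
// anded together) and SETUNE needs PF || !ZF (setp and setne, ored together);
// no single x86 condition code captures either predicate.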
SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  bool isFPStack = MVT::isFloatingPoint(VT) && !X86ScalarSSE;
  bool addTest   = false;
  SDOperand Op0 = Op.getOperand(0);
  SDOperand Cond, CC;
  if (Op0.getOpcode() == ISD::SETCC)
    Op0 = LowerOperation(Op0, DAG);

  if (Op0.getOpcode() == X86ISD::SETCC) {
    // If condition flag is set by a X86ISD::CMP, then make a copy of it
    // (since flag operand cannot be shared). If the X86ISD::SETCC does not
    // have another use it will be eliminated.
    // If the X86ISD::SETCC has more than one use, then it's probably better
    // to use a test instead of duplicating the X86ISD::CMP (for register
    // pressure reasons).
    unsigned CmpOpc = Op0.getOperand(1).getOpcode();
    if (CmpOpc == X86ISD::CMP || CmpOpc == X86ISD::COMI ||
        CmpOpc == X86ISD::UCOMI) {
      if (!Op0.hasOneUse()) {
        std::vector<MVT::ValueType> Tys;
        for (unsigned i = 0; i < Op0.Val->getNumValues(); ++i)
          Tys.push_back(Op0.Val->getValueType(i));
        std::vector<SDOperand> Ops;
        for (unsigned i = 0; i < Op0.getNumOperands(); ++i)
          Ops.push_back(Op0.getOperand(i));
        Op0 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
      }

      CC   = Op0.getOperand(0);
      Cond = Op0.getOperand(1);
      // Make a copy as flag result cannot be used by more than one.
      Cond = DAG.getNode(CmpOpc, MVT::Flag,
                         Cond.getOperand(0), Cond.getOperand(1));
      addTest =
        isFPStack && !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
    } else
      addTest = true;
  } else
    addTest = true;

  if (addTest) {
    CC = DAG.getConstant(X86ISD::COND_NE, MVT::i8);
    Cond = DAG.getNode(X86ISD::TEST, MVT::Flag, Op0, Op0);
  }

  std::vector<MVT::ValueType> Tys;
  Tys.push_back(Op.getValueType());
  Tys.push_back(MVT::Flag);
  std::vector<SDOperand> Ops;
  // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
  // condition is true.
  Ops.push_back(Op.getOperand(2));
  Ops.push_back(Op.getOperand(1));
  Ops.push_back(CC);
  Ops.push_back(Cond);
  return DAG.getNode(X86ISD::CMOV, Tys, Ops);
}
SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) {
  bool addTest = false;
  SDOperand Cond = Op.getOperand(1);
  SDOperand Dest = Op.getOperand(2);
  SDOperand CC;
  if (Cond.getOpcode() == ISD::SETCC)
    Cond = LowerOperation(Cond, DAG);

  if (Cond.getOpcode() == X86ISD::SETCC) {
    // If condition flag is set by a X86ISD::CMP, then make a copy of it
    // (since flag operand cannot be shared). If the X86ISD::SETCC does not
    // have another use it will be eliminated.
    // If the X86ISD::SETCC has more than one use, then it's probably better
    // to use a test instead of duplicating the X86ISD::CMP (for register
    // pressure reasons).
    unsigned CmpOpc = Cond.getOperand(1).getOpcode();
    if (CmpOpc == X86ISD::CMP || CmpOpc == X86ISD::COMI ||
        CmpOpc == X86ISD::UCOMI) {
      if (!Cond.hasOneUse()) {
        std::vector<MVT::ValueType> Tys;
        for (unsigned i = 0; i < Cond.Val->getNumValues(); ++i)
          Tys.push_back(Cond.Val->getValueType(i));
        std::vector<SDOperand> Ops;
        for (unsigned i = 0; i < Cond.getNumOperands(); ++i)
          Ops.push_back(Cond.getOperand(i));
        Cond = DAG.getNode(X86ISD::SETCC, Tys, Ops);
      }

      CC   = Cond.getOperand(0);
      Cond = Cond.getOperand(1);
      // Make a copy as flag result cannot be used by more than one.
      Cond = DAG.getNode(CmpOpc, MVT::Flag,
                         Cond.getOperand(0), Cond.getOperand(1));
    } else
      addTest = true;
  } else
    addTest = true;

  if (addTest) {
    CC = DAG.getConstant(X86ISD::COND_NE, MVT::i8);
    Cond = DAG.getNode(X86ISD::TEST, MVT::Flag, Cond, Cond);
  }
  return DAG.getNode(X86ISD::BRCOND, Op.getValueType(),
                     Op.getOperand(0), Dest, CC, Cond);
}
SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDOperand Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(),
                                 DAG.getTargetJumpTable(JT->getIndex(),
                                                        getPointerTy()));
  if (Subtarget->isTargetDarwin()) {
    // With PIC, the address is actually $g + Offset.
    if (getTargetMachine().getRelocationModel() == Reloc::PIC)
      Result = DAG.getNode(ISD::ADD, getPointerTy(),
                           DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
                           Result);
  }

  return Result;
}
SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Copy;

  switch(Op.getNumOperands()) {
  default:
    assert(0 && "Do not know how to return this many arguments!");
    abort();
  case 1:    // ret void.
    return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, Op.getOperand(0),
                       DAG.getConstant(getBytesToPopOnReturn(), MVT::i16));
  case 2: {
    MVT::ValueType ArgVT = Op.getOperand(1).getValueType();

    if (MVT::isVector(ArgVT)) {
      // Integer or FP vector result -> XMM0.
      if (DAG.getMachineFunction().liveout_empty())
        DAG.getMachineFunction().addLiveOut(X86::XMM0);
      Copy = DAG.getCopyToReg(Op.getOperand(0), X86::XMM0, Op.getOperand(1),
                              SDOperand());
    } else if (MVT::isInteger(ArgVT)) {
      // Integer result -> EAX.
      if (DAG.getMachineFunction().liveout_empty())
        DAG.getMachineFunction().addLiveOut(X86::EAX);

      Copy = DAG.getCopyToReg(Op.getOperand(0), X86::EAX, Op.getOperand(1),
                              SDOperand());
    } else if (!X86ScalarSSE) {
      // FP return with fp-stack value.
      if (DAG.getMachineFunction().liveout_empty())
        DAG.getMachineFunction().addLiveOut(X86::ST0);

      std::vector<MVT::ValueType> Tys;
      Tys.push_back(MVT::Other);
      Tys.push_back(MVT::Flag);
      std::vector<SDOperand> Ops;
      Ops.push_back(Op.getOperand(0));
      Ops.push_back(Op.getOperand(1));
      Copy = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops);
    } else {
      // FP return with ScalarSSE (return on fp-stack).
      if (DAG.getMachineFunction().liveout_empty())
        DAG.getMachineFunction().addLiveOut(X86::ST0);

      SDOperand MemLoc;
      SDOperand Chain = Op.getOperand(0);
      SDOperand Value = Op.getOperand(1);

      if (Value.getOpcode() == ISD::LOAD &&
          (Chain == Value.getValue(1) || Chain == Value.getOperand(0))) {
        // The value is already in memory; reuse the load's address.
        Chain  = Value.getOperand(0);
        MemLoc = Value.getOperand(1);
      } else {
        // Spill the value to memory and reload it into top of stack.
        unsigned Size = MVT::getSizeInBits(ArgVT)/8;
        MachineFunction &MF = DAG.getMachineFunction();
        int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
        MemLoc = DAG.getFrameIndex(SSFI, getPointerTy());
        Chain = DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0),
                            Value, MemLoc, DAG.getSrcValue(0));
      }
      std::vector<MVT::ValueType> Tys;
      Tys.push_back(MVT::f64);
      Tys.push_back(MVT::Other);
      std::vector<SDOperand> Ops;
      Ops.push_back(Chain);
      Ops.push_back(MemLoc);
      Ops.push_back(DAG.getValueType(ArgVT));
      Copy = DAG.getNode(X86ISD::FLD, Tys, Ops);
      Tys.clear();
      Tys.push_back(MVT::Other);
      Tys.push_back(MVT::Flag);
      Ops.clear();
      Ops.push_back(Copy.getValue(1));
      Ops.push_back(Copy);
      Copy = DAG.getNode(X86ISD::FP_SET_RESULT, Tys, Ops);
    }
    break;
  }
  case 3:
    // 64-bit integer result -> EDX:EAX.
    if (DAG.getMachineFunction().liveout_empty()) {
      DAG.getMachineFunction().addLiveOut(X86::EAX);
      DAG.getMachineFunction().addLiveOut(X86::EDX);
    }

    Copy = DAG.getCopyToReg(Op.getOperand(0), X86::EDX, Op.getOperand(2),
                            SDOperand());
    Copy = DAG.getCopyToReg(Copy, X86::EAX, Op.getOperand(1), Copy.getValue(1));
    break;
  }
  return DAG.getNode(X86ISD::RET_FLAG, MVT::Other,
                     Copy, DAG.getConstant(getBytesToPopOnReturn(), MVT::i16),
                     Copy.getValue(1));
}
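
/// LowerFORMAL_ARGUMENTS - Lower the node representing the incoming formal
/// arguments, using the fastcc lowering when the fast calling convention is
/// both requested and enabled, and the standard C convention lowering
/// otherwise. The lowered values are cached in FormalArgs so they are only
/// computed once.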
SDOperand
X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
  if (FormalArgs.size() == 0) {
    unsigned CC = cast<ConstantSDNode>(Op.getOperand(0))->getValue();
    if (CC == CallingConv::Fast && EnableFastCC)
      LowerFastCCArguments(Op, DAG);
    else
      LowerCCCArguments(Op, DAG);
  }
  return FormalArgs[Op.ResNo];
}
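
/// LowerMEMSET - Lower an ISD::MEMSET node either to a call to the memset
/// library function (when the destination is not DWORD aligned, or the size
/// is a known-small constant) or to inline rep;stos instructions. For
/// example, a DWORD-aligned memset of a constant byte value over a constant
/// 131 bytes (assuming it meets the rep-string size threshold) becomes: the
/// value replicated into all four bytes of EAX, ECX = 32, one rep;stosd,
/// then a word store and a byte store for the trailing three bytes.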
SDOperand X86TargetLowering::LowerMEMSET(SDOperand Op, SelectionDAG &DAG) {
  SDOperand InFlag(0, 0);
  SDOperand Chain = Op.getOperand(0);
  unsigned Align =
    (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
  if (Align == 0) Align = 1;

  ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3));
  // If not DWORD aligned, or if the constant size is below the rep-string
  // threshold, lower to a call to the memset library function; it knows how
  // to align to the right boundary first.
  if ((Align & 3) != 0 ||
      (I && I->getValue() < Subtarget->getMinRepStrSizeThreshold())) {
    MVT::ValueType IntPtr = getPointerTy();
    const Type *IntPtrTy = getTargetData()->getIntPtrType();
    std::vector<std::pair<SDOperand, const Type*> > Args;
    Args.push_back(std::make_pair(Op.getOperand(1), IntPtrTy));
    // Extend the ubyte argument to be an int value for the call.
    SDOperand Val = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op.getOperand(2));
    Args.push_back(std::make_pair(Val, IntPtrTy));
    Args.push_back(std::make_pair(Op.getOperand(3), IntPtrTy));
    std::pair<SDOperand,SDOperand> CallResult =
      LowerCallTo(Chain, Type::VoidTy, false, CallingConv::C, false,
                  DAG.getExternalSymbol("memset", IntPtr), Args, DAG);
    return CallResult.second;
  }

  MVT::ValueType AVT;
  SDOperand Count;
  ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Op.getOperand(2));
  unsigned BytesLeft = 0;
  bool TwoRepStos = false;
  if (ValC) {
    unsigned ValReg;
    unsigned Val = ValC->getValue() & 255;

    // If the value is a constant, then we can potentially use larger sets.
    switch (Align & 3) {
    case 2:   // WORD aligned
      AVT = MVT::i16;
      Count = DAG.getConstant(I->getValue() / 2, MVT::i32);
      BytesLeft = I->getValue() % 2;
      Val    = (Val << 8) | Val;
      ValReg = X86::AX;
      break;
    case 0:   // DWORD aligned
      AVT = MVT::i32;
      if (I) {
        Count = DAG.getConstant(I->getValue() / 4, MVT::i32);
        BytesLeft = I->getValue() % 4;
      } else {
        // Variable size: store the whole DWORDs first; the remaining bytes
        // are handled by a second, byte-sized rep;stos below.
        Count = DAG.getNode(ISD::SRL, MVT::i32, Op.getOperand(3),
                            DAG.getConstant(2, MVT::i8));
        TwoRepStos = true;
      }
      Val = (Val << 8)  | Val;
      Val = (Val << 16) | Val;
      ValReg = X86::EAX;
      break;
    default:  // Byte aligned
      AVT = MVT::i8;
      Count  = Op.getOperand(3);
      ValReg = X86::AL;
      break;
    }

    Chain  = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT),
                              InFlag);
    InFlag = Chain.getValue(1);
  } else {
    AVT = MVT::i8;
    Count  = Op.getOperand(3);
    Chain  = DAG.getCopyToReg(Chain, X86::AL, Op.getOperand(2), InFlag);
    InFlag = Chain.getValue(1);
  }

  Chain  = DAG.getCopyToReg(Chain, X86::ECX, Count, InFlag);
  InFlag = Chain.getValue(1);
  Chain  = DAG.getCopyToReg(Chain, X86::EDI, Op.getOperand(1), InFlag);
  InFlag = Chain.getValue(1);

  std::vector<MVT::ValueType> Tys;
  Tys.push_back(MVT::Other);
  Tys.push_back(MVT::Flag);
  std::vector<SDOperand> Ops;
  Ops.push_back(Chain);
  Ops.push_back(DAG.getValueType(AVT));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(X86ISD::REP_STOS, Tys, Ops);

  if (TwoRepStos) {
    // Store the trailing (size & 3) bytes with a byte-sized rep;stos.
    InFlag = Chain.getValue(1);
    Count = Op.getOperand(3);
    MVT::ValueType CVT = Count.getValueType();
    SDOperand Left = DAG.getNode(ISD::AND, CVT, Count,
                                 DAG.getConstant(3, CVT));
    Chain  = DAG.getCopyToReg(Chain, X86::ECX, Left, InFlag);
    InFlag = Chain.getValue(1);
    Tys.clear();
    Tys.push_back(MVT::Other);
    Tys.push_back(MVT::Flag);
    Ops.clear();
    Ops.push_back(Chain);
    Ops.push_back(DAG.getValueType(MVT::i8));
    Ops.push_back(InFlag);
    Chain = DAG.getNode(X86ISD::REP_STOS, Tys, Ops);
  } else if (BytesLeft) {
    // Issue stores for the last 1 - 3 bytes.
    SDOperand Value;
    unsigned Val = ValC->getValue() & 255;
    unsigned Offset = I->getValue() - BytesLeft;
    SDOperand DstAddr = Op.getOperand(1);
    MVT::ValueType AddrVT = DstAddr.getValueType();
    if (BytesLeft >= 2) {
      Value = DAG.getConstant((Val << 8) | Val, MVT::i16);
      Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
                          DAG.getNode(ISD::ADD, AddrVT, DstAddr,
                                      DAG.getConstant(Offset, AddrVT)),
                          DAG.getSrcValue(NULL));
      BytesLeft -= 2;
      Offset += 2;
    }

    if (BytesLeft == 1) {
      Value = DAG.getConstant(Val, MVT::i8);
      Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
                          DAG.getNode(ISD::ADD, AddrVT, DstAddr,
                                      DAG.getConstant(Offset, AddrVT)),
                          DAG.getSrcValue(NULL));
    }
  }

  return Chain;
}
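
/// LowerMEMCPY - Lower an ISD::MEMCPY node either to a call to the memcpy
/// library function or to inline rep;movs instructions, mirroring the
/// strategy used for MEMSET above: a variable-length DWORD-aligned copy uses
/// rep;movsd followed by rep;movsb with ECX = size & 3, while a constant
/// length copies the trailing 1 - 3 bytes with explicit load/store pairs.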
SDOperand X86TargetLowering::LowerMEMCPY(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Chain = Op.getOperand(0);
  unsigned Align =
    (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
  if (Align == 0) Align = 1;

  ConstantSDNode *I = dyn_cast<ConstantSDNode>(Op.getOperand(3));
  // If not DWORD aligned, or if the constant size is below the rep-string
  // threshold, lower to a call to the memcpy library function; it knows how
  // to align to the right boundary first.
  if ((Align & 3) != 0 ||
      (I && I->getValue() < Subtarget->getMinRepStrSizeThreshold())) {
    MVT::ValueType IntPtr = getPointerTy();
    const Type *IntPtrTy = getTargetData()->getIntPtrType();
    std::vector<std::pair<SDOperand, const Type*> > Args;
    Args.push_back(std::make_pair(Op.getOperand(1), IntPtrTy));
    Args.push_back(std::make_pair(Op.getOperand(2), IntPtrTy));
    Args.push_back(std::make_pair(Op.getOperand(3), IntPtrTy));
    std::pair<SDOperand,SDOperand> CallResult =
      LowerCallTo(Chain, Type::VoidTy, false, CallingConv::C, false,
                  DAG.getExternalSymbol("memcpy", IntPtr), Args, DAG);
    return CallResult.second;
  }

  MVT::ValueType AVT;
  SDOperand Count;
  unsigned BytesLeft = 0;
  bool TwoRepMovs = false;
  switch (Align & 3) {
  case 2:   // WORD aligned
    AVT = MVT::i16;
    Count = DAG.getConstant(I->getValue() / 2, MVT::i32);
    BytesLeft = I->getValue() % 2;
    break;
  case 0:   // DWORD aligned
    AVT = MVT::i32;
    if (I) {
      Count = DAG.getConstant(I->getValue() / 4, MVT::i32);
      BytesLeft = I->getValue() % 4;
    } else {
      // Variable size: move the whole DWORDs first; the remaining bytes are
      // handled by a second, byte-sized rep;movs below.
      Count = DAG.getNode(ISD::SRL, MVT::i32, Op.getOperand(3),
                          DAG.getConstant(2, MVT::i8));
      TwoRepMovs = true;
    }
    break;
  default:  // Byte aligned
    AVT = MVT::i8;
    Count = Op.getOperand(3);
    break;
  }

  SDOperand InFlag(0, 0);
  Chain  = DAG.getCopyToReg(Chain, X86::ECX, Count, InFlag);
  InFlag = Chain.getValue(1);
  Chain  = DAG.getCopyToReg(Chain, X86::EDI, Op.getOperand(1), InFlag);
  InFlag = Chain.getValue(1);
  Chain  = DAG.getCopyToReg(Chain, X86::ESI, Op.getOperand(2), InFlag);
  InFlag = Chain.getValue(1);

  std::vector<MVT::ValueType> Tys;
  Tys.push_back(MVT::Other);
  Tys.push_back(MVT::Flag);
  std::vector<SDOperand> Ops;
  Ops.push_back(Chain);
  Ops.push_back(DAG.getValueType(AVT));
  Ops.push_back(InFlag);
  Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, Ops);

  if (TwoRepMovs) {
    // Copy the trailing (size & 3) bytes with a byte-sized rep;movs.
    InFlag = Chain.getValue(1);
    Count = Op.getOperand(3);
    MVT::ValueType CVT = Count.getValueType();
    SDOperand Left = DAG.getNode(ISD::AND, CVT, Count,
                                 DAG.getConstant(3, CVT));
    Chain  = DAG.getCopyToReg(Chain, X86::ECX, Left, InFlag);
    InFlag = Chain.getValue(1);
    Tys.clear();
    Tys.push_back(MVT::Other);
    Tys.push_back(MVT::Flag);
    Ops.clear();
    Ops.push_back(Chain);
    Ops.push_back(DAG.getValueType(MVT::i8));
    Ops.push_back(InFlag);
    Chain = DAG.getNode(X86ISD::REP_MOVS, Tys, Ops);
  } else if (BytesLeft) {
    // Issue loads and stores for the last 1 - 3 bytes.
    unsigned Offset = I->getValue() - BytesLeft;
    SDOperand DstAddr = Op.getOperand(1);
    MVT::ValueType DstVT = DstAddr.getValueType();
    SDOperand SrcAddr = Op.getOperand(2);
    MVT::ValueType SrcVT = SrcAddr.getValueType();
    SDOperand Value;
    if (BytesLeft >= 2) {
      Value = DAG.getLoad(MVT::i16, Chain,
                          DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
                                      DAG.getConstant(Offset, SrcVT)),
                          DAG.getSrcValue(NULL));
      Chain = Value.getValue(1);
      Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
                          DAG.getNode(ISD::ADD, DstVT, DstAddr,
                                      DAG.getConstant(Offset, DstVT)),
                          DAG.getSrcValue(NULL));
      BytesLeft -= 2;
      Offset += 2;
    }

    if (BytesLeft == 1) {
      Value = DAG.getLoad(MVT::i8, Chain,
                          DAG.getNode(ISD::ADD, SrcVT, SrcAddr,
                                      DAG.getConstant(Offset, SrcVT)),
                          DAG.getSrcValue(NULL));
      Chain = Value.getValue(1);
      Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
                          DAG.getNode(ISD::ADD, DstVT, DstAddr,
                                      DAG.getConstant(Offset, DstVT)),
                          DAG.getSrcValue(NULL));
    }
  }

  return Chain;
}
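
/// LowerREADCYCLECOUNTER - Lower an ISD::READCYCLECOUNTER node to the RDTSC
/// instruction, which places the 64-bit time-stamp counter in EDX:EAX. The
/// two halves are copied out of the registers and merged with the chain.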
SDOperand
X86TargetLowering::LowerREADCYCLECOUNTER(SDOperand Op, SelectionDAG &DAG) {
  std::vector<MVT::ValueType> Tys;
  Tys.push_back(MVT::Other);
  Tys.push_back(MVT::Flag);
  std::vector<SDOperand> Ops;
  Ops.push_back(Op.getOperand(0));
  SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, Ops);
  Ops.clear();
  Ops.push_back(DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)));
  Ops.push_back(DAG.getCopyFromReg(Ops[0].getValue(1), X86::EDX,
                                   MVT::i32, Ops[0].getValue(2)));
  Ops.push_back(Ops[1].getValue(1));
  Tys[0] = Tys[1] = MVT::i32;
  Tys.push_back(MVT::Other);
  return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops);
}
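
/// LowerVASTART - Lower an ISD::VASTART node. On X86 the va_list is a single
/// pointer, so it suffices to store the address of the first vararg slot.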
SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) {
  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  // FIXME: Replace MVT::i32 with PointerTy
  SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32);
  return DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0), FR,
                     Op.getOperand(1), Op.getOperand(2));
}
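
/// LowerINTRINSIC_WO_CHAIN - Lower chain-less target intrinsics. The SSE and
/// SSE2 [u]comi comparison intrinsics are lowered to an X86ISD::COMI or
/// X86ISD::UCOMI compare followed by an X86ISD::SETCC that materializes the
/// appropriate condition flag as an i8, which is then extended to i32.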
SDOperand
X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue();
  switch (IntNo) {
  default: return SDOperand();    // Don't custom lower most intrinsics.
  // Comparison intrinsics.
  case Intrinsic::x86_sse_comieq_ss:
  case Intrinsic::x86_sse_comilt_ss:
  case Intrinsic::x86_sse_comile_ss:
  case Intrinsic::x86_sse_comigt_ss:
  case Intrinsic::x86_sse_comige_ss:
  case Intrinsic::x86_sse_comineq_ss:
  case Intrinsic::x86_sse_ucomieq_ss:
  case Intrinsic::x86_sse_ucomilt_ss:
  case Intrinsic::x86_sse_ucomile_ss:
  case Intrinsic::x86_sse_ucomigt_ss:
  case Intrinsic::x86_sse_ucomige_ss:
  case Intrinsic::x86_sse_ucomineq_ss:
  case Intrinsic::x86_sse2_comieq_sd:
  case Intrinsic::x86_sse2_comilt_sd:
  case Intrinsic::x86_sse2_comile_sd:
  case Intrinsic::x86_sse2_comigt_sd:
  case Intrinsic::x86_sse2_comige_sd:
  case Intrinsic::x86_sse2_comineq_sd:
  case Intrinsic::x86_sse2_ucomieq_sd:
  case Intrinsic::x86_sse2_ucomilt_sd:
  case Intrinsic::x86_sse2_ucomile_sd:
  case Intrinsic::x86_sse2_ucomigt_sd:
  case Intrinsic::x86_sse2_ucomige_sd:
  case Intrinsic::x86_sse2_ucomineq_sd: {
    unsigned Opc = 0;
    ISD::CondCode CC = ISD::SETCC_INVALID;
    switch (IntNo) {
    default: break;
    case Intrinsic::x86_sse_comieq_ss:
    case Intrinsic::x86_sse2_comieq_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETEQ;
      break;
    case Intrinsic::x86_sse_comilt_ss:
    case Intrinsic::x86_sse2_comilt_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETLT;
      break;
    case Intrinsic::x86_sse_comile_ss:
    case Intrinsic::x86_sse2_comile_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETLE;
      break;
    case Intrinsic::x86_sse_comigt_ss:
    case Intrinsic::x86_sse2_comigt_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETGT;
      break;
    case Intrinsic::x86_sse_comige_ss:
    case Intrinsic::x86_sse2_comige_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETGE;
      break;
    case Intrinsic::x86_sse_comineq_ss:
    case Intrinsic::x86_sse2_comineq_sd:
      Opc = X86ISD::COMI;
      CC = ISD::SETNE;
      break;
    case Intrinsic::x86_sse_ucomieq_ss:
    case Intrinsic::x86_sse2_ucomieq_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETEQ;
      break;
    case Intrinsic::x86_sse_ucomilt_ss:
    case Intrinsic::x86_sse2_ucomilt_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETLT;
      break;
    case Intrinsic::x86_sse_ucomile_ss:
    case Intrinsic::x86_sse2_ucomile_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETLE;
      break;
    case Intrinsic::x86_sse_ucomigt_ss:
    case Intrinsic::x86_sse2_ucomigt_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETGT;
      break;
    case Intrinsic::x86_sse_ucomige_ss:
    case Intrinsic::x86_sse2_ucomige_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETGE;
      break;
    case Intrinsic::x86_sse_ucomineq_ss:
    case Intrinsic::x86_sse2_ucomineq_sd:
      Opc = X86ISD::UCOMI;
      CC = ISD::SETNE;
      break;
    }

    unsigned X86CC;
    bool Flip;
    translateX86CC(CC, true, X86CC, Flip);
    SDOperand Cond = DAG.getNode(Opc, MVT::Flag, Op.getOperand(Flip?2:1),
                                 Op.getOperand(Flip?1:2));
    SDOperand SetCC = DAG.getNode(X86ISD::SETCC, MVT::i8,
                                  DAG.getConstant(X86CC, MVT::i8), Cond);
    return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC);
  }
  }
}

/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
  switch (Op.getOpcode()) {
  default: assert(0 && "Should not custom lower this!");
  case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::ExternalSymbol:     return LowerExternalSymbol(Op, DAG);
  case ISD::SHL_PARTS:
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS:          return LowerShift(Op, DAG);
  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG);
  case ISD::FP_TO_SINT:         return LowerFP_TO_SINT(Op, DAG);
  case ISD::FABS:               return LowerFABS(Op, DAG);
  case ISD::FNEG:               return LowerFNEG(Op, DAG);
  case ISD::SETCC:              return LowerSETCC(Op, DAG);
  case ISD::SELECT:             return LowerSELECT(Op, DAG);
  case ISD::BRCOND:             return LowerBRCOND(Op, DAG);
  case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
  case ISD::RET:                return LowerRET(Op, DAG);
  case ISD::FORMAL_ARGUMENTS:   return LowerFORMAL_ARGUMENTS(Op, DAG);
  case ISD::MEMSET:             return LowerMEMSET(Op, DAG);
  case ISD::MEMCPY:             return LowerMEMCPY(Op, DAG);
  case ISD::READCYCLECOUNTER:   return LowerREADCYCLECOUNTER(Op, DAG);
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  }
}
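
/// getTargetNodeName - Return a readable name for the given target-specific
/// DAG node opcode, used when dumping DAGs for debugging.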
const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return NULL;
  case X86ISD::SHLD:               return "X86ISD::SHLD";
  case X86ISD::SHRD:               return "X86ISD::SHRD";
  case X86ISD::FAND:               return "X86ISD::FAND";
  case X86ISD::FXOR:               return "X86ISD::FXOR";
  case X86ISD::FILD:               return "X86ISD::FILD";
  case X86ISD::FILD_FLAG:          return "X86ISD::FILD_FLAG";
  case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
  case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
  case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
  case X86ISD::FLD:                return "X86ISD::FLD";
  case X86ISD::FST:                return "X86ISD::FST";
  case X86ISD::FP_GET_RESULT:      return "X86ISD::FP_GET_RESULT";
  case X86ISD::FP_SET_RESULT:      return "X86ISD::FP_SET_RESULT";
  case X86ISD::CALL:               return "X86ISD::CALL";
  case X86ISD::TAILCALL:           return "X86ISD::TAILCALL";
  case X86ISD::RDTSC_DAG:          return "X86ISD::RDTSC_DAG";
  case X86ISD::CMP:                return "X86ISD::CMP";
  case X86ISD::TEST:               return "X86ISD::TEST";
  case X86ISD::COMI:               return "X86ISD::COMI";
  case X86ISD::UCOMI:              return "X86ISD::UCOMI";
  case X86ISD::SETCC:              return "X86ISD::SETCC";
  case X86ISD::CMOV:               return "X86ISD::CMOV";
  case X86ISD::BRCOND:             return "X86ISD::BRCOND";
  case X86ISD::RET_FLAG:           return "X86ISD::RET_FLAG";
  case X86ISD::REP_STOS:           return "X86ISD::REP_STOS";
  case X86ISD::REP_MOVS:           return "X86ISD::REP_MOVS";
  case X86ISD::LOAD_PACK:          return "X86ISD::LOAD_PACK";
  case X86ISD::GlobalBaseReg:      return "X86ISD::GlobalBaseReg";
  case X86ISD::Wrapper:            return "X86ISD::Wrapper";
  case X86ISD::S2VEC:              return "X86ISD::S2VEC";
  case X86ISD::PEXTRW:             return "X86ISD::PEXTRW";
  case X86ISD::PINSRW:             return "X86ISD::PINSRW";
  }
}
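
/// computeMaskedBitsForTargetNode - Determine which bits of Op are known to
/// be zero or one for target-specific nodes. X86ISD::SETCC produces either
/// 0 or 1 in an i8 register, so every bit above bit zero is known zero.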
void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                       uint64_t Mask,
                                                       uint64_t &KnownZero,
                                                       uint64_t &KnownOne,
                                                       unsigned Depth) const {
  unsigned Opc = Op.getOpcode();
  assert((Opc >= ISD::BUILTIN_OP_END ||
          Opc == ISD::INTRINSIC_WO_CHAIN ||
          Opc == ISD::INTRINSIC_W_CHAIN ||
          Opc == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");

  KnownZero = KnownOne = 0;   // Don't know anything.
  switch (Opc) {
  default: break;
  case X86ISD::SETCC:
    // All bits above bit 0 of the i8 result are known to be zero.
    KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
    break;
  }
}
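
/// getRegClassForInlineAsmConstraint - Map a single-letter GCC inline asm
/// register constraint ('r', 'R', 'l', 'q', 'x', 'Y') to the list of X86
/// registers of the requested value type that may satisfy it.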
std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  if (Constraint.size() == 1) {
    // FIXME: not handling fp-stack yet!
    // FIXME: not handling MMX registers yet ('y' constraint).
    switch (Constraint[0]) {      // GCC X86 Constraint Letters
    default: break;  // Unknown constraint letter
    case 'r':   // GENERAL_REGS
    case 'R':   // LEGACY_REGS
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX,
                                     X86::ESI, X86::EDI, X86::EBP, X86::ESP, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX,
                                     X86::SI, X86::DI, X86::BP, X86::SP, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      break;
    case 'l':   // INDEX_REGS
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX,
                                     X86::ESI, X86::EDI, X86::EBP, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX,
                                     X86::SI, X86::DI, X86::BP, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      break;
    case 'q':   // Q_REGS (GENERAL_REGS in 64-bit mode)
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      break;
    case 'x':   // SSE_REGS if SSE1 allowed
      if (Subtarget->hasSSE1())
        return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
                                     X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
                                     0);
      return std::vector<unsigned>();
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (Subtarget->hasSSE2())
        return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
                                     X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7,
                                     0);
      return std::vector<unsigned>();
    }
  }

  return std::vector<unsigned>();
}

/// isLegalAddressImmediate - Return true if the integer value or
/// GlobalValue can be used as the offset of the target addressing mode.
bool X86TargetLowering::isLegalAddressImmediate(int64_t V) const {
  // X86 allows a sign-extended 32-bit immediate field, i.e. [-2^31, 2^31-1].
  return (V >= -(1LL << 31) && V <= (1LL << 31)-1);
}

bool X86TargetLowering::isLegalAddressImmediate(GlobalValue *GV) const {
  if (Subtarget->isTargetDarwin()) {
    Reloc::Model RModel = getTargetMachine().getRelocationModel();
    if (RModel == Reloc::Static)
      return true;
    else if (RModel == Reloc::DynamicNoPIC)
      return !DarwinGVRequiresExtraLoad(GV);
    else
      return false;
  }
  return true;
}

/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool
X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const {
  // Only do shuffles on 128-bit vector types for now.
  if (MVT::getSizeInBits(VT) == 64) return false;
  return (Mask.Val->getNumOperands() <= 4 ||
          isSplatMask(Mask.Val) ||
          isPSHUFHW_PSHUFLWMask(Mask.Val) ||
          X86::isUNPCKLMask(Mask.Val) ||
          X86::isUNPCKL_v_undef_Mask(Mask.Val) ||
          X86::isUNPCKHMask(Mask.Val));
}

bool X86TargetLowering::isVectorClearMaskLegal(std::vector<SDOperand> &BVOps,
                                               MVT::ValueType EVT,
                                               SelectionDAG &DAG) const {
  unsigned NumElts = BVOps.size();
  // Only do shuffles on 128-bit vector types for now.
  if (MVT::getSizeInBits(EVT) * NumElts == 64) return false;
  if (NumElts == 2) return true;
  if (NumElts == 4) {
    return (isMOVLMask(BVOps) || isCommutedMOVL(BVOps, true) ||
            isSHUFPMask(BVOps) || isCommutedSHUFP(BVOps));
  }
  return false;
}