1 //===-- X86ISelPattern.cpp - A pattern matching inst selector for X86 -----===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by the LLVM research group and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines a pattern matching instruction selector for X86.
12 //===----------------------------------------------------------------------===//
15 #include "X86InstrBuilder.h"
16 #include "X86RegisterInfo.h"
17 #include "X86Subtarget.h"
18 #include "llvm/CallingConv.h"
19 #include "llvm/Constants.h"
20 #include "llvm/Instructions.h"
21 #include "llvm/Function.h"
22 #include "llvm/CodeGen/MachineConstantPool.h"
23 #include "llvm/CodeGen/MachineFunction.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/SelectionDAG.h"
26 #include "llvm/CodeGen/SelectionDAGISel.h"
27 #include "llvm/CodeGen/SSARegMap.h"
28 #include "llvm/Target/TargetData.h"
29 #include "llvm/Target/TargetLowering.h"
30 #include "llvm/Target/TargetMachine.h"
31 #include "llvm/Target/TargetOptions.h"
32 #include "llvm/Support/CFG.h"
33 #include "llvm/Support/MathExtras.h"
34 #include "llvm/ADT/Statistic.h"
40 #include "llvm/Support/CommandLine.h"
// Hidden command-line flag gating the experimental X86 "fastcc" calling
// convention. When false, fastcc functions/calls fall back to the plain C
// calling-convention paths (see LowerArguments / LowerCallTo below).
41 static cl::opt<bool> EnableFastCC("enable-x86-fastcc", cl::Hidden,
42 cl::desc("Enable fastcc on X86"));
45 // X86 Specific DAG Nodes
48 // Start the numbering where the builtin ops leave off.
49 FIRST_NUMBER = ISD::BUILTIN_OP_END,
51 /// FILD64m - This instruction implements SINT_TO_FP with a
52 /// 64-bit source in memory and a FP reg result. This corresponds to
53 /// the X86::FILD64m instruction. It has two inputs (token chain and
54 /// address) and two outputs (FP value and token chain).
57 /// FP_TO_INT*_IN_MEM - This instruction implements FP_TO_SINT with the
58 /// integer destination in memory and a FP reg source. This corresponds
59 /// to the X86::FIST*m instructions and the rounding mode change stuff. It
60 /// has two inputs (token chain and address) and two outputs (FP value and
66 /// CALL/TAILCALL - These operations represent an abstract X86 call
67 /// instruction, which includes a bunch of information. In particular the
68 /// operands of these node are:
70 /// #0 - The incoming token chain
72 /// #2 - The number of arg bytes the caller pushes on the stack.
73 /// #3 - The number of arg bytes the callee pops off the stack.
74 /// #4 - The value to pass in AL/AX/EAX (optional)
75 /// #5 - The value to pass in DL/DX/EDX (optional)
77 /// The result values of these nodes are:
79 /// #0 - The outgoing token chain
80 /// #1 - The first register result value (optional)
81 /// #2 - The second register result value (optional)
83 /// The CALL vs TAILCALL distinction boils down to whether the callee is
84 /// known not to modify the caller's stack frame, as is standard with
92 //===----------------------------------------------------------------------===//
93 // X86TargetLowering - X86 Implementation of the TargetLowering interface
// X86 implementation of the TargetLowering interface. The four int fields
// below are per-function lowering state: they are (re)initialized each time
// LowerCCCArguments / LowerFastCCArguments runs for a new function, and read
// back by getBytesToPopOnReturn / getBytesCallerReserves and
// getReturnAddressFrameIndex.
95 class X86TargetLowering : public TargetLowering {
96 int VarArgsFrameIndex; // FrameIndex for start of varargs area.
97 int ReturnAddrIndex; // FrameIndex for return slot.
98 int BytesToPopOnReturn; // Number of arg bytes ret should pop.
99 int BytesCallerReserves; // Number of arg bytes caller makes.
// Constructor: declares which value types live in which X86 register classes
// and how each ISD operation on each type must be legalized (Promote /
// Expand / Custom). Custom-marked nodes are handled in LowerOperation below.
// NOTE(review): this is an elided listing -- some original lines between the
// visible statements are missing; confirm details against the full file.
101 X86TargetLowering(TargetMachine &TM) : TargetLowering(TM) {
102 // Set up the TargetLowering object.
104 // X86 is weird, it always uses i8 for shift amounts and setcc results.
105 setShiftAmountType(MVT::i8);
106 setSetCCResultType(MVT::i8);
107 setSetCCResultContents(ZeroOrOneSetCCResult);
108 setShiftAmountFlavor(Mask); // shl X, 32 == shl X, 0
110 // Set up the register classes.
111 // FIXME: Eliminate these two classes when legalize can handle promotions
113 addRegisterClass(MVT::i1, X86::R8RegisterClass);
114 addRegisterClass(MVT::i8, X86::R8RegisterClass);
115 addRegisterClass(MVT::i16, X86::R16RegisterClass);
116 addRegisterClass(MVT::i32, X86::R32RegisterClass);
118 // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
120 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
121 setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
122 setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);
123 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
125 // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
127 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
128 setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);
131 // We can handle SINT_TO_FP and FP_TO_SINT from/TO i64 even though i64
// Custom lowering for these is implemented in LowerOperation (FILD64m /
// FP_TO_INT*_IN_MEM via a temporary stack slot).
133 setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
134 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
135 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
136 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom);
139 // Handle FP_TO_UINT by promoting the destination to a larger signed
141 setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote);
142 setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote);
143 setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote);
146 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
148 // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have
150 setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
151 setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);
152 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
// Operations with no X86 instruction: let the legalizer expand them.
154 setOperationAction(ISD::BRCONDTWOWAY , MVT::Other, Expand);
155 setOperationAction(ISD::MEMMOVE , MVT::Other, Expand);
156 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Expand);
157 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
158 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
159 setOperationAction(ISD::SEXTLOAD , MVT::i1 , Expand);
160 setOperationAction(ISD::SREM , MVT::f64 , Expand);
// No native bit-count instructions are used here: expand CTPOP/CTTZ/CTLZ
// for every integer type.
161 setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
162 setOperationAction(ISD::CTTZ , MVT::i8 , Expand);
163 setOperationAction(ISD::CTLZ , MVT::i8 , Expand);
164 setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
165 setOperationAction(ISD::CTTZ , MVT::i16 , Expand);
166 setOperationAction(ISD::CTLZ , MVT::i16 , Expand);
167 setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
168 setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
169 setOperationAction(ISD::CTLZ , MVT::i32 , Expand);
171 setOperationAction(ISD::READIO , MVT::i1 , Expand);
172 setOperationAction(ISD::READIO , MVT::i8 , Expand);
173 setOperationAction(ISD::READIO , MVT::i16 , Expand);
174 setOperationAction(ISD::READIO , MVT::i32 , Expand);
175 setOperationAction(ISD::WRITEIO , MVT::i1 , Expand);
176 setOperationAction(ISD::WRITEIO , MVT::i8 , Expand);
177 setOperationAction(ISD::WRITEIO , MVT::i16 , Expand);
178 setOperationAction(ISD::WRITEIO , MVT::i32 , Expand);
180 // These should be promoted to a larger select which is supported.
181 setOperationAction(ISD::SELECT , MVT::i1 , Promote);
182 setOperationAction(ISD::SELECT , MVT::i8 , Promote);
// NOTE(review): both the SSE (RXMM) and x87 (RFP) floating-point setups
// appear below; in the full source one of the two branches is chosen by a
// subtarget SSE check whose if/else lines are elided from this listing --
// confirm against the complete file.
185 // Set up the FP register classes.
186 addRegisterClass(MVT::f32, X86::RXMMRegisterClass);
187 addRegisterClass(MVT::f64, X86::RXMMRegisterClass);
189 // SSE has no load+extend ops
190 setOperationAction(ISD::EXTLOAD, MVT::f32, Expand);
191 setOperationAction(ISD::ZEXTLOAD, MVT::f32, Expand);
193 // SSE has no i16 to fp conversion, only i32
194 setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
195 setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
197 // Expand FP_TO_UINT into a select.
198 // FIXME: We would like to use a Custom expander here eventually to do
199 // the optimal thing for SSE vs. the default expansion in the legalizer.
200 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand);
202 // We don't support sin/cos/sqrt/fmod
203 setOperationAction(ISD::FSIN , MVT::f64, Expand);
204 setOperationAction(ISD::FCOS , MVT::f64, Expand);
205 setOperationAction(ISD::FABS , MVT::f64, Expand);
206 setOperationAction(ISD::FNEG , MVT::f64, Expand);
207 setOperationAction(ISD::SREM , MVT::f64, Expand);
208 setOperationAction(ISD::FSIN , MVT::f32, Expand);
209 setOperationAction(ISD::FCOS , MVT::f32, Expand);
210 setOperationAction(ISD::FABS , MVT::f32, Expand);
211 setOperationAction(ISD::FNEG , MVT::f32, Expand);
212 setOperationAction(ISD::SREM , MVT::f32, Expand);
214 addLegalFPImmediate(+0.0); // xorps / xorpd
// x87 (non-SSE) FP path: everything lives in the RFP stack register class.
216 // Set up the FP register classes.
217 addRegisterClass(MVT::f64, X86::RFPRegisterClass);
220 setOperationAction(ISD::FSIN , MVT::f64 , Expand);
221 setOperationAction(ISD::FCOS , MVT::f64 , Expand);
// Constants the x87 unit can materialize without a constant-pool load.
224 addLegalFPImmediate(+0.0); // FLD0
225 addLegalFPImmediate(+1.0); // FLD1
226 addLegalFPImmediate(-0.0); // FLD0/FCHS
227 addLegalFPImmediate(-1.0); // FLD1/FCHS
229 computeRegisterProperties();
// Inline small memset/memcpy/memmove as at most 8 stores each.
231 maxStoresPerMemSet = 8; // For %llvm.memset -> sequence of stores
232 maxStoresPerMemCpy = 8; // For %llvm.memcpy -> sequence of stores
233 maxStoresPerMemMove = 8; // For %llvm.memmove -> sequence of stores
234 allowUnalignedStores = true; // x86 supports it!
237 // Return the number of bytes that a function should pop when it returns (in
238 // addition to the space used by the return address).
240 unsigned getBytesToPopOnReturn() const { return BytesToPopOnReturn; }
242 // Return the number of bytes that the caller reserves for arguments passed
244 unsigned getBytesCallerReserves() const { return BytesCallerReserves; }
246 /// LowerOperation - Provide custom lowering hooks for some operations.
// Handles the nodes marked Custom in the constructor (i64 SINT_TO_FP and
// i16/i32/i64 FP_TO_SINT); defined at the bottom of this file.
248 virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG);
250 /// LowerArguments - This hook must be implemented to indicate how we should
251 /// lower the arguments for the specified function, into the specified DAG.
252 virtual std::vector<SDOperand>
253 LowerArguments(Function &F, SelectionDAG &DAG);
255 /// LowerCallTo - This hook lowers an abstract call to a function into an
// Dispatches to the C or fastcc call lowering depending on CC/EnableFastCC.
257 virtual std::pair<SDOperand, SDOperand>
258 LowerCallTo(SDOperand Chain, const Type *RetTy, bool isVarArg, unsigned CC,
259 bool isTailCall, SDOperand Callee, ArgListTy &Args,
// va_start / va_arg lowering hooks (see definitions below).
262 virtual SDOperand LowerVAStart(SDOperand Chain, SDOperand VAListP,
263 Value *VAListV, SelectionDAG &DAG);
264 virtual std::pair<SDOperand,SDOperand>
265 LowerVAArg(SDOperand Chain, SDOperand VAListP, Value *VAListV,
266 const Type *ArgTy, SelectionDAG &DAG);
268 virtual std::pair<SDOperand, SDOperand>
269 LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain, unsigned Depth,
// Lazily creates (and caches in ReturnAddrIndex) the fixed stack object that
// aliases the return-address slot.
272 SDOperand getReturnAddressFrameIndex(SelectionDAG &DAG);
275 // C Calling Convention implementation.
276 std::vector<SDOperand> LowerCCCArguments(Function &F, SelectionDAG &DAG);
277 std::pair<SDOperand, SDOperand>
278 LowerCCCCallTo(SDOperand Chain, const Type *RetTy, bool isVarArg,
280 SDOperand Callee, ArgListTy &Args, SelectionDAG &DAG);
282 // Fast Calling Convention implementation.
283 std::vector<SDOperand> LowerFastCCArguments(Function &F, SelectionDAG &DAG);
284 std::pair<SDOperand, SDOperand>
285 LowerFastCCCallTo(SDOperand Chain, const Type *RetTy, bool isTailCall,
286 SDOperand Callee, ArgListTy &Args, SelectionDAG &DAG);
// Dispatch incoming-argument lowering: fastcc functions use the register
// passing scheme only when the -enable-x86-fastcc flag is set, otherwise
// everything goes through the plain C (all-on-stack) path.
290 std::vector<SDOperand>
291 X86TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
292 if (F.getCallingConv() == CallingConv::Fast && EnableFastCC)
293 return LowerFastCCArguments(F, DAG);
294 return LowerCCCArguments(F, DAG);
// Dispatch outgoing-call lowering, mirroring LowerArguments: varargs are only
// legal with the C convention (asserted), and fastcc is used only when both
// the callee's CC is Fast and the -enable-x86-fastcc flag is on.
297 std::pair<SDOperand, SDOperand>
298 X86TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy,
299 bool isVarArg, unsigned CallingConv,
301 SDOperand Callee, ArgListTy &Args,
303 assert((!isVarArg || CallingConv == CallingConv::C) &&
304 "Only C takes varargs!");
305 if (CallingConv == CallingConv::Fast && EnableFastCC)
306 return LowerFastCCCallTo(Chain, RetTy, isTailCall, Callee, Args, DAG);
307 return LowerCCCCallTo(Chain, RetTy, isVarArg, isTailCall, Callee, Args, DAG);
310 //===----------------------------------------------------------------------===//
311 // C Calling Convention implementation
312 //===----------------------------------------------------------------------===//
// C calling convention: every incoming argument lives on the stack above the
// return address. For each formal, create a fixed frame object at its ESP
// offset and load from it; returns one SDOperand per formal. Also records the
// varargs start offset and the callee-pop bookkeeping (callee pops nothing in
// C CC), and marks the physical return registers live-out.
// NOTE(review): elided listing -- the switch header, some case labels and
// braces between the visible statements are missing here.
314 std::vector<SDOperand>
315 X86TargetLowering::LowerCCCArguments(Function &F, SelectionDAG &DAG) {
316 std::vector<SDOperand> ArgValues;
318 MachineFunction &MF = DAG.getMachineFunction();
319 MachineFrameInfo *MFI = MF.getFrameInfo();
321 // Add DAG nodes to load the arguments... On entry to a function on the X86,
322 // the stack frame looks like this:
324 // [ESP] -- return address
325 // [ESP + 4] -- first argument (leftmost lexically)
326 // [ESP + 8] -- second argument, if first argument is four bytes in size
329 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
330 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
331 MVT::ValueType ObjectVT = getValueType(I->getType());
332 unsigned ArgIncrement = 4;
// Stack slot size per type; 8-byte types also advance the offset by 8.
335 default: assert(0 && "Unhandled argument type!");
337 case MVT::i8: ObjSize = 1; break;
338 case MVT::i16: ObjSize = 2; break;
339 case MVT::i32: ObjSize = 4; break;
340 case MVT::i64: ObjSize = ArgIncrement = 8; break;
341 case MVT::f32: ObjSize = 4; break;
342 case MVT::f64: ObjSize = ArgIncrement = 8; break;
344 // Create the frame index object for this incoming parameter...
345 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
347 // Create the SelectionDAG nodes corresponding to a load from this parameter
348 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
350 // Don't codegen dead arguments. FIXME: remove this check when we can nuke
354 ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
355 DAG.getSrcValue(NULL));
// Dead arguments still need a placeholder value of the right type.
357 if (MVT::isInteger(ObjectVT))
358 ArgValue = DAG.getConstant(0, ObjectVT);
360 ArgValue = DAG.getConstantFP(0, ObjectVT);
362 ArgValues.push_back(ArgValue);
364 ArgOffset += ArgIncrement; // Move on to the next argument...
367 // If the function takes variable number of arguments, make a frame index for
368 // the start of the first vararg value... for expansion of llvm.va_start.
370 VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
371 ReturnAddrIndex = 0; // No return address slot generated yet.
372 BytesToPopOnReturn = 0; // Callee pops nothing.
373 BytesCallerReserves = ArgOffset;
375 // Finally, inform the code generator which regs we return values in.
// i8/i16/i32 results come back in EAX, i64 in EAX:EDX, FP in ST0.
376 switch (getValueType(F.getReturnType())) {
377 default: assert(0 && "Unknown type!");
378 case MVT::isVoid: break;
383 MF.addLiveOut(X86::EAX);
386 MF.addLiveOut(X86::EAX);
387 MF.addLiveOut(X86::EDX);
391 MF.addLiveOut(X86::ST0);
// C calling convention call lowering: size the outgoing argument area,
// bracket the call with CALLSEQ_START/CALLSEQ_END, store each argument to
// [ESP + offset] (promoting sub-32-bit integers to i32 first), emit the
// X86ISD::CALL/TAILCALL node, and extract/convert the result value(s).
// Returns the pair (result value, chain).
// NOTE(review): elided listing -- byte-counting loop bodies, case labels and
// several closing braces are missing between the visible statements.
397 std::pair<SDOperand, SDOperand>
398 X86TargetLowering::LowerCCCCallTo(SDOperand Chain, const Type *RetTy,
399 bool isVarArg, bool isTailCall,
400 SDOperand Callee, ArgListTy &Args,
402 // Count how many bytes are to be pushed on the stack.
403 unsigned NumBytes = 0;
// Degenerate zero-argument case: still must open a call sequence.
407 Chain = DAG.getNode(ISD::CALLSEQ_START, MVT::Other, Chain,
408 DAG.getConstant(0, getPointerTy()));
410 for (unsigned i = 0, e = Args.size(); i != e; ++i)
411 switch (getValueType(Args[i].second)) {
412 default: assert(0 && "Unknown value type!");
426 Chain = DAG.getNode(ISD::CALLSEQ_START, MVT::Other, Chain,
427 DAG.getConstant(NumBytes, getPointerTy()));
429 // Arguments go on the stack in reverse order, as specified by the ABI.
430 unsigned ArgOffset = 0;
431 SDOperand StackPtr = DAG.getCopyFromReg(X86::ESP, MVT::i32,
433 std::vector<SDOperand> Stores;
435 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
436 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
437 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
439 switch (getValueType(Args[i].second)) {
440 default: assert(0 && "Unexpected ValueType for argument!");
444 // Promote the integer to 32 bits. If the input type is signed use a
445 // sign extend, otherwise use a zero extend.
446 if (Args[i].second->isSigned())
447 Args[i].first =DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Args[i].first);
449 Args[i].first =DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Args[i].first);
// Store the (possibly promoted) argument into its outgoing stack slot.
454 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
455 Args[i].first, PtrOff,
456 DAG.getSrcValue(NULL)));
461 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
462 Args[i].first, PtrOff,
463 DAG.getSrcValue(NULL)));
// Join all independent argument stores into one chain.
468 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);
// Build the list of legal result types for the call node (chain first).
471 std::vector<MVT::ValueType> RetVals;
472 MVT::ValueType RetTyVT = getValueType(RetTy);
473 RetVals.push_back(MVT::Other);
475 // The result values produced have to be legal. Promote the result.
477 case MVT::isVoid: break;
479 RetVals.push_back(RetTyVT);
484 RetVals.push_back(MVT::i32);
488 RetVals.push_back(MVT::f32);
490 RetVals.push_back(MVT::f64);
// i64 results come back as two i32 values.
493 RetVals.push_back(MVT::i32);
494 RetVals.push_back(MVT::i32);
// CALL operands: chain, callee, bytes-pushed, bytes-callee-pops (0 for C CC).
497 std::vector<SDOperand> Ops;
498 Ops.push_back(Chain);
499 Ops.push_back(Callee);
500 Ops.push_back(DAG.getConstant(NumBytes, getPointerTy()));
501 Ops.push_back(DAG.getConstant(0, getPointerTy()));
502 SDOperand TheCall = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
504 Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, TheCall);
// Convert the raw call results back to the caller-visible return type.
508 case MVT::isVoid: break;
510 ResultVal = TheCall.getValue(1);
515 ResultVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, TheCall.getValue(1));
518 // FIXME: we would really like to remember that this FP_ROUND operation is
519 // okay to eliminate if we allow excess FP precision.
520 ResultVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, TheCall.getValue(1));
523 ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, TheCall.getValue(1),
524 TheCall.getValue(2));
528 return std::make_pair(ResultVal, Chain);
// Lower llvm.va_start: store the address of the varargs area (the frame
// index recorded by LowerCCCArguments) into the caller's va_list slot.
// Returns the updated token chain.
532 X86TargetLowering::LowerVAStart(SDOperand Chain, SDOperand VAListP,
533 Value *VAListV, SelectionDAG &DAG) {
534 // vastart just stores the address of the VarArgsFrameIndex slot.
535 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32);
536 return DAG.getNode(ISD::STORE, MVT::Other, Chain, FR, VAListP,
537 DAG.getSrcValue(VAListV));
// Lower va_arg: load the current pointer out of the va_list, load the
// argument of type ArgTy from that address, then bump the pointer by the
// argument's size (4 for i32, otherwise 8 -- smaller types were promoted)
// and store it back. Returns (loaded value, updated chain).
// NOTE(review): the lines selecting Amt and closing the i32 branch are
// elided from this listing.
541 std::pair<SDOperand,SDOperand>
542 X86TargetLowering::LowerVAArg(SDOperand Chain, SDOperand VAListP,
543 Value *VAListV, const Type *ArgTy,
545 MVT::ValueType ArgVT = getValueType(ArgTy);
546 SDOperand Val = DAG.getLoad(MVT::i32, Chain,
547 VAListP, DAG.getSrcValue(VAListV));
548 SDOperand Result = DAG.getLoad(ArgVT, Chain, Val,
549 DAG.getSrcValue(NULL));
551 if (ArgVT == MVT::i32)
554 assert((ArgVT == MVT::i64 || ArgVT == MVT::f64) &&
555 "Other types should have been promoted for varargs!");
// Advance the va_list pointer past the consumed argument.
558 Val = DAG.getNode(ISD::ADD, Val.getValueType(), Val,
559 DAG.getConstant(Amt, Val.getValueType()));
560 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain,
561 Val, VAListP, DAG.getSrcValue(VAListV));
562 return std::make_pair(Result, Chain);
565 //===----------------------------------------------------------------------===//
566 // Fast Calling Convention implementation
567 //===----------------------------------------------------------------------===//
569 // The X86 'fast' calling convention passes up to two integer arguments in
570 // registers (an appropriate portion of EAX/EDX), passes arguments in C order,
571 // and requires that the callee pop its arguments off the stack (allowing proper
572 // tail calls), and has the same return value conventions as C calling convs.
574 // This calling convention always arranges for the callee pop value to be 8n+4
575 // bytes, which is needed for tail recursion elimination and stack alignment
578 // Note that this can be enhanced in the future to pass fp vals in registers
579 // (when we have a global fp allocator) and do other tricks.
582 /// AddLiveIn - This helper function adds the specified physical register to the
583 /// MachineFunction as a live in value. It also creates a corresponding virtual
// register (of PReg's register class) and records the PReg->VReg live-in
// mapping on the MachineFunction; the elided tail returns that VReg.
585 static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
586 TargetRegisterClass *RC) {
587 assert(RC->contains(PReg) && "Not the correct regclass!");
588 unsigned VReg = MF.getSSARegMap()->createVirtualRegister(RC);
589 MF.addLiveIn(PReg, VReg);
// Fastcc incoming arguments: the first two integer "slots" arrive in
// AL/AX/EAX and DL/DX/EDX (an i64 may be split: low half in a register, high
// half on the stack); everything else is loaded from fixed stack slots, as
// in the C convention. Records callee-pop bookkeeping (callee pops ALL stack
// argument bytes) and marks return registers live-out.
// NOTE(review): elided listing -- the switch header, NumIntRegs increments,
// several case labels/braces and the 8n+4 padding adjustment are missing
// between the visible statements.
594 std::vector<SDOperand>
595 X86TargetLowering::LowerFastCCArguments(Function &F, SelectionDAG &DAG) {
596 std::vector<SDOperand> ArgValues;
598 MachineFunction &MF = DAG.getMachineFunction();
599 MachineFrameInfo *MFI = MF.getFrameInfo();
601 // Add DAG nodes to load the arguments... On entry to a function the stack
602 // frame looks like this:
604 // [ESP] -- return address
605 // [ESP + 4] -- first nonreg argument (leftmost lexically)
606 // [ESP + 8] -- second nonreg argument, if first argument is 4 bytes in size
608 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
610 // Keep track of the number of integer regs passed so far. This can be either
611 // 0 (neither EAX or EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
613 unsigned NumIntRegs = 0;
615 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
616 MVT::ValueType ObjectVT = getValueType(I->getType());
617 unsigned ArgIncrement = 4;
618 unsigned ObjSize = 0;
622 default: assert(0 && "Unhandled argument type!");
// i8 in a register: use the 8-bit subregister (AL or DL).
625 if (NumIntRegs < 2) {
626 if (!I->use_empty()) {
627 unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::DL : X86::AL,
628 X86::R8RegisterClass);
629 ArgValue = DAG.getCopyFromReg(VReg, MVT::i8, DAG.getRoot());
630 DAG.setRoot(ArgValue.getValue(1));
// i16 in a register: AX or DX.
639 if (NumIntRegs < 2) {
640 if (!I->use_empty()) {
641 unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::DX : X86::AX,
642 X86::R16RegisterClass);
643 ArgValue = DAG.getCopyFromReg(VReg, MVT::i16, DAG.getRoot());
644 DAG.setRoot(ArgValue.getValue(1));
// i32 in a register: EAX or EDX.
652 if (NumIntRegs < 2) {
653 if (!I->use_empty()) {
654 unsigned VReg = AddLiveIn(MF,NumIntRegs ? X86::EDX : X86::EAX,
655 X86::R32RegisterClass);
656 ArgValue = DAG.getCopyFromReg(VReg, MVT::i32, DAG.getRoot());
657 DAG.setRoot(ArgValue.getValue(1));
// i64 with both registers free: low in EAX, high in EDX.
665 if (NumIntRegs == 0) {
666 if (!I->use_empty()) {
667 unsigned BotReg = AddLiveIn(MF, X86::EAX, X86::R32RegisterClass);
668 unsigned TopReg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);
670 SDOperand Low=DAG.getCopyFromReg(BotReg, MVT::i32, DAG.getRoot());
671 SDOperand Hi =DAG.getCopyFromReg(TopReg, MVT::i32, Low.getValue(1));
672 DAG.setRoot(Hi.getValue(1));
674 ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Low, Hi);
// i64 with one register left: low half in EDX, high half from the stack.
678 } else if (NumIntRegs == 1) {
679 if (!I->use_empty()) {
680 unsigned BotReg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);
681 SDOperand Low = DAG.getCopyFromReg(BotReg, MVT::i32, DAG.getRoot());
682 DAG.setRoot(Low.getValue(1));
684 // Load the high part from memory.
685 // Create the frame index object for this incoming parameter...
686 int FI = MFI->CreateFixedObject(4, ArgOffset);
687 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
688 SDOperand Hi = DAG.getLoad(MVT::i32, DAG.getEntryNode(), FIN,
689 DAG.getSrcValue(NULL));
690 ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Low, Hi);
// No registers left: the whole i64 lives on the stack.
696 ObjSize = ArgIncrement = 8;
698 case MVT::f32: ObjSize = 4; break;
699 case MVT::f64: ObjSize = ArgIncrement = 8; break;
702 // Don't codegen dead arguments. FIXME: remove this check when we can nuke
// ObjSize != 0 means (part of) the argument is in memory: load it.
704 if (ObjSize && !I->use_empty()) {
705 // Create the frame index object for this incoming parameter...
706 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
708 // Create the SelectionDAG nodes corresponding to a load from this
710 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
712 ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN,
713 DAG.getSrcValue(NULL));
714 } else if (ArgValue.Val == 0) {
715 if (MVT::isInteger(ObjectVT))
716 ArgValue = DAG.getConstant(0, ObjectVT);
718 ArgValue = DAG.getConstantFP(0, ObjectVT);
720 ArgValues.push_back(ArgValue);
723 ArgOffset += ArgIncrement; // Move on to the next argument.
726 // Make sure the instruction takes 8n+4 bytes to make sure the start of the
727 // arguments and the arguments after the retaddr has been pushed are aligned.
728 if ((ArgOffset & 7) == 0)
731 VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs.
732 ReturnAddrIndex = 0; // No return address slot generated yet.
733 BytesToPopOnReturn = ArgOffset; // Callee pops all stack arguments.
734 BytesCallerReserves = 0;
736 // Finally, inform the code generator which regs we return values in.
// Same return-register scheme as the C convention: EAX, EAX:EDX, or ST0.
737 switch (getValueType(F.getReturnType())) {
738 default: assert(0 && "Unknown type!");
739 case MVT::isVoid: break;
744 MF.addLiveOut(X86::EAX);
747 MF.addLiveOut(X86::EAX);
748 MF.addLiveOut(X86::EDX);
752 MF.addLiveOut(X86::ST0);
// Fastcc outgoing call: mirrors LowerFastCCArguments. Up to two integer
// argument slots travel in registers (collected in RegValuesToPass and
// appended to the CALL node's operands); the rest is stored to the outgoing
// stack area. The stack byte count is padded to 8n+4, and the callee pops
// all argument bytes (the second constant operand of the CALL node).
// NOTE(review): elided listing -- byte-counting bodies, NumIntRegs updates,
// case labels and several braces are missing between the visible statements.
758 std::pair<SDOperand, SDOperand>
759 X86TargetLowering::LowerFastCCCallTo(SDOperand Chain, const Type *RetTy,
760 bool isTailCall, SDOperand Callee,
761 ArgListTy &Args, SelectionDAG &DAG) {
762 // Count how many bytes are to be pushed on the stack.
763 unsigned NumBytes = 0;
765 // Keep track of the number of integer regs passed so far. This can be either
766 // 0 (neither EAX or EDX used), 1 (EAX is used) or 2 (EAX and EDX are both
768 unsigned NumIntRegs = 0;
// First pass: size the stack area (register-passed slots add no bytes).
770 for (unsigned i = 0, e = Args.size(); i != e; ++i)
771 switch (getValueType(Args[i].second)) {
772 default: assert(0 && "Unknown value type!");
777 if (NumIntRegs < 2) {
786 if (NumIntRegs == 0) {
789 } else if (NumIntRegs == 1) {
801 // Make sure the instruction takes 8n+4 bytes to make sure the start of the
802 // arguments and the arguments after the retaddr has been pushed are aligned.
803 if ((NumBytes & 7) == 0)
806 Chain = DAG.getNode(ISD::CALLSEQ_START, MVT::Other, Chain,
807 DAG.getConstant(NumBytes, getPointerTy()));
809 // Arguments go on the stack in reverse order, as specified by the ABI.
810 unsigned ArgOffset = 0;
811 SDOperand StackPtr = DAG.getCopyFromReg(X86::ESP, MVT::i32,
814 std::vector<SDOperand> Stores;
815 std::vector<SDOperand> RegValuesToPass;
// Second pass: route each argument to a register or a stack store.
816 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
817 switch (getValueType(Args[i].second)) {
818 default: assert(0 && "Unexpected ValueType for argument!");
823 if (NumIntRegs < 2) {
824 RegValuesToPass.push_back(Args[i].first);
// No free register: spill this integer to the outgoing stack area.
830 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
831 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
832 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
833 Args[i].first, PtrOff,
834 DAG.getSrcValue(NULL)));
// i64: split into halves; low (and possibly high) half goes in registers.
839 if (NumIntRegs < 2) { // Can pass part of it in regs?
840 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
841 Args[i].first, DAG.getConstant(1, MVT::i32));
842 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
843 Args[i].first, DAG.getConstant(0, MVT::i32));
844 RegValuesToPass.push_back(Lo);
846 if (NumIntRegs < 2) { // Pass both parts in regs?
847 RegValuesToPass.push_back(Hi);
850 // Pass the high part in memory.
851 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
852 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
853 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
854 Hi, PtrOff, DAG.getSrcValue(NULL)));
// FP arguments always go on the stack.
861 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
862 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
863 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
864 Args[i].first, PtrOff,
865 DAG.getSrcValue(NULL)));
// Join all independent argument stores into one chain.
871 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);
873 // Make sure the instruction takes 8n+4 bytes to make sure the start of the
874 // arguments and the arguments after the retaddr has been pushed are aligned.
875 if ((ArgOffset & 7) == 0)
// Build the list of legal result types (chain first), as in the C path.
878 std::vector<MVT::ValueType> RetVals;
879 MVT::ValueType RetTyVT = getValueType(RetTy);
881 RetVals.push_back(MVT::Other);
883 // The result values produced have to be legal. Promote the result.
885 case MVT::isVoid: break;
887 RetVals.push_back(RetTyVT);
892 RetVals.push_back(MVT::i32);
896 RetVals.push_back(MVT::f32);
898 RetVals.push_back(MVT::f64);
901 RetVals.push_back(MVT::i32);
902 RetVals.push_back(MVT::i32);
// CALL operands: chain, callee, bytes pushed, bytes callee pops (equal here
// -- fastcc callees pop their own arguments), then the register arguments.
906 std::vector<SDOperand> Ops;
907 Ops.push_back(Chain);
908 Ops.push_back(Callee);
909 Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));
910 // Callee pops all arg values on the stack.
911 Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));
913 // Pass register arguments as needed.
914 Ops.insert(Ops.end(), RegValuesToPass.begin(), RegValuesToPass.end());
916 SDOperand TheCall = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
918 Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, TheCall);
// Convert the raw call results back to the caller-visible return type.
922 case MVT::isVoid: break;
924 ResultVal = TheCall.getValue(1);
929 ResultVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, TheCall.getValue(1));
932 // FIXME: we would really like to remember that this FP_ROUND operation is
933 // okay to eliminate if we allow excess FP precision.
934 ResultVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, TheCall.getValue(1));
937 ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, TheCall.getValue(1),
938 TheCall.getValue(2));
942 return std::make_pair(ResultVal, Chain);
// Lazily create (and cache in the ReturnAddrIndex member) a fixed 4-byte
// frame object at offset -4 that aliases the return-address slot, then
// return it as a FrameIndex node. ReturnAddrIndex == 0 means "not created
// yet" (it is reset per function by the LowerArguments paths).
945 SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
946 if (ReturnAddrIndex == 0) {
947 // Set up a frame object for the return address.
948 MachineFunction &MF = DAG.getMachineFunction();
949 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
952 return DAG.getFrameIndex(ReturnAddrIndex, MVT::i32);
// Lower llvm.returnaddress / llvm.frameaddress. Depth > 0 is unsupported and
// yields constant 0. For depth 0: the return address is loaded from the
// retaddr frame slot; the frame address is computed as that slot's address
// minus 4. Returns (result, chain).
// NOTE(review): the isFrameAddress branch structure between the visible
// statements is elided from this listing.
957 std::pair<SDOperand, SDOperand> X86TargetLowering::
958 LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
961 if (Depth) // Depths > 0 not supported yet!
962 Result = DAG.getConstant(0, getPointerTy())
964 SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG);
966 // Just load the return address
967 Result = DAG.getLoad(MVT::i32, DAG.getEntryNode(), RetAddrFI,
968 DAG.getSrcValue(NULL));
970 Result = DAG.getNode(ISD::SUB, MVT::i32, RetAddrFI,
971 DAG.getConstant(4, MVT::i32));
973 return std::make_pair(Result, Chain);
976 //===----------------------------------------------------------------------===//
977 // X86 Custom Lowering Hooks
978 //===----------------------------------------------------------------------===//
980 /// LowerOperation - Provide custom lowering hooks for some operations.
// Handles the two node kinds marked Custom in the constructor:
//  * i64 SINT_TO_FP  -> store the i64 to a stack slot, then X86ISD::FILD64m
//    loads and converts it in one x87 instruction.
//  * FP_TO_SINT (i16/i32/i64 from f64) -> X86ISD::FP_TO_INT*_IN_MEM stores
//    the converted integer to a stack slot (handling the rounding-mode
//    change), then it is loaded back as the integer result.
982 SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
983 switch (Op.getOpcode()) {
984 default: assert(0 && "Should not custom lower this!");
985 case ISD::SINT_TO_FP: {
986 assert(Op.getValueType() == MVT::f64 &&
987 Op.getOperand(0).getValueType() == MVT::i64 &&
988 "Unknown SINT_TO_FP to lower!");
989 // We lower sint64->FP into a store to a temporary stack slot, followed by a
// 8-byte, 8-aligned temporary slot for the i64 source operand.
991 MachineFunction &MF = DAG.getMachineFunction();
992 int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
993 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
994 SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, DAG.getEntryNode(),
995 Op.getOperand(0), StackSlot, DAG.getSrcValue(NULL))
// FILD64m produces two results: the f64 value and an output token chain.
996 std::vector<MVT::ValueType> RTs;
997 RTs.push_back(MVT::f64);
998 RTs.push_back(MVT::Other);
999 std::vector<SDOperand> Ops;
1000 Ops.push_back(Store);
1001 Ops.push_back(StackSlot);
1002 return DAG.getNode(X86ISD::FILD64m, RTs, Ops);
1004 case ISD::FP_TO_SINT: {
1005 assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 &&
1006 Op.getOperand(0).getValueType() == MVT::f64 &&
1007 "Unknown FP_TO_SINT to lower!");
1008 // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary
// Temporary slot sized/aligned to the integer destination type.
1010 MachineFunction &MF = DAG.getMachineFunction();
1011 unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8;
1012 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
1013 SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
1016 switch (Op.getValueType()) {
1017 default: assert(0 && "Invalid FP_TO_SINT to lower!");
1018 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
1019 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
1020 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
1023 // Build the FP_TO_INT*_IN_MEM
// Operands: token chain, FP source value, destination stack address.
1024 std::vector<SDOperand> Ops;
1025 Ops.push_back(DAG.getEntryNode());
1026 Ops.push_back(Op.getOperand(0));
1027 Ops.push_back(StackSlot);
1028 SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops);
// The integer result is read back from the slot, chained after the FIST.
1031 return DAG.getLoad(Op.getValueType(), FIST, StackSlot,
1032 DAG.getSrcValue(NULL));
1038 //===----------------------------------------------------------------------===//
1039 // Pattern Matcher Implementation
1040 //===----------------------------------------------------------------------===//
1043 /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
1044 /// SDOperand's instead of register numbers for the leaves of the matched
/// address expression tree.  MatchAddress fills one of these in; SelectAddrExprs
/// then emits the leaves and converts it to a concrete X86AddressMode.
1046 struct X86ISelAddressMode {
// The base is either a register-valued SDOperand or a frame index, selected
// by BaseType, hence the struct below acting as a discriminated union.
1052 struct { // This is really a union, discriminated by BaseType!
// Default state: register base (unset), scale of 1, no index register,
// zero displacement, and no global value.
1062 X86ISelAddressMode()
1063 : BaseType(RegBase), Scale(1), IndexReg(), Disp(), GV(0) {
1071 NumFPKill("x86-codegen", "Number of FP_REG_KILL instructions added");
1073 //===--------------------------------------------------------------------===//
1074 /// ISel - X86 specific code to select X86 machine instructions for
1075 /// SelectionDAG operations.
1077 class ISel : public SelectionDAGISel {
1078 /// ContainsFPCode - Every instruction we select that uses or defines a FP
1079 /// register should set this to true.
1080 bool ContainsFPCode;
1082 /// X86Lowering - This object fully describes how to lower LLVM code to an
1083 /// X86-specific SelectionDAG.
1084 X86TargetLowering X86Lowering;
1086 /// RegPressureMap - This keeps an approximate count of the number of
1087 /// registers required to evaluate each node in the graph.
1088 std::map<SDNode*, unsigned> RegPressureMap;
1090 /// ExprMap - As shared expressions are codegen'd, we keep track of which
1091 /// vreg the value is produced in, so we only emit one copy of each compiled
/// expression.
1093 std::map<SDOperand, unsigned> ExprMap;
1095 /// TheDAG - The DAG being selected during Select* operations.
1096 SelectionDAG *TheDAG;
1098 /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
1099 /// make the right decision when generating code for different targets.
1100 const X86Subtarget *Subtarget;
1102 ISel(TargetMachine &TM) : SelectionDAGISel(X86Lowering), X86Lowering(TM) {
1103 Subtarget = &TM.getSubtarget<X86Subtarget>();
1106 virtual const char *getPassName() const {
1107 return "X86 Pattern Instruction Selection";
// getRegPressure - Return the cached Sethi-Ullman-style register-pressure
// estimate for this node (computed by ComputeRegPressure below).
1110 unsigned getRegPressure(SDOperand O) {
1111 return RegPressureMap[O.Val];
1113 unsigned ComputeRegPressure(SDOperand O);
1115 /// InstructionSelectBasicBlock - This callback is invoked by
1116 /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
1117 virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);
1119 virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF);
// Load-folding and operation-emission helpers; see the definitions below
// for the detailed contracts of each.
1121 bool isFoldableLoad(SDOperand Op, SDOperand OtherOp,
1122 bool FloatPromoteOk = false);
1123 void EmitFoldedLoad(SDOperand Op, X86AddressMode &AM);
1124 bool TryToFoldLoadOpStore(SDNode *Node);
1125 bool EmitOrOpOp(SDOperand Op1, SDOperand Op2, unsigned DestReg);
1126 void EmitCMP(SDOperand LHS, SDOperand RHS, bool isOnlyUse);
1127 bool EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain, SDOperand Cond);
1128 void EmitSelectCC(SDOperand Cond, SDOperand True, SDOperand False,
1129 MVT::ValueType SVT, unsigned RDest);
1130 unsigned SelectExpr(SDOperand N);
// Addressing-mode matching (pattern only) and selection (emits leaves).
1132 X86AddressMode SelectAddrExprs(const X86ISelAddressMode &IAM);
1133 bool MatchAddress(SDOperand N, X86ISelAddressMode &AM);
1134 void SelectAddress(SDOperand N, X86AddressMode &AM);
1135 bool EmitPotentialTailCall(SDNode *Node);
1136 void EmitFastCCToFastCCTailCall(SDNode *TailCallNode);
1137 void Select(SDOperand N);
1141 /// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
1142 /// the main function.
1143 static void EmitSpecialCodeForMain(MachineBasicBlock *BB,
1144 MachineFrameInfo *MFI) {
1145 // Switch the FPU to 64-bit precision mode for better compatibility and speed.
// Spill the current FPU control word to a 2-byte stack slot so we can
// modify its precision-control bits in memory.
1146 int CWFrameIdx = MFI->CreateStackObject(2, 2);
1147 addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);
1149 // Set the high part to be 64-bit precision.
// Overwrites the byte at offset 1 of the stored control word with 2,
// i.e. the precision-control field for 64-bit (double) precision.
1150 addFrameReference(BuildMI(BB, X86::MOV8mi, 5),
1151 CWFrameIdx, 1).addImm(2);
1153 // Reload the modified control word now.
1154 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
// EmitFunctionEntryCode - Copy incoming physical registers (the function's
// live-in values) into virtual registers at the top of the entry block, and
// emit the special FPU-setup code if this function is main().
1157 void ISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
1158 // If this function has live-in values, emit the copies from pregs to vregs at
1159 // the top of the function, before anything else.
1160 MachineBasicBlock *BB = MF.begin();
1161 if (MF.livein_begin() != MF.livein_end()) {
1162 SSARegMap *RegMap = MF.getSSARegMap();
// LI->first is the physical register, LI->second the virtual register it
// is copied into; the vreg's register class picks the copy opcode.
1163 for (MachineFunction::livein_iterator LI = MF.livein_begin(),
1164 E = MF.livein_end(); LI != E; ++LI) {
1165 const TargetRegisterClass *RC = RegMap->getRegClass(LI->second);
1166 if (RC == X86::R8RegisterClass) {
1167 BuildMI(BB, X86::MOV8rr, 1, LI->second).addReg(LI->first);
1168 } else if (RC == X86::R16RegisterClass) {
1169 BuildMI(BB, X86::MOV16rr, 1, LI->second).addReg(LI->first);
1170 } else if (RC == X86::R32RegisterClass) {
1171 BuildMI(BB, X86::MOV32rr, 1, LI->second).addReg(LI->first);
1172 } else if (RC == X86::RFPRegisterClass) {
1173 BuildMI(BB, X86::FpMOV, 1, LI->second).addReg(LI->first);
1174 } else if (RC == X86::RXMMRegisterClass) {
1175 BuildMI(BB, X86::MOVAPDrr, 1, LI->second).addReg(LI->first);
1177 assert(0 && "Unknown regclass!");
1183 // If this is main, emit special code for main.
// Checked by name+linkage; sets the FPU to 64-bit precision (see
// EmitSpecialCodeForMain above).
1184 if (Fn.hasExternalLinkage() && Fn.getName() == "main")
1185 EmitSpecialCodeForMain(BB, MF.getFrameInfo());
1189 /// InstructionSelectBasicBlock - This callback is invoked by SelectionDAGISel
1190 /// when it has created a SelectionDAG for us to codegen.
1191 void ISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {
1192 // While we're doing this, keep track of whether we see any FP code for
1193 // FP_REG_KILL insertion.
1194 ContainsFPCode = false;
1195 MachineFunction *MF = BB->getParent();
1197 // Scan the PHI nodes that already are inserted into this basic block. If any
1198 // of them is a PHI of a floating point value, we need to insert an
// FP_REG_KILL (x87 stack registers cannot stay live across blocks yet).
1200 SSARegMap *RegMap = MF->getSSARegMap();
// The entry block has no PHIs of its own, so skip it.
1201 if (BB != MF->begin())
1202 for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
1204 assert(I->getOpcode() == X86::PHI &&
1205 "Isn't just PHI nodes?");
1206 if (RegMap->getRegClass(I->getOperand(0).getReg()) ==
1207 X86::RFPRegisterClass) {
1208 ContainsFPCode = true;
1213 // Compute the RegPressureMap, which is an approximation for the number of
1214 // registers required to compute each node.
1215 ComputeRegPressure(DAG.getRoot());
1219 // Codegen the basic block.
1220 Select(DAG.getRoot());
1224 // Finally, look at all of the successors of this block. If any contain a PHI
1225 // node of FP type, we need to insert an FP_REG_KILL in this block.
1226 for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
1227 E = BB->succ_end(); SI != E && !ContainsFPCode; ++SI)
1228 for (MachineBasicBlock::iterator I = (*SI)->begin(), E = (*SI)->end();
1229 I != E && I->getOpcode() == X86::PHI; ++I) {
1230 if (RegMap->getRegClass(I->getOperand(0).getReg()) ==
1231 X86::RFPRegisterClass) {
1232 ContainsFPCode = true;
1237 // Final check, check LLVM BB's that are successors to the LLVM BB
1238 // corresponding to BB for FP PHI nodes.
// Successor machine blocks may not have had their PHIs emitted yet, so we
// also consult the source-level CFG for FP PHI nodes.
1239 const BasicBlock *LLVMBB = BB->getBasicBlock();
1241 if (!ContainsFPCode)
1242 for (succ_const_iterator SI = succ_begin(LLVMBB), E = succ_end(LLVMBB);
1243 SI != E && !ContainsFPCode; ++SI)
1244 for (BasicBlock::const_iterator II = SI->begin();
1245 (PN = dyn_cast<PHINode>(II)); ++II)
1246 if (PN->getType()->isFloatingPoint()) {
1247 ContainsFPCode = true;
1252 // Insert FP_REG_KILL instructions into basic blocks that need them. This
1253 // only occurs due to the floating point stackifier not being aggressive
1254 // enough to handle arbitrary global stackification.
1256 // Currently we insert an FP_REG_KILL instruction into each block that uses or
1257 // defines a floating point virtual register.
1259 // When the global register allocators (like linear scan) finally update live
1260 // variable analysis, we can keep floating point values in registers across
1261 // basic blocks. This will be a huge win, but we are waiting on the global
1262 // allocators before we can do this.
1264 if (ContainsFPCode) {
// Placed just before the terminator so every FP def/use in the block is
// covered by the kill.
1265 BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0);
1269 // Clear state used for selection.
1271 RegPressureMap.clear();
1275 // ComputeRegPressure - Compute the RegPressureMap, which is an approximation
1276 // for the number of registers required to compute each node. This is basically
1277 // computing a generalized form of the Sethi-Ullman number for each node.
// The result for each node is memoized in RegPressureMap; a cached value of
// zero is treated as "not yet computed".
1278 unsigned ISel::ComputeRegPressure(SDOperand O) {
1280 unsigned &Result = RegPressureMap[N];
1281 if (Result) return Result;
1283 // FIXME: Should operations like CALL (which clobber lots o regs) have a
1284 // higher fixed cost??
1286 if (N->getNumOperands() == 0) {
1289 unsigned MaxRegUse = 0;
1290 unsigned NumExtraMaxRegUsers = 0;
1291 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
// Constants can be rematerialized for free, so they add no pressure.
1293 if (N->getOperand(i).getOpcode() == ISD::Constant)
1296 Regs = ComputeRegPressure(N->getOperand(i));
1297 if (Regs > MaxRegUse) {
1299 NumExtraMaxRegUsers = 0;
// Ties among value-producing (non-chain) operands each add one register,
// in classic Sethi-Ullman fashion.
1300 } else if (Regs == MaxRegUse &&
1301 N->getOperand(i).getValueType() != MVT::Other) {
1302 ++NumExtraMaxRegUsers;
// TokenFactor produces no value, so it needs one register fewer.
1306 if (O.getOpcode() != ISD::TokenFactor)
1307 Result = MaxRegUse+NumExtraMaxRegUsers;
1309 Result = MaxRegUse == 1 ? 0 : MaxRegUse-1;
1312 //std::cerr << " WEIGHT: " << Result << " "; N->dump(); std::cerr << "\n";
1316 /// NodeTransitivelyUsesValue - Return true if N or any of its uses uses Op.
1317 /// The DAG cannot have cycles in it, by definition, so the visited set is not
1318 /// needed to prevent infinite loops. The DAG CAN, however, have unbounded
1319 /// reuse, so it prevents exponential cases.
1321 static bool NodeTransitivelyUsesValue(SDOperand N, SDOperand Op,
1322 std::set<SDNode*> &Visited) {
1323 if (N == Op) return true; // Found it.
1324 SDNode *Node = N.Val;
// Node depth increases from leaves toward the root, so anything at or
// below Op's depth cannot transitively use Op — prune the search.
1325 if (Node->getNumOperands() == 0 || // Leaf?
1326 Node->getNodeDepth() <= Op.getNodeDepth()) return false; // Can't find it?
1327 if (!Visited.insert(Node).second) return false; // Already visited?
1329 // Recurse for the first N-1 operands.
// Note: operands 1..e-1 are searched recursively; operand 0 is handled by
// the tail call below to keep stack depth down.
1330 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
1331 if (NodeTransitivelyUsesValue(Node->getOperand(i), Op, Visited))
1334 // Tail recurse for the last operand.
1335 return NodeTransitivelyUsesValue(Node->getOperand(0), Op, Visited);
// SelectAddrExprs - Emit code for the register leaves of a matched
// X86ISelAddressMode and return the concrete X86AddressMode that the
// instruction builders consume.  Emission order is chosen to minimize
// register pressure, except where one leaf transitively uses the other.
1338 X86AddressMode ISel::SelectAddrExprs(const X86ISelAddressMode &IAM) {
1339 X86AddressMode Result;
1341 // If we need to emit two register operands, emit the one with the highest
1342 // register pressure first.
1343 if (IAM.BaseType == X86ISelAddressMode::RegBase &&
1344 IAM.Base.Reg.Val && IAM.IndexReg.Val) {
1345 bool EmitBaseThenIndex;
1346 if (getRegPressure(IAM.Base.Reg) > getRegPressure(IAM.IndexReg)) {
1347 std::set<SDNode*> Visited;
1348 EmitBaseThenIndex = true;
1349 // If Base ends up pointing to Index, we must emit index first. This is
1350 // because of the way we fold loads, we may end up doing bad things with
// the folded memory operand if the dependent leaf is emitted second.
1352 if (NodeTransitivelyUsesValue(IAM.Base.Reg, IAM.IndexReg, Visited))
1353 EmitBaseThenIndex = false;
1355 std::set<SDNode*> Visited;
1356 EmitBaseThenIndex = false;
1357 // If Base ends up pointing to Index, we must emit index first. This is
1358 // because of the way we fold loads, we may end up doing bad things with
// the folded memory operand (mirror of the case above, roles swapped).
1360 if (NodeTransitivelyUsesValue(IAM.IndexReg, IAM.Base.Reg, Visited))
1361 EmitBaseThenIndex = true;
1364 if (EmitBaseThenIndex) {
1365 Result.Base.Reg = SelectExpr(IAM.Base.Reg);
1366 Result.IndexReg = SelectExpr(IAM.IndexReg);
1368 Result.IndexReg = SelectExpr(IAM.IndexReg);
1369 Result.Base.Reg = SelectExpr(IAM.Base.Reg);
// Only one of the two register leaves is present: emit just that one.
1372 } else if (IAM.BaseType == X86ISelAddressMode::RegBase && IAM.Base.Reg.Val) {
1373 Result.Base.Reg = SelectExpr(IAM.Base.Reg);
1374 } else if (IAM.IndexReg.Val) {
1375 Result.IndexReg = SelectExpr(IAM.IndexReg);
// Translate the discriminated base (register vs. frame index) and copy the
// remaining scalar fields over.
1378 switch (IAM.BaseType) {
1379 case X86ISelAddressMode::RegBase:
1380 Result.BaseType = X86AddressMode::RegBase;
1382 case X86ISelAddressMode::FrameIndexBase:
1383 Result.BaseType = X86AddressMode::FrameIndexBase;
1384 Result.Base.FrameIndex = IAM.Base.FrameIndex;
1387 assert(0 && "Unknown base type!");
1390 Result.Scale = IAM.Scale;
1391 Result.Disp = IAM.Disp;
1396 /// SelectAddress - Pattern match the maximal addressing mode for this node and
1397 /// emit all of the leaf registers.
// Thin wrapper: MatchAddress does the (side-effect-free) pattern matching,
// SelectAddrExprs emits the leaves and produces the final X86AddressMode.
1398 void ISel::SelectAddress(SDOperand N, X86AddressMode &AM) {
1399 X86ISelAddressMode IAM;
1400 MatchAddress(N, IAM);
1401 AM = SelectAddrExprs(IAM);
1404 /// MatchAddress - Add the specified node to the specified addressing mode,
1405 /// returning true if it cannot be done. This just pattern matches for the
1406 /// addressing mode, it does not cause any code to be emitted. For that, use
/// SelectAddress (which calls SelectAddrExprs on the matched result).
1408 bool ISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM) {
1409 switch (N.getOpcode()) {
1411 case ISD::FrameIndex:
// A frame index can serve as the base, but only if the base slot is free.
1412 if (AM.BaseType == X86ISelAddressMode::RegBase && AM.Base.Reg.Val == 0) {
1413 AM.BaseType = X86ISelAddressMode::FrameIndexBase;
1414 AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
1418 case ISD::GlobalAddress:
1420 GlobalValue *GV = cast<GlobalAddressSDNode>(N)->getGlobal();
1421 // For Darwin, external and weak symbols are indirect, so we want to load
1422 // the value at address GV, not the value of GV itself. This means that
1423 // the GlobalAddress must be in the base or index register of the address,
1424 // not the GV offset field.
1425 if (Subtarget->getIndirectExternAndWeakGlobals() &&
1426 (GV->hasWeakLinkage() || GV->isExternal())) {
// A constant operand folds directly into the displacement.
1435 AM.Disp += cast<ConstantSDNode>(N)->getValue();
1438 // We might have folded the load into this shift, so don't regen the value
// if it has already been emitted (it is recorded in ExprMap).
1440 if (ExprMap.count(N)) break;
// SHL by 1, 2, or 3 becomes a scale of 2, 4, or 8 in the index slot.
1442 if (AM.IndexReg.Val == 0 && AM.Scale == 1)
1443 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1))) {
1444 unsigned Val = CN->getValue();
1445 if (Val == 1 || Val == 2 || Val == 3) {
1446 AM.Scale = 1 << Val;
1447 SDOperand ShVal = N.Val->getOperand(0);
1449 // Okay, we know that we have a scale by now. However, if the scaled
1450 // value is an add of something and a constant, we can fold the
1451 // constant into the disp field here.
1452 if (ShVal.Val->getOpcode() == ISD::ADD && ShVal.hasOneUse() &&
1453 isa<ConstantSDNode>(ShVal.Val->getOperand(1))) {
1454 AM.IndexReg = ShVal.Val->getOperand(0);
1455 ConstantSDNode *AddVal =
1456 cast<ConstantSDNode>(ShVal.Val->getOperand(1));
// The added constant must itself be shifted when folded into Disp.
1457 AM.Disp += AddVal->getValue() << Val;
1459 AM.IndexReg = ShVal;
1466 // We might have folded the load into this mul, so don't regen the value if
// it was already emitted.
1468 if (ExprMap.count(N)) break;
1470 // X*[3,5,9] -> X+X*[2,4,8]
// Requires both the base and index slots to be free, since X is used twice.
1471 if (AM.IndexReg.Val == 0 && AM.BaseType == X86ISelAddressMode::RegBase &&
1472 AM.Base.Reg.Val == 0)
1473 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1)))
1474 if (CN->getValue() == 3 || CN->getValue() == 5 || CN->getValue() == 9) {
1475 AM.Scale = unsigned(CN->getValue())-1;
1477 SDOperand MulVal = N.Val->getOperand(0);
1480 // Okay, we know that we have a scale by now. However, if the scaled
1481 // value is an add of something and a constant, we can fold the
1482 // constant into the disp field here.
1483 if (MulVal.Val->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
1484 isa<ConstantSDNode>(MulVal.Val->getOperand(1))) {
1485 Reg = MulVal.Val->getOperand(0);
1486 ConstantSDNode *AddVal =
1487 cast<ConstantSDNode>(MulVal.Val->getOperand(1));
// (X+C)*K contributes C*K to the displacement.
1488 AM.Disp += AddVal->getValue() * CN->getValue();
1490 Reg = N.Val->getOperand(0);
// Same value feeds both base and scaled index: X + X*(K-1) == X*K.
1493 AM.IndexReg = AM.Base.Reg = Reg;
1499 // We might have folded the load into this mul, so don't regen the value if
// it was already emitted.
1501 if (ExprMap.count(N)) break;
// Try to match both ADD operands into the mode, in either order, restoring
// the saved state between attempts so failures don't leave partial matches.
1503 X86ISelAddressMode Backup = AM;
1504 if (!MatchAddress(N.Val->getOperand(0), AM) &&
1505 !MatchAddress(N.Val->getOperand(1), AM))
1508 if (!MatchAddress(N.Val->getOperand(1), AM) &&
1509 !MatchAddress(N.Val->getOperand(0), AM))
1516 // Is the base register already occupied?
1517 if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.Val) {
1518 // If so, check to see if the scale index register is set.
1519 if (AM.IndexReg.Val == 0) {
1525 // Otherwise, we cannot select it.
1529 // Default, generate it as a register.
1530 AM.BaseType = X86ISelAddressMode::RegBase;
1535 /// Emit2SetCCsAndLogical - Emit the following sequence of instructions,
1536 /// assuming that the temporary registers are in the 8-bit register class.
///
///   Tmp1 = SetCC1
///   Tmp2 = SetCC2
1540 /// DestReg = logicalop Tmp1, Tmp2
///
/// Used by EmitSetCC for FP conditions that require combining two flag tests
/// (e.g. parity plus zero) into one boolean result.
1542 static void Emit2SetCCsAndLogical(MachineBasicBlock *BB, unsigned SetCC1,
1543 unsigned SetCC2, unsigned LogicalOp,
1545 SSARegMap *RegMap = BB->getParent()->getSSARegMap();
1546 unsigned Tmp1 = RegMap->createVirtualRegister(X86::R8RegisterClass);
1547 unsigned Tmp2 = RegMap->createVirtualRegister(X86::R8RegisterClass);
1548 BuildMI(BB, SetCC1, 0, Tmp1);
1549 BuildMI(BB, SetCC2, 0, Tmp2);
1550 BuildMI(BB, LogicalOp, 2, DestReg).addReg(Tmp1).addReg(Tmp2);
1553 /// EmitSetCC - Emit the code to set the specified 8-bit register to 1 if the
1554 /// condition codes match the specified SetCCOpcode. Note that some conditions
1555 /// require multiple instructions to generate the correct value.
1556 static void EmitSetCC(MachineBasicBlock *BB, unsigned DestReg,
1557 ISD::CondCode SetCCOpcode, bool isFP) {
// Integer conditions map 1:1 onto SETcc opcodes.
1560 switch (SetCCOpcode) {
1561 default: assert(0 && "Illegal integer SetCC!");
1562 case ISD::SETEQ: Opc = X86::SETEr; break;
1563 case ISD::SETGT: Opc = X86::SETGr; break;
1564 case ISD::SETGE: Opc = X86::SETGEr; break;
1565 case ISD::SETLT: Opc = X86::SETLr; break;
1566 case ISD::SETLE: Opc = X86::SETLEr; break;
1567 case ISD::SETNE: Opc = X86::SETNEr; break;
1568 case ISD::SETULT: Opc = X86::SETBr; break;
1569 case ISD::SETUGT: Opc = X86::SETAr; break;
1570 case ISD::SETULE: Opc = X86::SETBEr; break;
1571 case ISD::SETUGE: Opc = X86::SETAEr; break;
1574 // On a floating point condition, the flags are set as follows:
// ZF | PF | CF | result of the FP compare that preceded us:
1576 // 0 | 0 | 0 | X > Y
1577 // 0 | 0 | 1 | X < Y
1578 // 1 | 0 | 0 | X == Y
1579 // 1 | 1 | 1 | unordered
1581 switch (SetCCOpcode) {
1582 default: assert(0 && "Invalid FP setcc!");
1585 Opc = X86::SETEr; // True if ZF = 1
1589 Opc = X86::SETAr; // True if CF = 0 and ZF = 0
1593 Opc = X86::SETAEr; // True if CF = 0
1597 Opc = X86::SETBr; // True if CF = 1
1601 Opc = X86::SETBEr; // True if CF = 1 or ZF = 1
1605 Opc = X86::SETNEr; // True if ZF = 0
1608 Opc = X86::SETPr; // True if PF = 1
1611 Opc = X86::SETNPr; // True if PF = 0
// The ordered/unordered variants below need two flag tests combined with a
// logical op, so they emit their sequence directly and return early.
1613 case ISD::SETOEQ: // !PF & ZF
1614 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETEr, X86::AND8rr, DestReg);
1616 case ISD::SETOLT: // !PF & CF
1617 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETBr, X86::AND8rr, DestReg);
1619 case ISD::SETOLE: // !PF & (CF || ZF)
1620 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETBEr, X86::AND8rr, DestReg);
1622 case ISD::SETUGT: // PF | (!ZF & !CF)
1623 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETAr, X86::OR8rr, DestReg);
1625 case ISD::SETUGE: // PF | !CF
1626 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETAEr, X86::OR8rr, DestReg);
1628 case ISD::SETUNE: // PF | !ZF
1629 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETNEr, X86::OR8rr, DestReg);
// Common exit for the single-instruction cases above.
1633 BuildMI(BB, Opc, 0, DestReg);
1637 /// EmitBranchCC - Emit code into BB that arranges for control to transfer to
1638 /// the Dest block if the Cond condition is true. If we cannot fold this
1639 /// condition into the branch, return true.
1641 bool ISel::EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain,
1643 // FIXME: Evaluate whether it would be good to emit code like (X < Y) | (A >
1644 // B) using two conditional branches instead of one condbr, two setcc's, and
1646 if ((Cond.getOpcode() == ISD::OR ||
1647 Cond.getOpcode() == ISD::AND) && Cond.Val->hasOneUse()) {
1648 // And and or set the flags for us, so there is no need to emit a TST of the
1649 // result. It is only safe to do this if there is only a single use of the
1650 // AND/OR though, otherwise we don't know it will be emitted here.
1653 BuildMI(BB, X86::JNE, 1).addMBB(Dest);
1657 // Codegen br not C -> JE.
// "not C" appears as (xor C, -1) after legalization.
1658 if (Cond.getOpcode() == ISD::XOR)
1659 if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(Cond.Val->getOperand(1)))
1660 if (NC->isAllOnesValue()) {
// Emit the higher-pressure operand first to keep register usage down.
1662 if (getRegPressure(Chain) > getRegPressure(Cond)) {
1664 CondR = SelectExpr(Cond.Val->getOperand(0));
1666 CondR = SelectExpr(Cond.Val->getOperand(0));
1669 BuildMI(BB, X86::TEST8rr, 2).addReg(CondR).addReg(CondR);
1670 BuildMI(BB, X86::JE, 1).addMBB(Dest);
1674 if (Cond.getOpcode() != ISD::SETCC)
1675 return true; // Can only handle simple setcc's so far.
1676 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
1680 // Handle integer conditions first.
// Each integer condition maps to exactly one Jcc opcode.
1681 if (MVT::isInteger(Cond.getOperand(0).getValueType())) {
1683 default: assert(0 && "Illegal integer SetCC!");
1684 case ISD::SETEQ: Opc = X86::JE; break;
1685 case ISD::SETGT: Opc = X86::JG; break;
1686 case ISD::SETGE: Opc = X86::JGE; break;
1687 case ISD::SETLT: Opc = X86::JL; break;
1688 case ISD::SETLE: Opc = X86::JLE; break;
1689 case ISD::SETNE: Opc = X86::JNE; break;
1690 case ISD::SETULT: Opc = X86::JB; break;
1691 case ISD::SETUGT: Opc = X86::JA; break;
1692 case ISD::SETULE: Opc = X86::JBE; break;
1693 case ISD::SETUGE: Opc = X86::JAE; break;
1696 EmitCMP(Cond.getOperand(0), Cond.getOperand(1), Cond.hasOneUse());
1697 BuildMI(BB, Opc, 1).addMBB(Dest);
// Floating point conditions may need a second branch (Opc2) to cover the
// unordered (PF=1) case.
1701 unsigned Opc2 = 0; // Second branch if needed.
1703 // On a floating point condition, the flags are set as follows:
// ZF | PF | CF | compare outcome:
1705 // 0 | 0 | 0 | X > Y
1706 // 0 | 0 | 1 | X < Y
1707 // 1 | 0 | 0 | X == Y
1708 // 1 | 1 | 1 | unordered
1711 default: assert(0 && "Invalid FP setcc!");
1713 case ISD::SETEQ: Opc = X86::JE; break; // True if ZF = 1
1715 case ISD::SETGT: Opc = X86::JA; break; // True if CF = 0 and ZF = 0
1717 case ISD::SETGE: Opc = X86::JAE; break; // True if CF = 0
1719 case ISD::SETLT: Opc = X86::JB; break; // True if CF = 1
1721 case ISD::SETLE: Opc = X86::JBE; break; // True if CF = 1 or ZF = 1
1723 case ISD::SETNE: Opc = X86::JNE; break; // True if ZF = 0
1724 case ISD::SETUO: Opc = X86::JP; break; // True if PF = 1
1725 case ISD::SETO: Opc = X86::JNP; break; // True if PF = 0
1726 case ISD::SETUGT: // PF = 1 | (ZF = 0 & CF = 0)
1727 Opc = X86::JA; // ZF = 0 & CF = 0
1728 Opc2 = X86::JP; // PF = 1
1730 case ISD::SETUGE: // PF = 1 | CF = 0
1731 Opc = X86::JAE; // CF = 0
1732 Opc2 = X86::JP; // PF = 1
1734 case ISD::SETUNE: // PF = 1 | ZF = 0
1735 Opc = X86::JNE; // ZF = 0
1736 Opc2 = X86::JP; // PF = 1
// Conjunctive ordered conditions can't be expressed as a disjunction of
// two branches to the same target; punt to the generic path.
1738 case ISD::SETOEQ: // PF = 0 & ZF = 1
1741 return true; // FIXME: Emit more efficient code for this branch.
1742 case ISD::SETOLT: // PF = 0 & CF = 1
1745 return true; // FIXME: Emit more efficient code for this branch.
1746 case ISD::SETOLE: // PF = 0 & (CF = 1 || ZF = 1)
1747 //X86::JNP, X86::JBE
1749 return true; // FIXME: Emit more efficient code for this branch.
1753 EmitCMP(Cond.getOperand(0), Cond.getOperand(1), Cond.hasOneUse());
1754 BuildMI(BB, Opc, 1).addMBB(Dest);
1756 BuildMI(BB, Opc2, 1).addMBB(Dest);
1760 /// EmitSelectCC - Emit code into BB that performs a select operation between
1761 /// the two registers RTrue and RFalse, generating a result into RDest.
/// Strategy, in order of preference: (1) SSE compare + AND/ANDN mask trick
/// when one arm is +0.0, (2) control flow with a PHI for SSE FP selects,
/// (3) a single CMOV/FCMOV, (4) generic TEST of the condition + CMOVE.
1763 void ISel::EmitSelectCC(SDOperand Cond, SDOperand True, SDOperand False,
1764 MVT::ValueType SVT, unsigned RDest) {
1765 unsigned RTrue, RFalse;
// Internal condition encoding; indexes the CMOV tables below.  NOT_SET
// means the condition could not be folded into a single cmov.
1767 EQ, NE, LT, LE, GT, GE, B, BE, A, AE, P, NP,
1769 } CondCode = NOT_SET;
// CMOV opcode tables, indexed by CondCode, for 16-bit, 32-bit, and x87 FP.
1771 static const unsigned CMOVTAB16[] = {
1772 X86::CMOVE16rr, X86::CMOVNE16rr, X86::CMOVL16rr, X86::CMOVLE16rr,
1773 X86::CMOVG16rr, X86::CMOVGE16rr, X86::CMOVB16rr, X86::CMOVBE16rr,
1774 X86::CMOVA16rr, X86::CMOVAE16rr, X86::CMOVP16rr, X86::CMOVNP16rr,
1776 static const unsigned CMOVTAB32[] = {
1777 X86::CMOVE32rr, X86::CMOVNE32rr, X86::CMOVL32rr, X86::CMOVLE32rr,
1778 X86::CMOVG32rr, X86::CMOVGE32rr, X86::CMOVB32rr, X86::CMOVBE32rr,
1779 X86::CMOVA32rr, X86::CMOVAE32rr, X86::CMOVP32rr, X86::CMOVNP32rr,
1781 static const unsigned CMOVTABFP[] = {
1782 X86::FCMOVE , X86::FCMOVNE, /*missing*/0, /*missing*/0,
1783 /*missing*/0, /*missing*/0, X86::FCMOVB , X86::FCMOVBE,
1784 X86::FCMOVA , X86::FCMOVAE, X86::FCMOVP , X86::FCMOVNP
// Immediate predicate operands for CMPSS/CMPSD, indexed by CondCode.
1786 static const int SSE_CMOVTAB[] = {
1787 /*CMPEQ*/ 0, /*CMPNEQ*/ 4, /*missing*/ 0, /*missing*/ 0,
1788 /*missing*/ 0, /*missing*/ 0, /*CMPLT*/ 1, /*CMPLE*/ 2,
1789 /*CMPNLE*/ 6, /*CMPNLT*/ 5, /*CMPUNORD*/ 3, /*CMPORD*/ 7
1792 if (Cond.getOpcode() == ISD::SETCC) {
1793 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
1794 if (MVT::isInteger(Cond.getOperand(0).getValueType())) {
1796 default: assert(0 && "Unknown integer comparison!");
1797 case ISD::SETEQ: CondCode = EQ; break;
1798 case ISD::SETGT: CondCode = GT; break;
1799 case ISD::SETGE: CondCode = GE; break;
1800 case ISD::SETLT: CondCode = LT; break;
1801 case ISD::SETLE: CondCode = LE; break;
1802 case ISD::SETNE: CondCode = NE; break;
1803 case ISD::SETULT: CondCode = B; break;
1804 case ISD::SETUGT: CondCode = A; break;
1805 case ISD::SETULE: CondCode = BE; break;
1806 case ISD::SETUGE: CondCode = AE; break;
1809 // On a floating point condition, the flags are set as follows:
// ZF | PF | CF | compare outcome:
1811 // 0 | 0 | 0 | X > Y
1812 // 0 | 0 | 1 | X < Y
1813 // 1 | 0 | 0 | X == Y
1814 // 1 | 1 | 1 | unordered
1817 default: assert(0 && "Unknown FP comparison!");
1819 case ISD::SETEQ: CondCode = EQ; break; // True if ZF = 1
1821 case ISD::SETGT: CondCode = A; break; // True if CF = 0 and ZF = 0
1823 case ISD::SETGE: CondCode = AE; break; // True if CF = 0
1825 case ISD::SETLT: CondCode = B; break; // True if CF = 1
1827 case ISD::SETLE: CondCode = BE; break; // True if CF = 1 or ZF = 1
1829 case ISD::SETNE: CondCode = NE; break; // True if ZF = 0
1830 case ISD::SETUO: CondCode = P; break; // True if PF = 1
1831 case ISD::SETO: CondCode = NP; break; // True if PF = 0
1832 case ISD::SETUGT: // PF = 1 | (ZF = 0 & CF = 0)
1833 case ISD::SETUGE: // PF = 1 | CF = 0
1834 case ISD::SETUNE: // PF = 1 | ZF = 0
1835 case ISD::SETOEQ: // PF = 0 & ZF = 1
1836 case ISD::SETOLT: // PF = 0 & CF = 1
1837 case ISD::SETOLE: // PF = 0 & (CF = 1 || ZF = 1)
1838 // We cannot emit this comparison as a single cmov.
1844 // There's no SSE equivalent of FCMOVE. For cases where we set a condition
1845 // code above and one of the results of the select is +0.0, then we can fake
1846 // it up through a clever AND with mask. Otherwise, we will fall through to
1847 // the code below that will use a PHI node to select the right value.
1848 if (X86ScalarSSE && (SVT == MVT::f32 || SVT == MVT::f64)) {
1849 if (Cond.getOperand(0).getValueType() == SVT &&
1850 NOT_SET != CondCode) {
1851 ConstantFPSDNode *CT = dyn_cast<ConstantFPSDNode>(True);
1852 ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(False);
1853 bool TrueZero = CT && CT->isExactlyValue(0.0);
1854 bool FalseZero = CF && CF->isExactlyValue(0.0);
1855 if (TrueZero || FalseZero) {
1856 SDOperand LHS = Cond.getOperand(0);
1857 SDOperand RHS = Cond.getOperand(1);
1859 // Select the two halves of the condition
// Higher-pressure operand first, to minimize live registers.
1860 unsigned RLHS, RRHS;
1861 if (getRegPressure(LHS) > getRegPressure(RHS)) {
1862 RLHS = SelectExpr(LHS);
1863 RRHS = SelectExpr(RHS);
1865 RRHS = SelectExpr(RHS);
1866 RLHS = SelectExpr(LHS);
1869 // Emit the comparison and generate a mask from it
// CMPSS/CMPSD produce an all-ones/all-zeros mask per the predicate.
1870 unsigned MaskReg = MakeReg(SVT);
1871 unsigned Opc = (SVT == MVT::f32) ? X86::CMPSSrr : X86::CMPSDrr;
1872 BuildMI(BB, Opc, 3, MaskReg).addReg(RLHS).addReg(RRHS)
1873 .addImm(SSE_CMOVTAB[CondCode]);
// True arm is +0.0: select False where mask is clear (ANDN).
1876 RFalse = SelectExpr(False);
1877 Opc = (SVT == MVT::f32) ? X86::ANDNPSrr : X86::ANDNPDrr;
1878 BuildMI(BB, Opc, 2, RDest).addReg(MaskReg).addReg(RFalse);
// False arm is +0.0: select True where mask is set (AND).
1880 RTrue = SelectExpr(True);
1881 Opc = (SVT == MVT::f32) ? X86::ANDPSrr : X86::ANDPDrr;
1882 BuildMI(BB, Opc, 2, RDest).addReg(MaskReg).addReg(RTrue);
1890 // Select the true and false values for use in both the SSE PHI case, and the
1891 // integer or x87 cmov cases below.
1892 if (getRegPressure(True) > getRegPressure(False)) {
1893 RTrue = SelectExpr(True);
1894 RFalse = SelectExpr(False);
1896 RFalse = SelectExpr(False);
1897 RTrue = SelectExpr(True);
1900 // Since there's no SSE equivalent of FCMOVE, and we couldn't generate an
1901 // AND with mask, we'll have to do the normal RISC thing and generate a PHI
1902 // node to select between the true and false values.
1903 if (X86ScalarSSE && (SVT == MVT::f32 || SVT == MVT::f64)) {
1904 // FIXME: emit a direct compare and branch rather than setting a cond reg
// and testing it here.
1906 unsigned CondReg = SelectExpr(Cond);
1907 BuildMI(BB, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
1909 // Create an iterator with which to insert the MBB for copying the false
1910 // value and the MBB to hold the PHI instruction for this SetCC.
1911 MachineBasicBlock *thisMBB = BB;
1912 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1913 ilist<MachineBasicBlock>::iterator It = BB;
// Diamond-free select lowering:
1919 // cmpTY ccX, r1, r2
//   jne sinkMBB            (true value already in RTrue)
1921 // fallthrough --> copy0MBB
1922 MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
1923 MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
1924 BuildMI(BB, X86::JNE, 1).addMBB(sinkMBB);
1925 MachineFunction *F = BB->getParent();
1926 F->getBasicBlockList().insert(It, copy0MBB);
1927 F->getBasicBlockList().insert(It, sinkMBB);
1928 // Update machine-CFG edges
1929 BB->addSuccessor(copy0MBB);
1930 BB->addSuccessor(sinkMBB);
// copy0MBB:
1933 // %FalseValue = ...
1934 // # fallthrough to sinkMBB
1936 // Update machine-CFG edges
1937 BB->addSuccessor(sinkMBB);
// sinkMBB:
1940 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
1943 BuildMI(BB, X86::PHI, 4, RDest).addReg(RFalse)
1944 .addMBB(copy0MBB).addReg(RTrue).addMBB(thisMBB);
// A foldable condition: pick the CMOV opcode from the per-type table.
1949 if (CondCode != NOT_SET) {
1951 default: assert(0 && "Cannot select this type!");
1952 case MVT::i16: Opc = CMOVTAB16[CondCode]; break;
1953 case MVT::i32: Opc = CMOVTAB32[CondCode]; break;
1954 case MVT::f64: Opc = CMOVTABFP[CondCode]; break;
1958 // Finally, if we weren't able to fold this, just emit the condition and test
// it with CMOVE (note CMOVTABFP has holes, so Opc may be 0 here too).
1960 if (CondCode == NOT_SET || Opc == 0) {
1961 // Get the condition into the zero flag.
1962 unsigned CondReg = SelectExpr(Cond);
1963 BuildMI(BB, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
1966 default: assert(0 && "Cannot select this type!");
1967 case MVT::i16: Opc = X86::CMOVE16rr; break;
1968 case MVT::i32: Opc = X86::CMOVE32rr; break;
1969 case MVT::f64: Opc = X86::FCMOVE; break;
1972 // FIXME: CMP R, 0 -> TEST R, R
1973 EmitCMP(Cond.getOperand(0), Cond.getOperand(1), Cond.Val->hasOneUse());
// NOTE(review): the swap flips the arms so the emitted cmov's predicate
// matches the select's sense — confirm against the elided branch structure.
1974 std::swap(RTrue, RFalse);
1976 BuildMI(BB, Opc, 2, RDest).addReg(RTrue).addReg(RFalse);
// EmitCMP - Emit a compare of LHS against RHS that sets the flags for a
// following conditional branch/set/cmov.  Folds an immediate RHS, a foldable
// LHS load (when this is the compare's only use), and the x87 FTST trick for
// comparing against +/-0.0; otherwise emits a register-register compare.
1979 void ISel::EmitCMP(SDOperand LHS, SDOperand RHS, bool HasOneUse) {
1981 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(RHS)) {
// cmp [mem], imm: fold the load on the LHS into the compare.
1983 if (HasOneUse && isFoldableLoad(LHS, RHS)) {
1984 switch (RHS.getValueType()) {
1987 case MVT::i8: Opc = X86::CMP8mi; break;
1988 case MVT::i16: Opc = X86::CMP16mi; break;
1989 case MVT::i32: Opc = X86::CMP32mi; break;
1993 EmitFoldedLoad(LHS, AM);
1994 addFullAddress(BuildMI(BB, Opc, 5), AM).addImm(CN->getValue());
// cmp reg, imm.
1999 switch (RHS.getValueType()) {
2002 case MVT::i8: Opc = X86::CMP8ri; break;
2003 case MVT::i16: Opc = X86::CMP16ri; break;
2004 case MVT::i32: Opc = X86::CMP32ri; break;
2007 unsigned Tmp1 = SelectExpr(LHS);
2008 BuildMI(BB, Opc, 2).addReg(Tmp1).addImm(CN->getValue());
2011 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(RHS)) {
// x87 only: compare against zero via FTST, then move the FPU status word
// into EFLAGS with FNSTSW/SAHF.
2012 if (!X86ScalarSSE && (CN->isExactlyValue(+0.0) ||
2013 CN->isExactlyValue(-0.0))) {
2014 unsigned Reg = SelectExpr(LHS);
2015 BuildMI(BB, X86::FTST, 1).addReg(Reg);
2016 BuildMI(BB, X86::FNSTSW8r, 0);
2017 BuildMI(BB, X86::SAHF, 1);
// cmp [mem], reg: fold the LHS load into the compare.
2023 if (HasOneUse && isFoldableLoad(LHS, RHS)) {
2024 switch (RHS.getValueType()) {
2027 case MVT::i8: Opc = X86::CMP8mr; break;
2028 case MVT::i16: Opc = X86::CMP16mr; break;
2029 case MVT::i32: Opc = X86::CMP32mr; break;
2033 EmitFoldedLoad(LHS, AM);
2034 unsigned Reg = SelectExpr(RHS);
2035 addFullAddress(BuildMI(BB, Opc, 5), AM).addReg(Reg);
// Fallback: reg-reg compare; FP uses UCOMIS* (SSE) or FUCOMI (x87).
2040 switch (LHS.getValueType()) {
2041 default: assert(0 && "Cannot compare this value!");
2043 case MVT::i8: Opc = X86::CMP8rr; break;
2044 case MVT::i16: Opc = X86::CMP16rr; break;
2045 case MVT::i32: Opc = X86::CMP32rr; break;
2046 case MVT::f32: Opc = X86::UCOMISSrr; break;
2047 case MVT::f64: Opc = X86ScalarSSE ? X86::UCOMISDrr : X86::FUCOMIr; break;
2049 unsigned Tmp1, Tmp2;
// Evaluate the higher-pressure operand first.
2050 if (getRegPressure(LHS) > getRegPressure(RHS)) {
2051 Tmp1 = SelectExpr(LHS);
2052 Tmp2 = SelectExpr(RHS);
2054 Tmp2 = SelectExpr(RHS);
2055 Tmp1 = SelectExpr(LHS);
2057 BuildMI(BB, Opc, 2).addReg(Tmp1).addReg(Tmp2);
2060 /// isFoldableLoad - Return true if this is a load instruction that can safely
2061 /// be folded into an operation that uses it.
/// Op is the candidate load: either a plain ISD::LOAD, or (when FloatPromoteOk
/// is set) an ISD::EXTLOAD whose memory type is f32. OtherOp is the other
/// operand of the operation the load would be folded into; it is used to make
/// sure folding would not create a cycle in the DAG.
2062 bool ISel::isFoldableLoad(SDOperand Op, SDOperand OtherOp, bool FloatPromoteOk){
2063 if (Op.getOpcode() == ISD::LOAD) {
2064 // FIXME: currently can't fold constant pool indexes.
2065 if (isa<ConstantPoolSDNode>(Op.getOperand(1)))
// Extending f32 load: foldable only when the caller allows float promotion
// (operand 3 of EXTLOAD is the VT node describing the memory type).
2067 } else if (FloatPromoteOk && Op.getOpcode() == ISD::EXTLOAD &&
2068 cast<VTSDNode>(Op.getOperand(3))->getVT() == MVT::f32) {
2069 // FIXME: currently can't fold constant pool indexes.
2070 if (isa<ConstantPoolSDNode>(Op.getOperand(1)))
2076 // If this load has already been emitted, we clearly can't fold it.
// Result 0 is the loaded value and result 1 is the token chain; presence of
// the chain in ExprMap means the load was already selected.
2077 assert(Op.ResNo == 0 && "Not a use of the value of the load?");
2078 if (ExprMap.count(Op.getValue(1))) return false;
2079 assert(!ExprMap.count(Op.getValue(0)) && "Value in map but not token chain?");
2080 assert(!ExprMap.count(Op.getValue(1))&&"Token lowered but value not in map?");
2082 // If there is not just one use of its value, we cannot fold.
2083 if (!Op.Val->hasNUsesOfValue(1, 0)) return false;
2085 // Finally, we cannot fold the load into the operation if this would induce a
2086 // cycle into the resultant dag. To check for this, see if OtherOp (the other
2087 // operand of the operation we are folding the load into) can possible use the
2088 // chain node defined by the load.
2089 if (OtherOp.Val && !Op.Val->hasNUsesOfValue(0, 1)) { // Has uses of chain?
2090 std::set<SDNode*> Visited;
2091 if (NodeTransitivelyUsesValue(OtherOp, Op.getValue(1), Visited))
2098 /// EmitFoldedLoad - Ensure that the arguments of the load are code generated,
2099 /// and compute the address being loaded into AM.
/// Op must be a load that isFoldableLoad approved. Only the load's chain and
/// address are emitted here; the actual memory access is issued by the caller
/// as part of the folded instruction. The load's token-chain result is marked
/// as lowered in ExprMap so it is never emitted a second time.
2100 void ISel::EmitFoldedLoad(SDOperand Op, X86AddressMode &AM) {
2101 SDOperand Chain = Op.getOperand(0);
2102 SDOperand Address = Op.getOperand(1);
// Emit the higher-register-pressure input first, as elsewhere in this file.
2104 if (getRegPressure(Chain) > getRegPressure(Address)) {
2106 SelectAddress(Address, AM);
2108 SelectAddress(Address, AM);
2112 // The chain for this load is now lowered.
2113 assert(ExprMap.count(SDOperand(Op.Val, 1)) == 0 &&
2114 "Load emitted more than once?");
2115 if (!ExprMap.insert(std::make_pair(Op.getValue(1), 1)).second)
2116 assert(0 && "Load emitted more than once!");
2119 // EmitOrOpOp - Pattern match the expression (Op1|Op2), where we know that op1
2120 // and op2 are i8/i16/i32 values with one use each (the or). If we can form a
2121 // SHLD or SHRD, emit the instruction (generating the value into DestReg) and
// return true; otherwise return false and let the caller emit a plain OR.
// Three patterns are recognized: (1) shift amounts related by a variable
// "RegSize - amt" subtraction in either direction, and (2) two constant shift
// amounts that sum to RegSize. When both shifted values are the same node the
// OR is a rotate (ROR/ROL); otherwise a double-shift (SHRD/SHLD) is used.
2123 bool ISel::EmitOrOpOp(SDOperand Op1, SDOperand Op2, unsigned DestReg) {
// Canonicalize so that Op1 is the SHL and Op2 is the SRL; bail out if the OR
// is not a (shl X, a) | (srl Y, b) combination at all.
2124 if (Op1.getOpcode() == ISD::SHL && Op2.getOpcode() == ISD::SRL) {
2126 } else if (Op2.getOpcode() == ISD::SHL && Op1.getOpcode() == ISD::SRL) {
2127 std::swap(Op1, Op2); // Op1 is the SHL now.
2129 return false; // No match
2132 SDOperand ShlVal = Op1.getOperand(0);
2133 SDOperand ShlAmt = Op1.getOperand(1);
2134 SDOperand ShrVal = Op2.getOperand(0);
2135 SDOperand ShrAmt = Op2.getOperand(1);
2137 unsigned RegSize = MVT::getSizeInBits(Op1.getValueType());
2139 // Find out if ShrAmt = 32-ShlAmt or ShlAmt = 32-ShrAmt.
// Pattern 1a: the SHL amount is (RegSize - ShrAmt), with ShrAmt variable.
2140 if (ShlAmt.getOpcode() == ISD::SUB && ShlAmt.getOperand(1) == ShrAmt)
2141 if (ConstantSDNode *SubCST = dyn_cast<ConstantSDNode>(ShlAmt.getOperand(0)))
2142 if (SubCST->getValue() == RegSize) {
2143 // (A >> ShrAmt) | (A << (32-ShrAmt)) ==> ROR A, ShrAmt
2144 // (A >> ShrAmt) | (B << (32-ShrAmt)) ==> SHRD A, B, ShrAmt
2145 if (ShrVal == ShlVal) {
2146 unsigned Reg, ShAmt;
// Higher-pressure operand is selected first, as elsewhere in this file.
2147 if (getRegPressure(ShrVal) > getRegPressure(ShrAmt)) {
2148 Reg = SelectExpr(ShrVal);
2149 ShAmt = SelectExpr(ShrAmt);
2151 ShAmt = SelectExpr(ShrAmt);
2152 Reg = SelectExpr(ShrVal);
// Variable-count rotates/shifts take their count in CL on x86.
2154 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShAmt);
2155 unsigned Opc = RegSize == 8 ? X86::ROR8rCL :
2156 (RegSize == 16 ? X86::ROR16rCL : X86::ROR32rCL);
2157 BuildMI(BB, Opc, 1, DestReg).addReg(Reg);
// Distinct values: use SHRD. Only 16- and 32-bit double-shift forms are
// emitted here, so 8-bit operands fall through unmatched.
2159 } else if (RegSize != 8) {
2160 unsigned AReg, BReg;
2161 if (getRegPressure(ShlVal) > getRegPressure(ShrVal)) {
2162 BReg = SelectExpr(ShlVal);
2163 AReg = SelectExpr(ShrVal);
2165 AReg = SelectExpr(ShrVal);
2166 BReg = SelectExpr(ShlVal);
2168 unsigned ShAmt = SelectExpr(ShrAmt);
2169 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShAmt);
2170 unsigned Opc = RegSize == 16 ? X86::SHRD16rrCL : X86::SHRD32rrCL;
2171 BuildMI(BB, Opc, 2, DestReg).addReg(AReg).addReg(BReg);
// Pattern 1b: mirror case — the SRL amount is (RegSize - ShlAmt).
2176 if (ShrAmt.getOpcode() == ISD::SUB && ShrAmt.getOperand(1) == ShlAmt)
2177 if (ConstantSDNode *SubCST = dyn_cast<ConstantSDNode>(ShrAmt.getOperand(0)))
2178 if (SubCST->getValue() == RegSize) {
2179 // (A << ShlAmt) | (A >> (32-ShlAmt)) ==> ROL A, ShrAmt
2180 // (A << ShlAmt) | (B >> (32-ShlAmt)) ==> SHLD A, B, ShrAmt
2181 if (ShrVal == ShlVal) {
2182 unsigned Reg, ShAmt;
2183 if (getRegPressure(ShrVal) > getRegPressure(ShlAmt)) {
2184 Reg = SelectExpr(ShrVal);
2185 ShAmt = SelectExpr(ShlAmt);
2187 ShAmt = SelectExpr(ShlAmt);
2188 Reg = SelectExpr(ShrVal);
2190 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShAmt);
2191 unsigned Opc = RegSize == 8 ? X86::ROL8rCL :
2192 (RegSize == 16 ? X86::ROL16rCL : X86::ROL32rCL);
2193 BuildMI(BB, Opc, 1, DestReg).addReg(Reg);
2195 } else if (RegSize != 8) {
2196 unsigned AReg, BReg;
2197 if (getRegPressure(ShlVal) > getRegPressure(ShrVal)) {
2198 AReg = SelectExpr(ShlVal);
2199 BReg = SelectExpr(ShrVal);
2201 BReg = SelectExpr(ShrVal);
2202 AReg = SelectExpr(ShlVal);
2204 unsigned ShAmt = SelectExpr(ShlAmt);
2205 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShAmt);
2206 unsigned Opc = RegSize == 16 ? X86::SHLD16rrCL : X86::SHLD32rrCL;
2207 BuildMI(BB, Opc, 2, DestReg).addReg(AReg).addReg(BReg);
// Pattern 2: both shift amounts are constants summing to RegSize; emit the
// immediate-count rotate or double-shift forms instead of going through CL.
2212 if (ConstantSDNode *ShrCst = dyn_cast<ConstantSDNode>(ShrAmt))
2213 if (ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(ShlAmt))
2214 if (ShrCst->getValue() < RegSize && ShlCst->getValue() < RegSize)
2215 if (ShrCst->getValue() == RegSize-ShlCst->getValue()) {
2216 // (A >> 5) | (A << 27) --> ROR A, 5
2217 // (A >> 5) | (B << 27) --> SHRD A, B, 5
2218 if (ShrVal == ShlVal) {
2219 unsigned Reg = SelectExpr(ShrVal);
2220 unsigned Opc = RegSize == 8 ? X86::ROR8ri :
2221 (RegSize == 16 ? X86::ROR16ri : X86::ROR32ri);
2222 BuildMI(BB, Opc, 2, DestReg).addReg(Reg).addImm(ShrCst->getValue());
2224 } else if (RegSize != 8) {
2225 unsigned AReg, BReg;
2226 if (getRegPressure(ShlVal) > getRegPressure(ShrVal)) {
2227 BReg = SelectExpr(ShlVal);
2228 AReg = SelectExpr(ShrVal);
2230 AReg = SelectExpr(ShrVal);
2231 BReg = SelectExpr(ShlVal);
2233 unsigned Opc = RegSize == 16 ? X86::SHRD16rri8 : X86::SHRD32rri8;
2234 BuildMI(BB, Opc, 3, DestReg).addReg(AReg).addReg(BReg)
2235 .addImm(ShrCst->getValue());
2243 unsigned ISel::SelectExpr(SDOperand N) {
2245 unsigned Tmp1, Tmp2, Tmp3;
2247 SDNode *Node = N.Val;
2250 if (Node->getOpcode() == ISD::CopyFromReg) {
2251 if (MRegisterInfo::isVirtualRegister(cast<RegSDNode>(Node)->getReg()) ||
2252 cast<RegSDNode>(Node)->getReg() == X86::ESP) {
2253 // Just use the specified register as our input.
2254 return cast<RegSDNode>(Node)->getReg();
2258 unsigned &Reg = ExprMap[N];
2259 if (Reg) return Reg;
2261 switch (N.getOpcode()) {
2263 Reg = Result = (N.getValueType() != MVT::Other) ?
2264 MakeReg(N.getValueType()) : 1;
2266 case X86ISD::TAILCALL:
2268 // If this is a call instruction, make sure to prepare ALL of the result
2269 // values as well as the chain.
2270 ExprMap[N.getValue(0)] = 1;
2271 if (Node->getNumValues() > 1) {
2272 Result = MakeReg(Node->getValueType(1));
2273 ExprMap[N.getValue(1)] = Result;
2274 for (unsigned i = 2, e = Node->getNumValues(); i != e; ++i)
2275 ExprMap[N.getValue(i)] = MakeReg(Node->getValueType(i));
2280 case ISD::ADD_PARTS:
2281 case ISD::SUB_PARTS:
2282 case ISD::SHL_PARTS:
2283 case ISD::SRL_PARTS:
2284 case ISD::SRA_PARTS:
2285 Result = MakeReg(Node->getValueType(0));
2286 ExprMap[N.getValue(0)] = Result;
2287 for (unsigned i = 1, e = N.Val->getNumValues(); i != e; ++i)
2288 ExprMap[N.getValue(i)] = MakeReg(Node->getValueType(i));
2292 switch (N.getOpcode()) {
2295 assert(0 && "Node not handled!\n");
2296 case ISD::FP_EXTEND:
2297 assert(X86ScalarSSE && "Scalar SSE FP must be enabled to use f32");
2298 Tmp1 = SelectExpr(N.getOperand(0));
2299 BuildMI(BB, X86::CVTSS2SDrr, 1, Result).addReg(Tmp1);
2302 assert(X86ScalarSSE && "Scalar SSE FP must be enabled to use f32");
2303 Tmp1 = SelectExpr(N.getOperand(0));
2304 BuildMI(BB, X86::CVTSD2SSrr, 1, Result).addReg(Tmp1);
2306 case ISD::CopyFromReg:
2307 Select(N.getOperand(0));
2309 Reg = Result = ExprMap[N.getValue(0)] =
2310 MakeReg(N.getValue(0).getValueType());
2312 switch (Node->getValueType(0)) {
2313 default: assert(0 && "Cannot CopyFromReg this!");
2316 BuildMI(BB, X86::MOV8rr, 1,
2317 Result).addReg(cast<RegSDNode>(Node)->getReg());
2320 BuildMI(BB, X86::MOV16rr, 1,
2321 Result).addReg(cast<RegSDNode>(Node)->getReg());
2324 BuildMI(BB, X86::MOV32rr, 1,
2325 Result).addReg(cast<RegSDNode>(Node)->getReg());
2329 case ISD::FrameIndex:
2330 Tmp1 = cast<FrameIndexSDNode>(N)->getIndex();
2331 addFrameReference(BuildMI(BB, X86::LEA32r, 4, Result), (int)Tmp1);
2333 case ISD::ConstantPool:
2334 Tmp1 = cast<ConstantPoolSDNode>(N)->getIndex();
2335 addConstantPoolReference(BuildMI(BB, X86::LEA32r, 4, Result), Tmp1);
2337 case ISD::ConstantFP:
2339 assert(cast<ConstantFPSDNode>(N)->isExactlyValue(+0.0) &&
2340 "SSE only supports +0.0");
2341 Opc = (N.getValueType() == MVT::f32) ? X86::FLD0SS : X86::FLD0SD;
2342 BuildMI(BB, Opc, 0, Result);
2345 ContainsFPCode = true;
2346 Tmp1 = Result; // Intermediate Register
2347 if (cast<ConstantFPSDNode>(N)->getValue() < 0.0 ||
2348 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
2349 Tmp1 = MakeReg(MVT::f64);
2351 if (cast<ConstantFPSDNode>(N)->isExactlyValue(+0.0) ||
2352 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
2353 BuildMI(BB, X86::FLD0, 0, Tmp1);
2354 else if (cast<ConstantFPSDNode>(N)->isExactlyValue(+1.0) ||
2355 cast<ConstantFPSDNode>(N)->isExactlyValue(-1.0))
2356 BuildMI(BB, X86::FLD1, 0, Tmp1);
2358 assert(0 && "Unexpected constant!");
2360 BuildMI(BB, X86::FCHS, 1, Result).addReg(Tmp1);
2363 switch (N.getValueType()) {
2364 default: assert(0 && "Cannot use constants of this type!");
2366 case MVT::i8: Opc = X86::MOV8ri; break;
2367 case MVT::i16: Opc = X86::MOV16ri; break;
2368 case MVT::i32: Opc = X86::MOV32ri; break;
2370 BuildMI(BB, Opc, 1,Result).addImm(cast<ConstantSDNode>(N)->getValue());
2373 if (Node->getValueType(0) == MVT::f64) {
2374 // FIXME: SHOULD TEACH STACKIFIER ABOUT UNDEF VALUES!
2375 BuildMI(BB, X86::FLD0, 0, Result);
2377 BuildMI(BB, X86::IMPLICIT_DEF, 0, Result);
2380 case ISD::GlobalAddress: {
2381 GlobalValue *GV = cast<GlobalAddressSDNode>(N)->getGlobal();
2382 // For Darwin, external and weak symbols are indirect, so we want to load
2383 // the value at address GV, not the value of GV itself.
2384 if (Subtarget->getIndirectExternAndWeakGlobals() &&
2385 (GV->hasWeakLinkage() || GV->isExternal())) {
2386 BuildMI(BB, X86::MOV32rm, 4, Result).addReg(0).addZImm(1).addReg(0)
2387 .addGlobalAddress(GV, false, 0);
2389 BuildMI(BB, X86::MOV32ri, 1, Result).addGlobalAddress(GV);
2393 case ISD::ExternalSymbol: {
2394 const char *Sym = cast<ExternalSymbolSDNode>(N)->getSymbol();
2395 BuildMI(BB, X86::MOV32ri, 1, Result).addExternalSymbol(Sym);
2398 case ISD::ZERO_EXTEND: {
2399 int DestIs16 = N.getValueType() == MVT::i16;
2400 int SrcIs16 = N.getOperand(0).getValueType() == MVT::i16;
2402 // FIXME: This hack is here for zero extension casts from bool to i8. This
2403 // would not be needed if bools were promoted by Legalize.
2404 if (N.getValueType() == MVT::i8) {
2405 Tmp1 = SelectExpr(N.getOperand(0));
2406 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(Tmp1);
2410 if (isFoldableLoad(N.getOperand(0), SDOperand())) {
2411 static const unsigned Opc[3] = {
2412 X86::MOVZX32rm8, X86::MOVZX32rm16, X86::MOVZX16rm8
2416 EmitFoldedLoad(N.getOperand(0), AM);
2417 addFullAddress(BuildMI(BB, Opc[SrcIs16+DestIs16*2], 4, Result), AM);
2422 static const unsigned Opc[3] = {
2423 X86::MOVZX32rr8, X86::MOVZX32rr16, X86::MOVZX16rr8
2425 Tmp1 = SelectExpr(N.getOperand(0));
2426 BuildMI(BB, Opc[SrcIs16+DestIs16*2], 1, Result).addReg(Tmp1);
2429 case ISD::SIGN_EXTEND: {
2430 int DestIs16 = N.getValueType() == MVT::i16;
2431 int SrcIs16 = N.getOperand(0).getValueType() == MVT::i16;
2433 // FIXME: Legalize should promote bools to i8!
2434 assert(N.getOperand(0).getValueType() != MVT::i1 &&
2435 "Sign extend from bool not implemented!");
2437 if (isFoldableLoad(N.getOperand(0), SDOperand())) {
2438 static const unsigned Opc[3] = {
2439 X86::MOVSX32rm8, X86::MOVSX32rm16, X86::MOVSX16rm8
2443 EmitFoldedLoad(N.getOperand(0), AM);
2444 addFullAddress(BuildMI(BB, Opc[SrcIs16+DestIs16*2], 4, Result), AM);
2448 static const unsigned Opc[3] = {
2449 X86::MOVSX32rr8, X86::MOVSX32rr16, X86::MOVSX16rr8
2451 Tmp1 = SelectExpr(N.getOperand(0));
2452 BuildMI(BB, Opc[SrcIs16+DestIs16*2], 1, Result).addReg(Tmp1);
2456 // Fold TRUNCATE (LOAD P) into a smaller load from P.
2457 // FIXME: This should be performed by the DAGCombiner.
2458 if (isFoldableLoad(N.getOperand(0), SDOperand())) {
2459 switch (N.getValueType()) {
2460 default: assert(0 && "Unknown truncate!");
2462 case MVT::i8: Opc = X86::MOV8rm; break;
2463 case MVT::i16: Opc = X86::MOV16rm; break;
2466 EmitFoldedLoad(N.getOperand(0), AM);
2467 addFullAddress(BuildMI(BB, Opc, 4, Result), AM);
2471 // Handle cast of LARGER int to SMALLER int using a move to EAX followed by
2472 // a move out of AX or AL.
2473 switch (N.getOperand(0).getValueType()) {
2474 default: assert(0 && "Unknown truncate!");
2475 case MVT::i8: Tmp2 = X86::AL; Opc = X86::MOV8rr; break;
2476 case MVT::i16: Tmp2 = X86::AX; Opc = X86::MOV16rr; break;
2477 case MVT::i32: Tmp2 = X86::EAX; Opc = X86::MOV32rr; break;
2479 Tmp1 = SelectExpr(N.getOperand(0));
2480 BuildMI(BB, Opc, 1, Tmp2).addReg(Tmp1);
2482 switch (N.getValueType()) {
2483 default: assert(0 && "Unknown truncate!");
2485 case MVT::i8: Tmp2 = X86::AL; Opc = X86::MOV8rr; break;
2486 case MVT::i16: Tmp2 = X86::AX; Opc = X86::MOV16rr; break;
2488 BuildMI(BB, Opc, 1, Result).addReg(Tmp2);
2491 case ISD::SINT_TO_FP: {
2492 Tmp1 = SelectExpr(N.getOperand(0)); // Get the operand register
2493 unsigned PromoteOpcode = 0;
2495 // We can handle any sint to fp with the direct sse conversion instructions.
2497 Opc = (N.getValueType() == MVT::f64) ? X86::CVTSI2SDrr : X86::CVTSI2SSrr;
2498 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
2502 ContainsFPCode = true;
2504 // Spill the integer to memory and reload it from there.
2505 MVT::ValueType SrcTy = N.getOperand(0).getValueType();
2506 unsigned Size = MVT::getSizeInBits(SrcTy)/8;
2507 MachineFunction *F = BB->getParent();
2508 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
2512 addFrameReference(BuildMI(BB, X86::MOV32mr, 5), FrameIdx).addReg(Tmp1);
2513 addFrameReference(BuildMI(BB, X86::FILD32m, 5, Result), FrameIdx);
2516 addFrameReference(BuildMI(BB, X86::MOV16mr, 5), FrameIdx).addReg(Tmp1);
2517 addFrameReference(BuildMI(BB, X86::FILD16m, 5, Result), FrameIdx);
2519 default: break; // No promotion required.
2523 case ISD::FP_TO_SINT:
2524 Tmp1 = SelectExpr(N.getOperand(0)); // Get the operand register
2526 // If the target supports SSE2 and is performing FP operations in SSE regs
2527 // instead of the FP stack, then we can use the efficient CVTSS2SI and
2528 // CVTSD2SI instructions.
2529 assert(X86ScalarSSE);
2530 if (MVT::f32 == N.getOperand(0).getValueType()) {
2531 BuildMI(BB, X86::CVTTSS2SIrr, 1, Result).addReg(Tmp1);
2532 } else if (MVT::f64 == N.getOperand(0).getValueType()) {
2533 BuildMI(BB, X86::CVTTSD2SIrr, 1, Result).addReg(Tmp1);
2535 assert(0 && "Not an f32 or f64?");
2541 Op0 = N.getOperand(0);
2542 Op1 = N.getOperand(1);
2544 if (isFoldableLoad(Op0, Op1, true)) {
2545 std::swap(Op0, Op1);
2549 if (isFoldableLoad(Op1, Op0, true)) {
2551 switch (N.getValueType()) {
2552 default: assert(0 && "Cannot add this type!");
2554 case MVT::i8: Opc = X86::ADD8rm; break;
2555 case MVT::i16: Opc = X86::ADD16rm; break;
2556 case MVT::i32: Opc = X86::ADD32rm; break;
2557 case MVT::f32: Opc = X86::ADDSSrm; break;
2559 // For F64, handle promoted load operations (from F32) as well!
2561 assert(Op1.getOpcode() == ISD::LOAD && "SSE load not promoted");
2564 Opc = Op1.getOpcode() == ISD::LOAD ? X86::FADD64m : X86::FADD32m;
2569 EmitFoldedLoad(Op1, AM);
2570 Tmp1 = SelectExpr(Op0);
2571 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
2575 // See if we can codegen this as an LEA to fold operations together.
2576 if (N.getValueType() == MVT::i32) {
2578 X86ISelAddressMode AM;
2579 MatchAddress(N, AM);
2580 ExprMap[N] = Result;
2582 // If this is not just an add, emit the LEA. For a simple add (like
2583 // reg+reg or reg+imm), we just emit an add. It might be a good idea to
2584 // leave this as LEA, then peephole it to 'ADD' after two address elim
2586 if (AM.Scale != 1 || AM.BaseType == X86ISelAddressMode::FrameIndexBase||
2587 AM.GV || (AM.Base.Reg.Val && AM.IndexReg.Val && AM.Disp)) {
2588 X86AddressMode XAM = SelectAddrExprs(AM);
2589 addFullAddress(BuildMI(BB, X86::LEA32r, 4, Result), XAM);
2594 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
2596 if (CN->getValue() == 1) { // add X, 1 -> inc X
2597 switch (N.getValueType()) {
2598 default: assert(0 && "Cannot integer add this type!");
2599 case MVT::i8: Opc = X86::INC8r; break;
2600 case MVT::i16: Opc = X86::INC16r; break;
2601 case MVT::i32: Opc = X86::INC32r; break;
2603 } else if (CN->isAllOnesValue()) { // add X, -1 -> dec X
2604 switch (N.getValueType()) {
2605 default: assert(0 && "Cannot integer add this type!");
2606 case MVT::i8: Opc = X86::DEC8r; break;
2607 case MVT::i16: Opc = X86::DEC16r; break;
2608 case MVT::i32: Opc = X86::DEC32r; break;
2613 Tmp1 = SelectExpr(Op0);
2614 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
2618 switch (N.getValueType()) {
2619 default: assert(0 && "Cannot add this type!");
2620 case MVT::i8: Opc = X86::ADD8ri; break;
2621 case MVT::i16: Opc = X86::ADD16ri; break;
2622 case MVT::i32: Opc = X86::ADD32ri; break;
2625 Tmp1 = SelectExpr(Op0);
2626 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
2631 switch (N.getValueType()) {
2632 default: assert(0 && "Cannot add this type!");
2633 case MVT::i8: Opc = X86::ADD8rr; break;
2634 case MVT::i16: Opc = X86::ADD16rr; break;
2635 case MVT::i32: Opc = X86::ADD32rr; break;
2636 case MVT::f32: Opc = X86::ADDSSrr; break;
2637 case MVT::f64: Opc = X86ScalarSSE ? X86::ADDSDrr : X86::FpADD; break;
2640 if (getRegPressure(Op0) > getRegPressure(Op1)) {
2641 Tmp1 = SelectExpr(Op0);
2642 Tmp2 = SelectExpr(Op1);
2644 Tmp2 = SelectExpr(Op1);
2645 Tmp1 = SelectExpr(Op0);
2648 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
2652 Tmp1 = SelectExpr(Node->getOperand(0));
2654 Opc = (N.getValueType() == MVT::f32) ? X86::SQRTSSrr : X86::SQRTSDrr;
2655 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
2657 BuildMI(BB, X86::FSQRT, 1, Result).addReg(Tmp1);
2662 // Once we can spill 16 byte constants into the constant pool, we can
2663 // implement SSE equivalents of FABS and FCHS.
2668 assert(N.getValueType()==MVT::f64 && "Illegal type for this operation");
2669 Tmp1 = SelectExpr(Node->getOperand(0));
2670 switch (N.getOpcode()) {
2671 default: assert(0 && "Unreachable!");
2672 case ISD::FABS: BuildMI(BB, X86::FABS, 1, Result).addReg(Tmp1); break;
2673 case ISD::FNEG: BuildMI(BB, X86::FCHS, 1, Result).addReg(Tmp1); break;
2674 case ISD::FSIN: BuildMI(BB, X86::FSIN, 1, Result).addReg(Tmp1); break;
2675 case ISD::FCOS: BuildMI(BB, X86::FCOS, 1, Result).addReg(Tmp1); break;
2680 switch (N.getValueType()) {
2681 default: assert(0 && "Unsupported VT!");
2682 case MVT::i8: Tmp2 = X86::MUL8r; break;
2683 case MVT::i16: Tmp2 = X86::MUL16r; break;
2684 case MVT::i32: Tmp2 = X86::MUL32r; break;
2688 unsigned MovOpc, LowReg, HiReg;
2689 switch (N.getValueType()) {
2690 default: assert(0 && "Unsupported VT!");
2692 MovOpc = X86::MOV8rr;
2698 MovOpc = X86::MOV16rr;
2704 MovOpc = X86::MOV32rr;
2710 if (Node->getOpcode() != ISD::MULHS)
2711 Opc = Tmp2; // Get the MULHU opcode.
2713 Op0 = Node->getOperand(0);
2714 Op1 = Node->getOperand(1);
2715 if (getRegPressure(Op0) > getRegPressure(Op1)) {
2716 Tmp1 = SelectExpr(Op0);
2717 Tmp2 = SelectExpr(Op1);
2719 Tmp2 = SelectExpr(Op1);
2720 Tmp1 = SelectExpr(Op0);
2723 // FIXME: Implement folding of loads into the memory operands here!
2724 BuildMI(BB, MovOpc, 1, LowReg).addReg(Tmp1);
2725 BuildMI(BB, Opc, 1).addReg(Tmp2);
2726 BuildMI(BB, MovOpc, 1, Result).addReg(HiReg);
2735 static const unsigned SUBTab[] = {
2736 X86::SUB8ri, X86::SUB16ri, X86::SUB32ri, 0, 0,
2737 X86::SUB8rm, X86::SUB16rm, X86::SUB32rm, X86::FSUB32m, X86::FSUB64m,
2738 X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, X86::FpSUB , X86::FpSUB,
2740 static const unsigned SSE_SUBTab[] = {
2741 X86::SUB8ri, X86::SUB16ri, X86::SUB32ri, 0, 0,
2742 X86::SUB8rm, X86::SUB16rm, X86::SUB32rm, X86::SUBSSrm, X86::SUBSDrm,
2743 X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, X86::SUBSSrr, X86::SUBSDrr,
2745 static const unsigned MULTab[] = {
2746 0, X86::IMUL16rri, X86::IMUL32rri, 0, 0,
2747 0, X86::IMUL16rm , X86::IMUL32rm, X86::FMUL32m, X86::FMUL64m,
2748 0, X86::IMUL16rr , X86::IMUL32rr, X86::FpMUL , X86::FpMUL,
2750 static const unsigned SSE_MULTab[] = {
2751 0, X86::IMUL16rri, X86::IMUL32rri, 0, 0,
2752 0, X86::IMUL16rm , X86::IMUL32rm, X86::MULSSrm, X86::MULSDrm,
2753 0, X86::IMUL16rr , X86::IMUL32rr, X86::MULSSrr, X86::MULSDrr,
2755 static const unsigned ANDTab[] = {
2756 X86::AND8ri, X86::AND16ri, X86::AND32ri, 0, 0,
2757 X86::AND8rm, X86::AND16rm, X86::AND32rm, 0, 0,
2758 X86::AND8rr, X86::AND16rr, X86::AND32rr, 0, 0,
2760 static const unsigned ORTab[] = {
2761 X86::OR8ri, X86::OR16ri, X86::OR32ri, 0, 0,
2762 X86::OR8rm, X86::OR16rm, X86::OR32rm, 0, 0,
2763 X86::OR8rr, X86::OR16rr, X86::OR32rr, 0, 0,
2765 static const unsigned XORTab[] = {
2766 X86::XOR8ri, X86::XOR16ri, X86::XOR32ri, 0, 0,
2767 X86::XOR8rm, X86::XOR16rm, X86::XOR32rm, 0, 0,
2768 X86::XOR8rr, X86::XOR16rr, X86::XOR32rr, 0, 0,
2771 Op0 = Node->getOperand(0);
2772 Op1 = Node->getOperand(1);
2774 if (Node->getOpcode() == ISD::OR && Op0.hasOneUse() && Op1.hasOneUse())
2775 if (EmitOrOpOp(Op0, Op1, Result)) // Match SHLD, SHRD, and rotates.
2778 if (Node->getOpcode() == ISD::SUB)
2779 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(0)))
2780 if (CN->isNullValue()) { // 0 - N -> neg N
2781 switch (N.getValueType()) {
2782 default: assert(0 && "Cannot sub this type!");
2784 case MVT::i8: Opc = X86::NEG8r; break;
2785 case MVT::i16: Opc = X86::NEG16r; break;
2786 case MVT::i32: Opc = X86::NEG32r; break;
2788 Tmp1 = SelectExpr(N.getOperand(1));
2789 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
2793 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
2794 if (CN->isAllOnesValue() && Node->getOpcode() == ISD::XOR) {
2796 switch (N.getValueType()) {
2797 default: assert(0 && "Cannot add this type!");
2798 case MVT::i1: break; // Not supported, don't invert upper bits!
2799 case MVT::i8: Opc = X86::NOT8r; break;
2800 case MVT::i16: Opc = X86::NOT16r; break;
2801 case MVT::i32: Opc = X86::NOT32r; break;
2804 Tmp1 = SelectExpr(Op0);
2805 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
2810 // Fold common multiplies into LEA instructions.
2811 if (Node->getOpcode() == ISD::MUL && N.getValueType() == MVT::i32) {
2812 switch ((int)CN->getValue()) {
2817 // Remove N from exprmap so SelectAddress doesn't get confused.
2820 SelectAddress(N, AM);
2821 // Restore it to the map.
2822 ExprMap[N] = Result;
2823 addFullAddress(BuildMI(BB, X86::LEA32r, 4, Result), AM);
2828 switch (N.getValueType()) {
2829 default: assert(0 && "Cannot xor this type!");
2831 case MVT::i8: Opc = 0; break;
2832 case MVT::i16: Opc = 1; break;
2833 case MVT::i32: Opc = 2; break;
2835 switch (Node->getOpcode()) {
2836 default: assert(0 && "Unreachable!");
2837 case ISD::SUB: Opc = X86ScalarSSE ? SSE_SUBTab[Opc] : SUBTab[Opc]; break;
2838 case ISD::MUL: Opc = X86ScalarSSE ? SSE_MULTab[Opc] : MULTab[Opc]; break;
2839 case ISD::AND: Opc = ANDTab[Opc]; break;
2840 case ISD::OR: Opc = ORTab[Opc]; break;
2841 case ISD::XOR: Opc = XORTab[Opc]; break;
2843 if (Opc) { // Can't fold MUL:i8 R, imm
2844 Tmp1 = SelectExpr(Op0);
2845 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
2850 if (isFoldableLoad(Op0, Op1, true))
2851 if (Node->getOpcode() != ISD::SUB) {
2852 std::swap(Op0, Op1);
2855 // For FP, emit 'reverse' subract, with a memory operand.
2856 if (N.getValueType() == MVT::f64 && !X86ScalarSSE) {
2857 if (Op0.getOpcode() == ISD::EXTLOAD)
2858 Opc = X86::FSUBR32m;
2860 Opc = X86::FSUBR64m;
2863 EmitFoldedLoad(Op0, AM);
2864 Tmp1 = SelectExpr(Op1);
2865 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
2870 if (isFoldableLoad(Op1, Op0, true)) {
2872 switch (N.getValueType()) {
2873 default: assert(0 && "Cannot operate on this type!");
2875 case MVT::i8: Opc = 5; break;
2876 case MVT::i16: Opc = 6; break;
2877 case MVT::i32: Opc = 7; break;
2878 case MVT::f32: Opc = 8; break;
2879 // For F64, handle promoted load operations (from F32) as well!
2881 assert((!X86ScalarSSE || Op1.getOpcode() == ISD::LOAD) &&
2882 "SSE load should have been promoted");
2883 Opc = Op1.getOpcode() == ISD::LOAD ? 9 : 8; break;
2885 switch (Node->getOpcode()) {
2886 default: assert(0 && "Unreachable!");
2887 case ISD::SUB: Opc = X86ScalarSSE ? SSE_SUBTab[Opc] : SUBTab[Opc]; break;
2888 case ISD::MUL: Opc = X86ScalarSSE ? SSE_MULTab[Opc] : MULTab[Opc]; break;
2889 case ISD::AND: Opc = ANDTab[Opc]; break;
2890 case ISD::OR: Opc = ORTab[Opc]; break;
2891 case ISD::XOR: Opc = XORTab[Opc]; break;
2895 EmitFoldedLoad(Op1, AM);
2896 Tmp1 = SelectExpr(Op0);
2898 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
2900 assert(Node->getOpcode() == ISD::MUL &&
2901 N.getValueType() == MVT::i8 && "Unexpected situation!");
2902 // Must use the MUL instruction, which forces use of AL.
2903 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(Tmp1);
2904 addFullAddress(BuildMI(BB, X86::MUL8m, 1), AM);
2905 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
2910 if (getRegPressure(Op0) > getRegPressure(Op1)) {
2911 Tmp1 = SelectExpr(Op0);
2912 Tmp2 = SelectExpr(Op1);
2914 Tmp2 = SelectExpr(Op1);
2915 Tmp1 = SelectExpr(Op0);
2918 switch (N.getValueType()) {
2919 default: assert(0 && "Cannot add this type!");
2921 case MVT::i8: Opc = 10; break;
2922 case MVT::i16: Opc = 11; break;
2923 case MVT::i32: Opc = 12; break;
2924 case MVT::f32: Opc = 13; break;
2925 case MVT::f64: Opc = 14; break;
2927 switch (Node->getOpcode()) {
2928 default: assert(0 && "Unreachable!");
2929 case ISD::SUB: Opc = X86ScalarSSE ? SSE_SUBTab[Opc] : SUBTab[Opc]; break;
2930 case ISD::MUL: Opc = X86ScalarSSE ? SSE_MULTab[Opc] : MULTab[Opc]; break;
2931 case ISD::AND: Opc = ANDTab[Opc]; break;
2932 case ISD::OR: Opc = ORTab[Opc]; break;
2933 case ISD::XOR: Opc = XORTab[Opc]; break;
2936 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
2938 assert(Node->getOpcode() == ISD::MUL &&
2939 N.getValueType() == MVT::i8 && "Unexpected situation!");
2940 // Must use the MUL instruction, which forces use of AL.
2941 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(Tmp1);
2942 BuildMI(BB, X86::MUL8r, 1).addReg(Tmp2);
2943 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
2947 case ISD::ADD_PARTS:
2948 case ISD::SUB_PARTS: {
2949 assert(N.getNumOperands() == 4 && N.getValueType() == MVT::i32 &&
2950 "Not an i64 add/sub!");
2951 // Emit all of the operands.
2952 std::vector<unsigned> InVals;
2953 for (unsigned i = 0, e = N.getNumOperands(); i != e; ++i)
2954 InVals.push_back(SelectExpr(N.getOperand(i)));
2955 if (N.getOpcode() == ISD::ADD_PARTS) {
2956 BuildMI(BB, X86::ADD32rr, 2, Result).addReg(InVals[0]).addReg(InVals[2]);
2957 BuildMI(BB, X86::ADC32rr,2,Result+1).addReg(InVals[1]).addReg(InVals[3]);
2959 BuildMI(BB, X86::SUB32rr, 2, Result).addReg(InVals[0]).addReg(InVals[2]);
2960 BuildMI(BB, X86::SBB32rr, 2,Result+1).addReg(InVals[1]).addReg(InVals[3]);
2962 return Result+N.ResNo;
2965 case ISD::SHL_PARTS:
2966 case ISD::SRA_PARTS:
2967 case ISD::SRL_PARTS: {
2968 assert(N.getNumOperands() == 3 && N.getValueType() == MVT::i32 &&
2969 "Not an i64 shift!");
2970 unsigned ShiftOpLo = SelectExpr(N.getOperand(0));
2971 unsigned ShiftOpHi = SelectExpr(N.getOperand(1));
2972 unsigned TmpReg = MakeReg(MVT::i32);
2973 if (N.getOpcode() == ISD::SRA_PARTS) {
2974 // If this is a SHR of a Long, then we need to do funny sign extension
2975 // stuff. TmpReg gets the value to use as the high-part if we are
2976 // shifting more than 32 bits.
2977 BuildMI(BB, X86::SAR32ri, 2, TmpReg).addReg(ShiftOpHi).addImm(31);
2979 // Other shifts use a fixed zero value if the shift is more than 32 bits.
2980 BuildMI(BB, X86::MOV32ri, 1, TmpReg).addImm(0);
2983 // Initialize CL with the shift amount.
2984 unsigned ShiftAmountReg = SelectExpr(N.getOperand(2));
2985 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(ShiftAmountReg);
2987 unsigned TmpReg2 = MakeReg(MVT::i32);
2988 unsigned TmpReg3 = MakeReg(MVT::i32);
2989 if (N.getOpcode() == ISD::SHL_PARTS) {
2990 // TmpReg2 = shld inHi, inLo
2991 BuildMI(BB, X86::SHLD32rrCL, 2,TmpReg2).addReg(ShiftOpHi)
2993 // TmpReg3 = shl inLo, CL
2994 BuildMI(BB, X86::SHL32rCL, 1, TmpReg3).addReg(ShiftOpLo);
2996 // Set the flags to indicate whether the shift was by more than 32 bits.
2997 BuildMI(BB, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);
2999 // DestHi = (>32) ? TmpReg3 : TmpReg2;
3000 BuildMI(BB, X86::CMOVNE32rr, 2,
3001 Result+1).addReg(TmpReg2).addReg(TmpReg3);
3002 // DestLo = (>32) ? TmpReg : TmpReg3;
3003 BuildMI(BB, X86::CMOVNE32rr, 2,
3004 Result).addReg(TmpReg3).addReg(TmpReg);
3006 // TmpReg2 = shrd inLo, inHi
3007 BuildMI(BB, X86::SHRD32rrCL,2,TmpReg2).addReg(ShiftOpLo)
3009 // TmpReg3 = s[ah]r inHi, CL
3010 BuildMI(BB, N.getOpcode() == ISD::SRA_PARTS ? X86::SAR32rCL
3011 : X86::SHR32rCL, 1, TmpReg3)
3014 // Set the flags to indicate whether the shift was by more than 32 bits.
3015 BuildMI(BB, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);
3017 // DestLo = (>32) ? TmpReg3 : TmpReg2;
3018 BuildMI(BB, X86::CMOVNE32rr, 2,
3019 Result).addReg(TmpReg2).addReg(TmpReg3);
3021 // DestHi = (>32) ? TmpReg : TmpReg3;
3022 BuildMI(BB, X86::CMOVNE32rr, 2,
3023 Result+1).addReg(TmpReg3).addReg(TmpReg);
3025 return Result+N.ResNo;
3029 EmitSelectCC(N.getOperand(0), N.getOperand(1), N.getOperand(2),
3030 N.getValueType(), Result);
3037 assert((N.getOpcode() != ISD::SREM || MVT::isInteger(N.getValueType())) &&
3038 "We don't support this operator!");
3040 if (N.getOpcode() == ISD::SDIV) {
3041 // We can fold loads into FpDIVs, but not really into any others.
3042 if (N.getValueType() == MVT::f64 && !X86ScalarSSE) {
3043 // Check for reversed and unreversed DIV.
3044 if (isFoldableLoad(N.getOperand(0), N.getOperand(1), true)) {
3045 if (N.getOperand(0).getOpcode() == ISD::EXTLOAD)
3046 Opc = X86::FDIVR32m;
3048 Opc = X86::FDIVR64m;
3050 EmitFoldedLoad(N.getOperand(0), AM);
3051 Tmp1 = SelectExpr(N.getOperand(1));
3052 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
3054 } else if (isFoldableLoad(N.getOperand(1), N.getOperand(0), true) &&
3055 N.getOperand(1).getOpcode() == ISD::LOAD) {
3056 if (N.getOperand(1).getOpcode() == ISD::EXTLOAD)
3061 EmitFoldedLoad(N.getOperand(1), AM);
3062 Tmp1 = SelectExpr(N.getOperand(0));
3063 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
3068 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
3069 // FIXME: These special cases should be handled by the lowering impl!
3070 unsigned RHS = CN->getValue();
3076 if (RHS && (RHS & (RHS-1)) == 0) { // Signed division by power of 2?
3077 unsigned Log = Log2_32(RHS);
3078 unsigned SAROpc, SHROpc, ADDOpc, NEGOpc;
3079 switch (N.getValueType()) {
3080 default: assert("Unknown type to signed divide!");
3082 SAROpc = X86::SAR8ri;
3083 SHROpc = X86::SHR8ri;
3084 ADDOpc = X86::ADD8rr;
3085 NEGOpc = X86::NEG8r;
3088 SAROpc = X86::SAR16ri;
3089 SHROpc = X86::SHR16ri;
3090 ADDOpc = X86::ADD16rr;
3091 NEGOpc = X86::NEG16r;
3094 SAROpc = X86::SAR32ri;
3095 SHROpc = X86::SHR32ri;
3096 ADDOpc = X86::ADD32rr;
3097 NEGOpc = X86::NEG32r;
3100 unsigned RegSize = MVT::getSizeInBits(N.getValueType());
3101 Tmp1 = SelectExpr(N.getOperand(0));
3104 TmpReg = MakeReg(N.getValueType());
3105 BuildMI(BB, SAROpc, 2, TmpReg).addReg(Tmp1).addImm(Log-1);
3109 unsigned TmpReg2 = MakeReg(N.getValueType());
3110 BuildMI(BB, SHROpc, 2, TmpReg2).addReg(TmpReg).addImm(RegSize-Log);
3111 unsigned TmpReg3 = MakeReg(N.getValueType());
3112 BuildMI(BB, ADDOpc, 2, TmpReg3).addReg(Tmp1).addReg(TmpReg2);
3114 unsigned TmpReg4 = isNeg ? MakeReg(N.getValueType()) : Result;
3115 BuildMI(BB, SAROpc, 2, TmpReg4).addReg(TmpReg3).addImm(Log);
3117 BuildMI(BB, NEGOpc, 1, Result).addReg(TmpReg4);
3123 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3124 Tmp1 = SelectExpr(N.getOperand(0));
3125 Tmp2 = SelectExpr(N.getOperand(1));
3127 Tmp2 = SelectExpr(N.getOperand(1));
3128 Tmp1 = SelectExpr(N.getOperand(0));
3131 bool isSigned = N.getOpcode() == ISD::SDIV || N.getOpcode() == ISD::SREM;
3132 bool isDiv = N.getOpcode() == ISD::SDIV || N.getOpcode() == ISD::UDIV;
3133 unsigned LoReg, HiReg, DivOpcode, MovOpcode, ClrOpcode, SExtOpcode;
3134 switch (N.getValueType()) {
3135 default: assert(0 && "Cannot sdiv this type!");
3137 DivOpcode = isSigned ? X86::IDIV8r : X86::DIV8r;
3140 MovOpcode = X86::MOV8rr;
3141 ClrOpcode = X86::MOV8ri;
3142 SExtOpcode = X86::CBW;
3145 DivOpcode = isSigned ? X86::IDIV16r : X86::DIV16r;
3148 MovOpcode = X86::MOV16rr;
3149 ClrOpcode = X86::MOV16ri;
3150 SExtOpcode = X86::CWD;
3153 DivOpcode = isSigned ? X86::IDIV32r : X86::DIV32r;
3156 MovOpcode = X86::MOV32rr;
3157 ClrOpcode = X86::MOV32ri;
3158 SExtOpcode = X86::CDQ;
3161 BuildMI(BB, X86::DIVSSrr, 2, Result).addReg(Tmp1).addReg(Tmp2);
3164 Opc = X86ScalarSSE ? X86::DIVSDrr : X86::FpDIV;
3165 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
3169 // Set up the low part.
3170 BuildMI(BB, MovOpcode, 1, LoReg).addReg(Tmp1);
3173 // Sign extend the low part into the high part.
3174 BuildMI(BB, SExtOpcode, 0);
3176 // Zero out the high part, effectively zero extending the input.
3177 BuildMI(BB, ClrOpcode, 1, HiReg).addImm(0);
3180 // Emit the DIV/IDIV instruction.
3181 BuildMI(BB, DivOpcode, 1).addReg(Tmp2);
3183 // Get the result of the divide or rem.
3184 BuildMI(BB, MovOpcode, 1, Result).addReg(isDiv ? LoReg : HiReg);
3189 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
3190 if (CN->getValue() == 1) { // X = SHL Y, 1 -> X = ADD Y, Y
3191 switch (N.getValueType()) {
3192 default: assert(0 && "Cannot shift this type!");
3193 case MVT::i8: Opc = X86::ADD8rr; break;
3194 case MVT::i16: Opc = X86::ADD16rr; break;
3195 case MVT::i32: Opc = X86::ADD32rr; break;
3197 Tmp1 = SelectExpr(N.getOperand(0));
3198 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp1);
3202 switch (N.getValueType()) {
3203 default: assert(0 && "Cannot shift this type!");
3204 case MVT::i8: Opc = X86::SHL8ri; break;
3205 case MVT::i16: Opc = X86::SHL16ri; break;
3206 case MVT::i32: Opc = X86::SHL32ri; break;
3208 Tmp1 = SelectExpr(N.getOperand(0));
3209 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
3213 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3214 Tmp1 = SelectExpr(N.getOperand(0));
3215 Tmp2 = SelectExpr(N.getOperand(1));
3217 Tmp2 = SelectExpr(N.getOperand(1));
3218 Tmp1 = SelectExpr(N.getOperand(0));
3221 switch (N.getValueType()) {
3222 default: assert(0 && "Cannot shift this type!");
3223 case MVT::i8 : Opc = X86::SHL8rCL; break;
3224 case MVT::i16: Opc = X86::SHL16rCL; break;
3225 case MVT::i32: Opc = X86::SHL32rCL; break;
3227 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
3228 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
3231 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
3232 switch (N.getValueType()) {
3233 default: assert(0 && "Cannot shift this type!");
3234 case MVT::i8: Opc = X86::SHR8ri; break;
3235 case MVT::i16: Opc = X86::SHR16ri; break;
3236 case MVT::i32: Opc = X86::SHR32ri; break;
3238 Tmp1 = SelectExpr(N.getOperand(0));
3239 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
3243 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3244 Tmp1 = SelectExpr(N.getOperand(0));
3245 Tmp2 = SelectExpr(N.getOperand(1));
3247 Tmp2 = SelectExpr(N.getOperand(1));
3248 Tmp1 = SelectExpr(N.getOperand(0));
3251 switch (N.getValueType()) {
3252 default: assert(0 && "Cannot shift this type!");
3253 case MVT::i8 : Opc = X86::SHR8rCL; break;
3254 case MVT::i16: Opc = X86::SHR16rCL; break;
3255 case MVT::i32: Opc = X86::SHR32rCL; break;
3257 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
3258 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
3261 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
3262 switch (N.getValueType()) {
3263 default: assert(0 && "Cannot shift this type!");
3264 case MVT::i8: Opc = X86::SAR8ri; break;
3265 case MVT::i16: Opc = X86::SAR16ri; break;
3266 case MVT::i32: Opc = X86::SAR32ri; break;
3268 Tmp1 = SelectExpr(N.getOperand(0));
3269 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
3273 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3274 Tmp1 = SelectExpr(N.getOperand(0));
3275 Tmp2 = SelectExpr(N.getOperand(1));
3277 Tmp2 = SelectExpr(N.getOperand(1));
3278 Tmp1 = SelectExpr(N.getOperand(0));
3281 switch (N.getValueType()) {
3282 default: assert(0 && "Cannot shift this type!");
3283 case MVT::i8 : Opc = X86::SAR8rCL; break;
3284 case MVT::i16: Opc = X86::SAR16rCL; break;
3285 case MVT::i32: Opc = X86::SAR32rCL; break;
3287 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
3288 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
3292 EmitCMP(N.getOperand(0), N.getOperand(1), Node->hasOneUse());
3293 EmitSetCC(BB, Result, cast<CondCodeSDNode>(N.getOperand(2))->get(),
3294 MVT::isFloatingPoint(N.getOperand(1).getValueType()));
3297 // Make sure we generate both values.
3298 if (Result != 1) { // Generate the token
3299 if (!ExprMap.insert(std::make_pair(N.getValue(1), 1)).second)
3300 assert(0 && "Load already emitted!?");
3302 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
3304 switch (Node->getValueType(0)) {
3305 default: assert(0 && "Cannot load this type!");
3307 case MVT::i8: Opc = X86::MOV8rm; break;
3308 case MVT::i16: Opc = X86::MOV16rm; break;
3309 case MVT::i32: Opc = X86::MOV32rm; break;
3310 case MVT::f32: Opc = X86::MOVSSrm; break;
3316 ContainsFPCode = true;
3321 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N.getOperand(1))){
3322 Select(N.getOperand(0));
3323 addConstantPoolReference(BuildMI(BB, Opc, 4, Result), CP->getIndex());
3327 SDOperand Chain = N.getOperand(0);
3328 SDOperand Address = N.getOperand(1);
3329 if (getRegPressure(Chain) > getRegPressure(Address)) {
3331 SelectAddress(Address, AM);
3333 SelectAddress(Address, AM);
3337 addFullAddress(BuildMI(BB, Opc, 4, Result), AM);
3340 case X86ISD::FILD64m:
3341 // Make sure we generate both values.
3342 assert(Result != 1 && N.getValueType() == MVT::f64);
3343 if (!ExprMap.insert(std::make_pair(N.getValue(1), 1)).second)
3344 assert(0 && "Load already emitted!?");
3349 SDOperand Chain = N.getOperand(0);
3350 SDOperand Address = N.getOperand(1);
3351 if (getRegPressure(Chain) > getRegPressure(Address)) {
3353 SelectAddress(Address, AM);
3355 SelectAddress(Address, AM);
3359 addFullAddress(BuildMI(BB, X86::FILD64m, 4, Result), AM);
3363 case ISD::EXTLOAD: // Arbitrarily codegen extloads as MOVZX*
3364 case ISD::ZEXTLOAD: {
3365 // Make sure we generate both values.
3367 ExprMap[N.getValue(1)] = 1; // Generate the token
3369 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
3371 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N.getOperand(1)))
3372 if (Node->getValueType(0) == MVT::f64) {
3373 assert(cast<VTSDNode>(Node->getOperand(3))->getVT() == MVT::f32 &&
3375 addConstantPoolReference(BuildMI(BB, X86::FLD32m, 4, Result),
3381 if (getRegPressure(Node->getOperand(0)) >
3382 getRegPressure(Node->getOperand(1))) {
3383 Select(Node->getOperand(0)); // chain
3384 SelectAddress(Node->getOperand(1), AM);
3386 SelectAddress(Node->getOperand(1), AM);
3387 Select(Node->getOperand(0)); // chain
3390 switch (Node->getValueType(0)) {
3391 default: assert(0 && "Unknown type to sign extend to.");
3393 assert(cast<VTSDNode>(Node->getOperand(3))->getVT() == MVT::f32 &&
3395 addFullAddress(BuildMI(BB, X86::FLD32m, 5, Result), AM);
3398 switch (cast<VTSDNode>(Node->getOperand(3))->getVT()) {
3400 assert(0 && "Bad zero extend!");
3403 addFullAddress(BuildMI(BB, X86::MOVZX32rm8, 5, Result), AM);
3406 addFullAddress(BuildMI(BB, X86::MOVZX32rm16, 5, Result), AM);
3411 assert(cast<VTSDNode>(Node->getOperand(3))->getVT() <= MVT::i8 &&
3412 "Bad zero extend!");
3413 addFullAddress(BuildMI(BB, X86::MOVSX16rm8, 5, Result), AM);
3416 assert(cast<VTSDNode>(Node->getOperand(3))->getVT() == MVT::i1 &&
3417 "Bad zero extend!");
3418 addFullAddress(BuildMI(BB, X86::MOV8rm, 5, Result), AM);
3423 case ISD::SEXTLOAD: {
3424 // Make sure we generate both values.
3426 ExprMap[N.getValue(1)] = 1; // Generate the token
3428 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
3431 if (getRegPressure(Node->getOperand(0)) >
3432 getRegPressure(Node->getOperand(1))) {
3433 Select(Node->getOperand(0)); // chain
3434 SelectAddress(Node->getOperand(1), AM);
3436 SelectAddress(Node->getOperand(1), AM);
3437 Select(Node->getOperand(0)); // chain
3440 switch (Node->getValueType(0)) {
3441 case MVT::i8: assert(0 && "Cannot sign extend from bool!");
3442 default: assert(0 && "Unknown type to sign extend to.");
3444 switch (cast<VTSDNode>(Node->getOperand(3))->getVT()) {
3446 case MVT::i1: assert(0 && "Cannot sign extend from bool!");
3448 addFullAddress(BuildMI(BB, X86::MOVSX32rm8, 5, Result), AM);
3451 addFullAddress(BuildMI(BB, X86::MOVSX32rm16, 5, Result), AM);
3456 assert(cast<VTSDNode>(Node->getOperand(3))->getVT() == MVT::i8 &&
3457 "Cannot sign extend from bool!");
3458 addFullAddress(BuildMI(BB, X86::MOVSX16rm8, 5, Result), AM);
3464 case ISD::DYNAMIC_STACKALLOC:
3465 // Generate both result values.
3467 ExprMap[N.getValue(1)] = 1; // Generate the token
3469 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
3471 // FIXME: We are currently ignoring the requested alignment for handling
3472 // greater than the stack alignment. This will need to be revisited at some
3473 // point. Align = N.getOperand(2);
3475 if (!isa<ConstantSDNode>(N.getOperand(2)) ||
3476 cast<ConstantSDNode>(N.getOperand(2))->getValue() != 0) {
3477 std::cerr << "Cannot allocate stack object with greater alignment than"
3478 << " the stack alignment yet!";
3482 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
3483 Select(N.getOperand(0));
3484 BuildMI(BB, X86::SUB32ri, 2, X86::ESP).addReg(X86::ESP)
3485 .addImm(CN->getValue());
3487 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3488 Select(N.getOperand(0));
3489 Tmp1 = SelectExpr(N.getOperand(1));
3491 Tmp1 = SelectExpr(N.getOperand(1));
3492 Select(N.getOperand(0));
3495 // Subtract size from stack pointer, thereby allocating some space.
3496 BuildMI(BB, X86::SUB32rr, 2, X86::ESP).addReg(X86::ESP).addReg(Tmp1);
3499 // Put a pointer to the space into the result register, by copying the stack
3501 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::ESP);
3504 case X86ISD::TAILCALL:
3505 case X86ISD::CALL: {
3506 // The chain for this call is now lowered.
3507 ExprMap.insert(std::make_pair(N.getValue(0), 1));
3509 bool isDirect = isa<GlobalAddressSDNode>(N.getOperand(1)) ||
3510 isa<ExternalSymbolSDNode>(N.getOperand(1));
3511 unsigned Callee = 0;
3513 Select(N.getOperand(0));
3515 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3516 Select(N.getOperand(0));
3517 Callee = SelectExpr(N.getOperand(1));
3519 Callee = SelectExpr(N.getOperand(1));
3520 Select(N.getOperand(0));
3524 // If this call has values to pass in registers, do so now.
3525 if (Node->getNumOperands() > 4) {
3526 // The first value is passed in (a part of) EAX, the second in EDX.
3527 unsigned RegOp1 = SelectExpr(N.getOperand(4));
3529 Node->getNumOperands() > 5 ? SelectExpr(N.getOperand(5)) : 0;
3531 switch (N.getOperand(4).getValueType()) {
3532 default: assert(0 && "Bad thing to pass in regs");
3534 case MVT::i8: BuildMI(BB, X86::MOV8rr , 1,X86::AL).addReg(RegOp1); break;
3535 case MVT::i16: BuildMI(BB, X86::MOV16rr, 1,X86::AX).addReg(RegOp1); break;
3536 case MVT::i32: BuildMI(BB, X86::MOV32rr, 1,X86::EAX).addReg(RegOp1);break;
3539 switch (N.getOperand(5).getValueType()) {
3540 default: assert(0 && "Bad thing to pass in regs");
3543 BuildMI(BB, X86::MOV8rr , 1, X86::DL).addReg(RegOp2);
3546 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(RegOp2);
3549 BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(RegOp2);
3554 if (GlobalAddressSDNode *GASD =
3555 dyn_cast<GlobalAddressSDNode>(N.getOperand(1))) {
3556 BuildMI(BB, X86::CALLpcrel32, 1).addGlobalAddress(GASD->getGlobal(),true);
3557 } else if (ExternalSymbolSDNode *ESSDN =
3558 dyn_cast<ExternalSymbolSDNode>(N.getOperand(1))) {
3559 BuildMI(BB, X86::CALLpcrel32,
3560 1).addExternalSymbol(ESSDN->getSymbol(), true);
3562 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
3563 Select(N.getOperand(0));
3564 Tmp1 = SelectExpr(N.getOperand(1));
3566 Tmp1 = SelectExpr(N.getOperand(1));
3567 Select(N.getOperand(0));
3570 BuildMI(BB, X86::CALL32r, 1).addReg(Tmp1);
3573 // Get caller stack amount and amount the callee added to the stack pointer.
3574 Tmp1 = cast<ConstantSDNode>(N.getOperand(2))->getValue();
3575 Tmp2 = cast<ConstantSDNode>(N.getOperand(3))->getValue();
3576 BuildMI(BB, X86::ADJCALLSTACKUP, 2).addImm(Tmp1).addImm(Tmp2);
3578 if (Node->getNumValues() != 1)
3579 switch (Node->getValueType(1)) {
3580 default: assert(0 && "Unknown value type for call result!");
3581 case MVT::Other: return 1;
3584 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
3587 BuildMI(BB, X86::MOV16rr, 1, Result).addReg(X86::AX);
3590 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::EAX);
3591 if (Node->getNumValues() == 3 && Node->getValueType(2) == MVT::i32)
3592 BuildMI(BB, X86::MOV32rr, 1, Result+1).addReg(X86::EDX);
3594 case MVT::f64: // Floating-point return values live in %ST(0)
3596 ContainsFPCode = true;
3597 BuildMI(BB, X86::FpGETRESULT, 1, X86::FP0);
3599 unsigned Size = MVT::getSizeInBits(MVT::f64)/8;
3600 MachineFunction *F = BB->getParent();
3601 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
3602 addFrameReference(BuildMI(BB, X86::FST64m, 5), FrameIdx).addReg(X86::FP0);
3603 addFrameReference(BuildMI(BB, X86::MOVSDrm, 4, Result), FrameIdx);
3606 ContainsFPCode = true;
3607 BuildMI(BB, X86::FpGETRESULT, 1, Result);
3611 return Result+N.ResNo-1;
3614 // First, determine that the size of the operand falls within the acceptable
3615 // range for this architecture.
3617 if (Node->getOperand(1).getValueType() != MVT::i16) {
3618 std::cerr << "llvm.readport: Address size is not 16 bits\n";
3622 // Make sure we generate both values.
3623 if (Result != 1) { // Generate the token
3624 if (!ExprMap.insert(std::make_pair(N.getValue(1), 1)).second)
3625 assert(0 && "readport already emitted!?");
3627 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
3629 Select(Node->getOperand(0)); // Select the chain.
3631 // If the port is a single-byte constant, use the immediate form.
3632 if (ConstantSDNode *Port = dyn_cast<ConstantSDNode>(Node->getOperand(1)))
3633 if ((Port->getValue() & 255) == Port->getValue()) {
3634 switch (Node->getValueType(0)) {
3636 BuildMI(BB, X86::IN8ri, 1).addImm(Port->getValue());
3637 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
3640 BuildMI(BB, X86::IN16ri, 1).addImm(Port->getValue());
3641 BuildMI(BB, X86::MOV16rr, 1, Result).addReg(X86::AX);
3644 BuildMI(BB, X86::IN32ri, 1).addImm(Port->getValue());
3645 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::EAX);
3651 // Now, move the I/O port address into the DX register and use the IN
3652 // instruction to get the input data.
3654 Tmp1 = SelectExpr(Node->getOperand(1));
3655 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(Tmp1);
3656 switch (Node->getValueType(0)) {
3658 BuildMI(BB, X86::IN8rr, 0);
3659 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
3662 BuildMI(BB, X86::IN16rr, 0);
3663 BuildMI(BB, X86::MOV16rr, 1, Result).addReg(X86::AX);
3666 BuildMI(BB, X86::IN32rr, 0);
3667 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::EAX);
3670 std::cerr << "Cannot do input on this data type";
3679 /// TryToFoldLoadOpStore - Given a store node, try to fold together a
3680 /// load/op/store instruction. If successful return true.
3681 bool ISel::TryToFoldLoadOpStore(SDNode *Node) {
3682 assert(Node->getOpcode() == ISD::STORE && "Can only do this for stores!");
3683 SDOperand Chain = Node->getOperand(0);
3684 SDOperand StVal = Node->getOperand(1);
3685 SDOperand StPtr = Node->getOperand(2);
3687 // The chain has to be a load, the stored value must be an integer binary
3688 // operation with one use.
3689 if (!StVal.Val->hasOneUse() || StVal.Val->getNumOperands() != 2 ||
3690 MVT::isFloatingPoint(StVal.getValueType()))
3693 // Token chain must either be a factor node or the load to fold.
3694 if (Chain.getOpcode() != ISD::LOAD && Chain.getOpcode() != ISD::TokenFactor)
3699 // Check to see if there is a load from the same pointer that we're storing
3700 // to in either operand of the binop.
3701 if (StVal.getOperand(0).getOpcode() == ISD::LOAD &&
3702 StVal.getOperand(0).getOperand(1) == StPtr)
3703 TheLoad = StVal.getOperand(0);
3704 else if (StVal.getOperand(1).getOpcode() == ISD::LOAD &&
3705 StVal.getOperand(1).getOperand(1) == StPtr)
3706 TheLoad = StVal.getOperand(1);
3708 return false; // No matching load operand.
3710 // We can only fold the load if there are no intervening side-effecting
3711 // operations. This means that the store uses the load as its token chain, or
3712 // there are only token factor nodes in between the store and load.
3713 if (Chain != TheLoad.getValue(1)) {
3714 // Okay, the other option is that we have a store referring to (possibly
3715 // nested) token factor nodes. For now, just try peeking through one level
3716 // of token factors to see if this is the case.
3717 bool ChainOk = false;
3718 if (Chain.getOpcode() == ISD::TokenFactor) {
3719 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
3720 if (Chain.getOperand(i) == TheLoad.getValue(1)) {
3726 if (!ChainOk) return false;
3729 if (TheLoad.getOperand(1) != StPtr)
3732 // Make sure that one of the operands of the binop is the load, and that the
3733 // load folds into the binop.
3734 if (((StVal.getOperand(0) != TheLoad ||
3735 !isFoldableLoad(TheLoad, StVal.getOperand(1))) &&
3736 (StVal.getOperand(1) != TheLoad ||
3737 !isFoldableLoad(TheLoad, StVal.getOperand(0)))))
3740 // Finally, check to see if this is one of the ops we can handle!
3741 static const unsigned ADDTAB[] = {
3742 X86::ADD8mi, X86::ADD16mi, X86::ADD32mi,
3743 X86::ADD8mr, X86::ADD16mr, X86::ADD32mr,
3745 static const unsigned SUBTAB[] = {
3746 X86::SUB8mi, X86::SUB16mi, X86::SUB32mi,
3747 X86::SUB8mr, X86::SUB16mr, X86::SUB32mr,
3749 static const unsigned ANDTAB[] = {
3750 X86::AND8mi, X86::AND16mi, X86::AND32mi,
3751 X86::AND8mr, X86::AND16mr, X86::AND32mr,
3753 static const unsigned ORTAB[] = {
3754 X86::OR8mi, X86::OR16mi, X86::OR32mi,
3755 X86::OR8mr, X86::OR16mr, X86::OR32mr,
3757 static const unsigned XORTAB[] = {
3758 X86::XOR8mi, X86::XOR16mi, X86::XOR32mi,
3759 X86::XOR8mr, X86::XOR16mr, X86::XOR32mr,
3761 static const unsigned SHLTAB[] = {
3762 X86::SHL8mi, X86::SHL16mi, X86::SHL32mi,
3763 /*Have to put the reg in CL*/0, 0, 0,
3765 static const unsigned SARTAB[] = {
3766 X86::SAR8mi, X86::SAR16mi, X86::SAR32mi,
3767 /*Have to put the reg in CL*/0, 0, 0,
3769 static const unsigned SHRTAB[] = {
3770 X86::SHR8mi, X86::SHR16mi, X86::SHR32mi,
3771 /*Have to put the reg in CL*/0, 0, 0,
3774 const unsigned *TabPtr = 0;
3775 switch (StVal.getOpcode()) {
3777 std::cerr << "CANNOT [mem] op= val: ";
3778 StVal.Val->dump(); std::cerr << "\n";
3783 case ISD::UREM: return false;
3785 case ISD::ADD: TabPtr = ADDTAB; break;
3786 case ISD::SUB: TabPtr = SUBTAB; break;
3787 case ISD::AND: TabPtr = ANDTAB; break;
3788 case ISD:: OR: TabPtr = ORTAB; break;
3789 case ISD::XOR: TabPtr = XORTAB; break;
3790 case ISD::SHL: TabPtr = SHLTAB; break;
3791 case ISD::SRA: TabPtr = SARTAB; break;
3792 case ISD::SRL: TabPtr = SHRTAB; break;
3795 // Handle: [mem] op= CST
3796 SDOperand Op0 = StVal.getOperand(0);
3797 SDOperand Op1 = StVal.getOperand(1);
3799 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
3800 switch (Op0.getValueType()) { // Use Op0's type because of shifts.
3803 case MVT::i8: Opc = TabPtr[0]; break;
3804 case MVT::i16: Opc = TabPtr[1]; break;
3805 case MVT::i32: Opc = TabPtr[2]; break;
3809 if (!ExprMap.insert(std::make_pair(TheLoad.getValue(1), 1)).second)
3810 assert(0 && "Already emitted?");
3814 if (getRegPressure(TheLoad.getOperand(0)) >
3815 getRegPressure(TheLoad.getOperand(1))) {
3816 Select(TheLoad.getOperand(0));
3817 SelectAddress(TheLoad.getOperand(1), AM);
3819 SelectAddress(TheLoad.getOperand(1), AM);
3820 Select(TheLoad.getOperand(0));
3823 if (StVal.getOpcode() == ISD::ADD) {
3824 if (CN->getValue() == 1) {
3825 switch (Op0.getValueType()) {
3828 addFullAddress(BuildMI(BB, X86::INC8m, 4), AM);
3830 case MVT::i16: Opc = TabPtr[1];
3831 addFullAddress(BuildMI(BB, X86::INC16m, 4), AM);
3833 case MVT::i32: Opc = TabPtr[2];
3834 addFullAddress(BuildMI(BB, X86::INC32m, 4), AM);
3837 } else if (CN->getValue()+1 == 0) { // [X] += -1 -> DEC [X]
3838 switch (Op0.getValueType()) {
3841 addFullAddress(BuildMI(BB, X86::DEC8m, 4), AM);
3843 case MVT::i16: Opc = TabPtr[1];
3844 addFullAddress(BuildMI(BB, X86::DEC16m, 4), AM);
3846 case MVT::i32: Opc = TabPtr[2];
3847 addFullAddress(BuildMI(BB, X86::DEC32m, 4), AM);
3853 addFullAddress(BuildMI(BB, Opc, 4+1),AM).addImm(CN->getValue());
3858 // If we have [mem] = V op [mem], try to turn it into:
3859 // [mem] = [mem] op V.
3860 if (Op1 == TheLoad && StVal.getOpcode() != ISD::SUB &&
3861 StVal.getOpcode() != ISD::SHL && StVal.getOpcode() != ISD::SRA &&
3862 StVal.getOpcode() != ISD::SRL)
3863 std::swap(Op0, Op1);
3865 if (Op0 != TheLoad) return false;
3867 switch (Op0.getValueType()) {
3868 default: return false;
3870 case MVT::i8: Opc = TabPtr[3]; break;
3871 case MVT::i16: Opc = TabPtr[4]; break;
3872 case MVT::i32: Opc = TabPtr[5]; break;
3875 // Table entry doesn't exist?
3876 if (Opc == 0) return false;
3878 if (!ExprMap.insert(std::make_pair(TheLoad.getValue(1), 1)).second)
3879 assert(0 && "Already emitted?");
3881 Select(TheLoad.getOperand(0));
3884 SelectAddress(TheLoad.getOperand(1), AM);
3885 unsigned Reg = SelectExpr(Op1);
3886 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addReg(Reg);
3890 /// If node is a ret(tailcall) node, emit the specified tail call and return
3891 /// true, otherwise return false.
3893 /// FIXME: This whole thing should be a post-legalize optimization pass which
3894 /// recognizes and transforms the dag. We don't want the selection phase doing
// NOTE(review): doc lines 3895-3896 and several 'return false;'/brace lines
// are elided from this listing (embedded numbering is non-contiguous).
3897 bool ISel::EmitPotentialTailCall(SDNode *RetNode) {
3898 assert(RetNode->getOpcode() == ISD::RET && "Not a return");
3900 SDOperand Chain = RetNode->getOperand(0);
3902 // If this is a token factor node where one operand is a call, dig into it.
3903 SDOperand TokFactor;
3904 unsigned TokFactorOperand = 0;
3905 if (Chain.getOpcode() == ISD::TokenFactor) {
3906 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
3907 if (Chain.getOperand(i).getOpcode() == ISD::CALLSEQ_END ||
3908 Chain.getOperand(i).getOpcode() == X86ISD::TAILCALL) {
3909 TokFactorOperand = i;
3911 Chain = Chain.getOperand(i);
3914 if (TokFactor.Val == 0) return false; // No call operand.
3917 // Skip the CALLSEQ_END node if present.
3918 if (Chain.getOpcode() == ISD::CALLSEQ_END)
3919 Chain = Chain.getOperand(0);
3921 // Is a tailcall the last control operation that occurs before the return?
3922 if (Chain.getOpcode() != X86ISD::TAILCALL)
3925 // If we return a value, is it the value produced by the call?
3926 if (RetNode->getNumOperands() > 1) {
3927 // Not returning the ret val of the call?
3928 if (Chain.Val->getNumValues() == 1 ||
3929 RetNode->getOperand(1) != Chain.getValue(1))
// A two-register return (e.g. i64 in EAX:EDX) must also match value 2.
3932 if (RetNode->getNumOperands() > 2) {
3933 if (Chain.Val->getNumValues() == 2 ||
3934 RetNode->getOperand(2) != Chain.getValue(2))
3937 assert(RetNode->getNumOperands() <= 3);
3940 // CalleeCallArgAmt - The total number of bytes used for the callee arg area.
3941 // For FastCC, this will always be > 0.
3942 unsigned CalleeCallArgAmt =
3943 cast<ConstantSDNode>(Chain.getOperand(2))->getValue();
3945 // CalleeCallArgPopAmt - The number of bytes in the call area popped by the
3946 // callee. For FastCC this will always be > 0, for CCC this is always 0.
3947 unsigned CalleeCallArgPopAmt =
3948 cast<ConstantSDNode>(Chain.getOperand(3))->getValue();
3950 // There are several cases we can handle here. First, if the caller and
3951 // callee are both CCC functions, we can tailcall if the callee takes <= the
3952 // number of argument bytes that the caller does.
3953 if (CalleeCallArgPopAmt == 0 && // Callee is C CallingConv?
3954 X86Lowering.getBytesToPopOnReturn() == 0) { // Caller is C CallingConv?
3955 // Check to see if caller arg area size >= callee arg area size.
3956 if (X86Lowering.getBytesCallerReserves() >= CalleeCallArgAmt) {
// CCC->CCC tail calls are recognized but not implemented: the emitting
// call below is commented out, so this path falls through.
3957 //std::cerr << "CCC TAILCALL UNIMP!\n";
3958 // If TokFactor is non-null, emit all operands.
3960 //EmitCCCToCCCTailCall(Chain.Val);
3966 // Second, if both are FastCC functions, we can always perform the tail call.
3967 if (CalleeCallArgPopAmt && X86Lowering.getBytesToPopOnReturn()) {
3968 // If TokFactor is non-null, emit all operands before the call.
3969 if (TokFactor.Val) {
// Select every token-factor operand except the call itself, which is
// emitted by EmitFastCCToFastCCTailCall below.
3970 for (unsigned i = 0, e = TokFactor.getNumOperands(); i != e; ++i)
3971 if (i != TokFactorOperand)
3972 Select(TokFactor.getOperand(i));
3975 EmitFastCCToFastCCTailCall(Chain.Val);
3979 // We don't support mixed calls, due to issues with alignment. We could in
3980 // theory handle some mixed calls from CCC -> FastCC if the stack is properly
3981 // aligned (which depends on the number of arguments to the callee). TODO.
// GetAdjustedArgumentStores - Recursively rewrite the argument-store chain of
// a tail call so each store targets a fixed frame object at (incoming ESP +
// Offset) rather than [current ESP + C], stripping the CALLSEQ_START node at
// the root of the chain. Returns the rewritten chain.
// NOTE(review): some 'break;'/brace lines between cases (e.g. 4003, 4006-4008)
// are elided from this listing.
3985 static SDOperand GetAdjustedArgumentStores(SDOperand Chain, int Offset,
3986 SelectionDAG &DAG) {
3987 MVT::ValueType StoreVT;
3988 switch (Chain.getOpcode()) {
3989 case ISD::CALLSEQ_START:
3990 // If we found the start of the call sequence, we're done. We actually
3991 // strip off the CALLSEQ_START node, to avoid generating the
3992 // ADJCALLSTACKDOWN marker for the tail call.
3993 return Chain.getOperand(0);
3994 case ISD::TokenFactor: {
// Rewrite every input of the token factor and rebuild it.
3995 std::vector<SDOperand> Ops;
3996 Ops.reserve(Chain.getNumOperands());
3997 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
3998 Ops.push_back(GetAdjustedArgumentStores(Chain.getOperand(i), Offset,DAG));
3999 return DAG.getNode(ISD::TokenFactor, MVT::Other, Ops);
4001 case ISD::STORE: // Normal store
4002 StoreVT = Chain.getOperand(1).getValueType();
4004 case ISD::TRUNCSTORE: // FLOAT store
// TRUNCSTORE carries the stored-memory type as a VT node in operand 4.
4005 StoreVT = cast<VTSDNode>(Chain.getOperand(4))->getVT();
// Store address is operand 2; it must be ESP itself or (ESP + constant).
4009 SDOperand OrigDest = Chain.getOperand(2);
4010 unsigned OrigOffset;
4012 if (OrigDest.getOpcode() == ISD::CopyFromReg) {
4014 assert(cast<RegSDNode>(OrigDest)->getReg() == X86::ESP);
4016 // We expect only (ESP+C)
4017 assert(OrigDest.getOpcode() == ISD::ADD &&
4018 isa<ConstantSDNode>(OrigDest.getOperand(1)) &&
4019 OrigDest.getOperand(0).getOpcode() == ISD::CopyFromReg &&
4020 cast<RegSDNode>(OrigDest.getOperand(0))->getReg() == X86::ESP);
4021 OrigOffset = cast<ConstantSDNode>(OrigDest.getOperand(1))->getValue();
4024 // Compute the new offset from the incoming ESP value we wish to use.
4025 unsigned NewOffset = OrigOffset + Offset;
4027 unsigned OpSize = (MVT::getSizeInBits(StoreVT)+7)/8; // Bits -> Bytes
4028 MachineFunction &MF = DAG.getMachineFunction();
// Replace the ESP-relative address with a fixed frame index at NewOffset.
4029 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, NewOffset);
4030 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
// Recurse on the input chain, then rebuild the store over the frame index.
4032 SDOperand InChain = GetAdjustedArgumentStores(Chain.getOperand(0), Offset,
4034 if (Chain.getOpcode() == ISD::STORE)
4035 return DAG.getNode(ISD::STORE, MVT::Other, InChain, Chain.getOperand(1),
4037 assert(Chain.getOpcode() == ISD::TRUNCSTORE);
4038 return DAG.getNode(ISD::TRUNCSTORE, MVT::Other, InChain, Chain.getOperand(1),
4039 FIN, DAG.getSrcValue(NULL), DAG.getValueType(StoreVT));
4043 /// EmitFastCCToFastCCTailCall - Given a tailcall in the tail position to a
4044 /// fastcc function from a fastcc function, emit the code to emit a 'proper'
// NOTE(review): the tail of this doc comment (line 4045) and several
// declarations (e.g. 'RetVal', and the AddressMode/guard lines around the
// elided numbers) are missing from this partial listing.
4046 void ISel::EmitFastCCToFastCCTailCall(SDNode *TailCallNode) {
// TAILCALL operand 2 holds the callee's argument-area size in bytes.
4047 unsigned CalleeCallArgSize =
4048 cast<ConstantSDNode>(TailCallNode->getOperand(2))->getValue();
4049 unsigned CallerArgSize = X86Lowering.getBytesToPopOnReturn();
4051 //std::cerr << "****\n*** EMITTING TAIL CALL!\n****\n";
4053 // Adjust argument stores. Instead of storing to [ESP], f.e., store to frame
4054 // indexes that are relative to the incoming ESP. If the incoming and
4055 // outgoing arg sizes are the same we will store to [InESP] instead of
4056 // [CurESP] and the ESP referenced will be relative to the incoming function
4058 int ESPOffset = CallerArgSize-CalleeCallArgSize;
4059 SDOperand AdjustedArgStores =
4060 GetAdjustedArgumentStores(TailCallNode->getOperand(0), ESPOffset, *TheDAG);
4062 // Copy the return address of the caller into a virtual register so we don't
// RetVal is declared on an elided line; it receives the loaded return address.
4066 SDOperand RetValAddr = X86Lowering.getReturnAddressFrameIndex(*TheDAG);
4067 RetVal = TheDAG->getLoad(MVT::i32, TheDAG->getEntryNode(),
4068 RetValAddr, TheDAG->getSrcValue(NULL));
4072 // Codegen all of the argument stores.
4073 Select(AdjustedArgStores);
4076 // Emit a store of the saved ret value to the new location.
4077 MachineFunction &MF = TheDAG->getMachineFunction();
4078 int ReturnAddrFI = MF.getFrameInfo()->CreateFixedObject(4, ESPOffset-4);
4079 SDOperand RetValAddr = TheDAG->getFrameIndex(ReturnAddrFI, MVT::i32);
4080 Select(TheDAG->getNode(ISD::STORE, MVT::Other, TheDAG->getEntryNode(),
4081 RetVal, RetValAddr));
4084 // Get the destination value.
4085 SDOperand Callee = TailCallNode->getOperand(1);
4086 bool isDirect = isa<GlobalAddressSDNode>(Callee) ||
4087 isa<ExternalSymbolSDNode>(Callee);
4088 unsigned CalleeReg = 0;
4089 if (!isDirect) CalleeReg = SelectExpr(Callee);
4091 unsigned RegOp1 = 0;
4092 unsigned RegOp2 = 0;
4094 if (TailCallNode->getNumOperands() > 4) {
4095 // The first value is passed in (a part of) EAX, the second in EDX.
4096 RegOp1 = SelectExpr(TailCallNode->getOperand(4));
4097 if (TailCallNode->getNumOperands() > 5)
4098 RegOp2 = SelectExpr(TailCallNode->getOperand(5));
// Copy the first register argument into AL/AX/EAX by value type.
4100 switch (TailCallNode->getOperand(4).getValueType()) {
4101 default: assert(0 && "Bad thing to pass in regs");
4104 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(RegOp1);
4108 BuildMI(BB, X86::MOV16rr, 1,X86::AX).addReg(RegOp1);
4112 BuildMI(BB, X86::MOV32rr, 1,X86::EAX).addReg(RegOp1);
// Copy the second register argument into DL/DX/EDX by value type.
4117 switch (TailCallNode->getOperand(5).getValueType()) {
4118 default: assert(0 && "Bad thing to pass in regs");
4121 BuildMI(BB, X86::MOV8rr, 1, X86::DL).addReg(RegOp2);
4125 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(RegOp2);
4129 BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(RegOp2);
// Slide ESP by the caller/callee arg-area size difference before jumping.
4137 BuildMI(BB, X86::ADJSTACKPTRri, 2,
4138 X86::ESP).addReg(X86::ESP).addImm(ESPOffset);
4140 // TODO: handle jmp [mem]
4142 BuildMI(BB, X86::TAILJMPr, 1).addReg(CalleeReg);
4143 } else if (GlobalAddressSDNode *GASD = dyn_cast<GlobalAddressSDNode>(Callee)){
4144 BuildMI(BB, X86::TAILJMPd, 1).addGlobalAddress(GASD->getGlobal(), true);
4146 ExternalSymbolSDNode *ESSDN = cast<ExternalSymbolSDNode>(Callee);
4147 BuildMI(BB, X86::TAILJMPd, 1).addExternalSymbol(ESSDN->getSymbol(), true);
// Known TODO from the original author: the tail jump should carry implicit
// uses of the argument registers so later passes see them as live.
4149 // ADD IMPLICIT USE RegOp1/RegOp2's
// Select - Top-level statement selector: emits machine instructions for a DAG
// node that is used for its side effects / token chain (as opposed to
// SelectExpr, which selects nodes producing a value in a register).  Memoizes
// via ExprMap so each node is selected at most once.
// NOTE(review): this listing appears lossy — several case labels, else-lines
// and closing braces are not visible; comments below describe only what the
// visible code demonstrates.
4153 void ISel::Select(SDOperand N) {
4154 unsigned Tmp1, Tmp2, Opc;
// Skip nodes that were already emitted (insert fails if N is present).
4156 if (!ExprMap.insert(std::make_pair(N, 1)).second)
4157 return; // Already selected.
4159 SDNode *Node = N.Val;
4161 switch (Node->getOpcode()) {
// Unhandled opcode: dump the offending node before aborting.
4163 Node->dump(); std::cerr << "\n";
4164 assert(0 && "Node not handled yet!");
4165 case ISD::EntryToken: return; // Noop
// TokenFactor: select all chain operands; order them by estimated register
// pressure (highest first) to reduce live ranges.
4166 case ISD::TokenFactor:
4167 if (Node->getNumOperands() == 2) {
// Two-operand fast path: pick which operand to select first by pressure.
4169 getRegPressure(Node->getOperand(1))>getRegPressure(Node->getOperand(0));
4170 Select(Node->getOperand(OneFirst));
4171 Select(Node->getOperand(!OneFirst));
// General case: sort (pressure, index) pairs descending and select in order.
4173 std::vector<std::pair<unsigned, unsigned> > OpsP;
4174 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
4175 OpsP.push_back(std::make_pair(getRegPressure(Node->getOperand(i)), i));
4176 std::sort(OpsP.begin(), OpsP.end());
4177 std::reverse(OpsP.begin(), OpsP.end());
4178 for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
4179 Select(Node->getOperand(OpsP[i].second));
// CopyToReg: evaluate chain and value (pressure-ordered), then emit a
// register-register move of the appropriate width into the target register.
4182 case ISD::CopyToReg:
4183 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
4184 Select(N.getOperand(0));
4185 Tmp1 = SelectExpr(N.getOperand(1));
4187 Tmp1 = SelectExpr(N.getOperand(1));
4188 Select(N.getOperand(0));
4190 Tmp2 = cast<RegSDNode>(N)->getReg();
// Pick the move opcode from the value type (SSE moves for FP types).
4193 switch (N.getOperand(1).getValueType()) {
4194 default: assert(0 && "Invalid type for operation!");
4196 case MVT::i8: Opc = X86::MOV8rr; break;
4197 case MVT::i16: Opc = X86::MOV16rr; break;
4198 case MVT::i32: Opc = X86::MOV32rr; break;
4199 case MVT::f32: Opc = X86::MOVAPSrr; break;
4202 Opc = X86::MOVAPDrr;
// Remember that x87-style FP code was emitted (affects FP stackifier).
4205 ContainsFPCode = true;
4209 BuildMI(BB, Opc, 1, Tmp2).addReg(Tmp1);
// Return handling (case label not visible in this listing): first try to
// turn a ret-of-call into a tail call when the chain comes from a call.
4213 if (N.getOperand(0).getOpcode() == ISD::CALLSEQ_END ||
4214 N.getOperand(0).getOpcode() == X86ISD::TAILCALL ||
4215 N.getOperand(0).getOpcode() == ISD::TokenFactor)
4216 if (EmitPotentialTailCall(Node))
4219 switch (N.getNumOperands()) {
4221 assert(0 && "Unknown return instruction!");
// Two 32-bit return values: returned in EAX (first) and EDX (second).
4223 assert(N.getOperand(1).getValueType() == MVT::i32 &&
4224 N.getOperand(2).getValueType() == MVT::i32 &&
4225 "Unknown two-register value!");
4226 if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
4227 Tmp1 = SelectExpr(N.getOperand(1));
4228 Tmp2 = SelectExpr(N.getOperand(2));
4230 Tmp2 = SelectExpr(N.getOperand(2));
4231 Tmp1 = SelectExpr(N.getOperand(1));
4233 Select(N.getOperand(0));
4235 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
4236 BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(Tmp2);
// Single return value: select chain and value in pressure order.
4239 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
4240 Select(N.getOperand(0));
4241 Tmp1 = SelectExpr(N.getOperand(1));
4243 Tmp1 = SelectExpr(N.getOperand(1));
4244 Select(N.getOperand(0));
4246 switch (N.getOperand(1).getValueType()) {
4247 default: assert(0 && "All other types should have been promoted!!");
// f32 return with scalar SSE: FP results are returned on the x87 stack, so
// spill the SSE value and reload it through the FP stack (FP0).
4250 // Spill the value to memory and reload it into top of stack.
4251 unsigned Size = MVT::getSizeInBits(MVT::f32)/8;
4252 MachineFunction *F = BB->getParent();
4253 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
4254 addFrameReference(BuildMI(BB, X86::MOVSSmr, 5), FrameIdx).addReg(Tmp1);
4255 addFrameReference(BuildMI(BB, X86::FLD32m, 4, X86::FP0), FrameIdx);
4256 BuildMI(BB, X86::FpSETRESULT, 1).addReg(X86::FP0);
4257 ContainsFPCode = true;
4259 assert(0 && "MVT::f32 only legal with scalar sse fp");
// f64 return with scalar SSE: same spill/reload dance via the x87 stack.
4265 // Spill the value to memory and reload it into top of stack.
4266 unsigned Size = MVT::getSizeInBits(MVT::f64)/8;
4267 MachineFunction *F = BB->getParent();
4268 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
4269 addFrameReference(BuildMI(BB, X86::MOVSDmr, 5), FrameIdx).addReg(Tmp1);
4270 addFrameReference(BuildMI(BB, X86::FLD64m, 4, X86::FP0), FrameIdx);
4271 BuildMI(BB, X86::FpSETRESULT, 1).addReg(X86::FP0);
4272 ContainsFPCode = true;
// Non-SSE FP value already lives on the FP stack: just mark it the result.
4274 BuildMI(BB, X86::FpSETRESULT, 1).addReg(Tmp1);
// Integer return value goes in EAX.
4278 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
4283 Select(N.getOperand(0));
// Emit RET, or RETI <n> when the callee must pop <n> bytes of arguments.
4286 if (X86Lowering.getBytesToPopOnReturn() == 0)
4287 BuildMI(BB, X86::RET, 0); // Just emit a 'ret' instruction
4289 BuildMI(BB, X86::RETI, 1).addImm(X86Lowering.getBytesToPopOnReturn());
// Unconditional branch (case label not visible): select chain, jump to MBB.
4292 Select(N.getOperand(0));
4293 MachineBasicBlock *Dest =
4294 cast<BasicBlockSDNode>(N.getOperand(1))->getBasicBlock();
4295 BuildMI(BB, X86::JMP, 1).addMBB(Dest);
// Conditional branch: operand 1 is the condition, operand 2 the target.
4300 MachineBasicBlock *Dest =
4301 cast<BasicBlockSDNode>(N.getOperand(2))->getBasicBlock();
4303 // Try to fold a setcc into the branch. If this fails, emit a test/jne
// Fallback path: materialize the i1 condition and emit TEST + JNE.
4305 if (EmitBranchCC(Dest, N.getOperand(0), N.getOperand(1))) {
4306 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
4307 Select(N.getOperand(0));
4308 Tmp1 = SelectExpr(N.getOperand(1));
4310 Tmp1 = SelectExpr(N.getOperand(1));
4311 Select(N.getOperand(0));
4313 BuildMI(BB, X86::TEST8rr, 2).addReg(Tmp1).addReg(Tmp1);
4314 BuildMI(BB, X86::JNE, 1).addMBB(Dest);
// Load selected for its chain: if the loaded value has exactly one use,
// try to fold the load into that user and emit it here instead.
4321 // If this load could be folded into the only using instruction, and if it
4322 // is safe to emit the instruction here, try to do so now.
4323 if (Node->hasNUsesOfValue(1, 0)) {
4324 SDOperand TheVal = N.getValue(0);
// Walk the use list to find the (unique) user of the loaded value.
4326 for (SDNode::use_iterator UI = Node->use_begin(); ; ++UI) {
4327 assert(UI != Node->use_end() && "Didn't find use!");
4329 for (unsigned i = 0, e = UN->getNumOperands(); i != e; ++i)
4330 if (UN->getOperand(i) == TheVal) {
4336 // Only handle unary operators right now.
4337 if (User->getNumOperands() == 1) {
// Select the user directly; the load is folded into its addressing mode.
4339 SelectExpr(SDOperand(User, 0));
// Nodes that produce both a value and a chain: selecting value 0 via
// SelectExpr also emits the side effects.
4350 case ISD::DYNAMIC_STACKALLOC:
4351 case X86ISD::TAILCALL:
4356 case ISD::CopyFromReg:
4357 case X86ISD::FILD64m:
4359 SelectExpr(N.getValue(0));
// FP->int conversion through memory: x87 FIST* truncates using the current
// rounding mode, so temporarily force "round toward zero" via the FP
// control word, store the result, then restore the control word.
4362 case X86ISD::FP_TO_INT16_IN_MEM:
4363 case X86ISD::FP_TO_INT32_IN_MEM:
4364 case X86ISD::FP_TO_INT64_IN_MEM: {
4365 assert(N.getOperand(1).getValueType() == MVT::f64);
4367 Select(N.getOperand(0)); // Select the token chain
4370 if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
4371 ValReg = SelectExpr(N.getOperand(1));
4372 SelectAddress(N.getOperand(2), AM);
4374 SelectAddress(N.getOperand(2), AM);
4375 ValReg = SelectExpr(N.getOperand(1));
4378 // Change the floating point control register to use "round towards zero"
4379 // mode when truncating to an integer value.
// Save the current control word to a 2-byte stack slot.
4381 MachineFunction *F = BB->getParent();
4382 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
4383 addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);
4385 // Load the old value of the high byte of the control word...
4386 unsigned OldCW = MakeReg(MVT::i16);
4387 addFrameReference(BuildMI(BB, X86::MOV16rm, 4, OldCW), CWFrameIdx);
4389 // Set the high part to be round to zero...
4390 addFrameReference(BuildMI(BB, X86::MOV16mi, 5), CWFrameIdx).addImm(0xC7F);
4392 // Reload the modified control word now...
4393 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
4395 // Restore the memory image of control word to original value
4396 addFrameReference(BuildMI(BB, X86::MOV16mr, 5), CWFrameIdx).addReg(OldCW);
4398 // Get the X86 opcode to use.
4399 switch (N.getOpcode()) {
4400 case X86ISD::FP_TO_INT16_IN_MEM: Tmp1 = X86::FIST16m; break;
4401 case X86ISD::FP_TO_INT32_IN_MEM: Tmp1 = X86::FIST32m; break;
4402 case X86ISD::FP_TO_INT64_IN_MEM: Tmp1 = X86::FISTP64m; break;
// Emit the FIST store of the FP value to the destination address.
4405 addFullAddress(BuildMI(BB, Tmp1, 5), AM).addReg(ValReg);
4407 // Reload the original control word now.
4408 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
4412 case ISD::TRUNCSTORE: { // truncstore chain, val, ptr, SRCVALUE, storety
4414 MVT::ValueType StoredTy = cast<VTSDNode>(N.getOperand(4))->getVT();
4415 assert((StoredTy == MVT::i1 || StoredTy == MVT::f32 ||
4416 StoredTy == MVT::i16 /*FIXME: THIS IS JUST FOR TESTING!*/)
4417 && "Unsupported TRUNCSTORE for this target!");
4419 if (StoredTy == MVT::i16) {
4420 // FIXME: This is here just to allow testing. X86 doesn't really have a
4421 // TRUNCSTORE i16 operation, but this is required for targets that do not
4422 // have 16-bit integer registers. We occasionally disable 16-bit integer
4423 // registers to test the promotion code.
4424 Select(N.getOperand(0));
4425 Tmp1 = SelectExpr(N.getOperand(1));
4426 SelectAddress(N.getOperand(2), AM);
// Copy value into EAX, then store its low 16 bits (AX) to memory.
4428 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
4429 addFullAddress(BuildMI(BB, X86::MOV16mr, 5), AM).addReg(X86::AX);
4433 // Store of constant bool?
// Constant i1: emit a direct byte store of the immediate.
4434 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
4435 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(2))) {
4436 Select(N.getOperand(0));
4437 SelectAddress(N.getOperand(2), AM);
4439 SelectAddress(N.getOperand(2), AM);
4440 Select(N.getOperand(0));
4442 addFullAddress(BuildMI(BB, X86::MOV8mi, 5), AM).addImm(CN->getValue());
// Non-constant truncstore: pick the store opcode for the truncated type.
4447 default: assert(0 && "Cannot truncstore this type!");
4448 case MVT::i1: Opc = X86::MOV8mr; break;
4450 assert(!X86ScalarSSE && "Cannot truncstore scalar SSE regs");
4451 Opc = X86::FST32m; break;
// Select chain, value, and address in decreasing register-pressure order.
4454 std::vector<std::pair<unsigned, unsigned> > RP;
4455 RP.push_back(std::make_pair(getRegPressure(N.getOperand(0)), 0));
4456 RP.push_back(std::make_pair(getRegPressure(N.getOperand(1)), 1));
4457 RP.push_back(std::make_pair(getRegPressure(N.getOperand(2)), 2));
4458 std::sort(RP.begin(), RP.end());
4460 Tmp1 = 0; // Silence a warning.
4461 for (unsigned i = 0; i != 3; ++i)
4462 switch (RP[2-i].second) {
4463 default: assert(0 && "Unknown operand number!");
4464 case 0: Select(N.getOperand(0)); break;
4465 case 1: Tmp1 = SelectExpr(N.getOperand(1)); break;
4466 case 2: SelectAddress(N.getOperand(2), AM); break;
4469 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addReg(Tmp1);
// Plain STORE (case label not visible): constant stores use mem-imm forms.
4475 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
4477 switch (CN->getValueType(0)) {
4478 default: assert(0 && "Invalid type for operation!");
4480 case MVT::i8: Opc = X86::MOV8mi; break;
4481 case MVT::i16: Opc = X86::MOV16mi; break;
4482 case MVT::i32: Opc = X86::MOV32mi; break;
4485 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(2))) {
4486 Select(N.getOperand(0));
4487 SelectAddress(N.getOperand(2), AM);
4489 SelectAddress(N.getOperand(2), AM);
4490 Select(N.getOperand(0));
4492 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addImm(CN->getValue());
// Storing a global address: may need an indirect load first on Darwin.
4495 } else if (GlobalAddressSDNode *GA =
4496 dyn_cast<GlobalAddressSDNode>(N.getOperand(1))) {
4497 assert(GA->getValueType(0) == MVT::i32 && "Bad pointer operand");
4499 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(2))) {
4500 Select(N.getOperand(0));
4501 SelectAddress(N.getOperand(2), AM);
4503 SelectAddress(N.getOperand(2), AM);
4504 Select(N.getOperand(0));
4506 GlobalValue *GV = GA->getGlobal();
4507 // For Darwin, external and weak symbols are indirect, so we want to load
4508 // the value at address GV, not the value of GV itself.
4509 if (Subtarget->getIndirectExternAndWeakGlobals() &&
4510 (GV->hasWeakLinkage() || GV->isExternal())) {
4511 Tmp1 = MakeReg(MVT::i32);
4512 BuildMI(BB, X86::MOV32rm, 4, Tmp1).addReg(0).addZImm(1).addReg(0)
4513 .addGlobalAddress(GV, false, 0);
4514 addFullAddress(BuildMI(BB, X86::MOV32mr, 4+1),AM).addReg(Tmp1);
4516 addFullAddress(BuildMI(BB, X86::MOV32mi, 4+1),AM).addGlobalAddress(GV);
4521 // Check to see if this is a load/op/store combination.
4522 if (TryToFoldLoadOpStore(Node))
// General register store: choose opcode by value type.
4525 switch (N.getOperand(1).getValueType()) {
4526 default: assert(0 && "Cannot store this type!");
4528 case MVT::i8: Opc = X86::MOV8mr; break;
4529 case MVT::i16: Opc = X86::MOV16mr; break;
4530 case MVT::i32: Opc = X86::MOV32mr; break;
4531 case MVT::f32: Opc = X86::MOVSSmr; break;
4532 case MVT::f64: Opc = X86ScalarSSE ? X86::MOVSDmr : X86::FST64m; break;
// Same pressure-ordered selection of chain/value/address as TRUNCSTORE.
4535 std::vector<std::pair<unsigned, unsigned> > RP;
4536 RP.push_back(std::make_pair(getRegPressure(N.getOperand(0)), 0));
4537 RP.push_back(std::make_pair(getRegPressure(N.getOperand(1)), 1));
4538 RP.push_back(std::make_pair(getRegPressure(N.getOperand(2)), 2));
4539 std::sort(RP.begin(), RP.end());
4541 Tmp1 = 0; // Silence a warning.
4542 for (unsigned i = 0; i != 3; ++i)
4543 switch (RP[2-i].second) {
4544 default: assert(0 && "Unknown operand number!");
4545 case 0: Select(N.getOperand(0)); break;
4546 case 1: Tmp1 = SelectExpr(N.getOperand(1)); break;
4547 case 2: SelectAddress(N.getOperand(2), AM); break;
4550 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addReg(Tmp1);
// Call sequence markers: adjust the stack pointer around calls.
4553 case ISD::CALLSEQ_START:
4554 Select(N.getOperand(0));
4556 Tmp1 = cast<ConstantSDNode>(N.getOperand(1))->getValue();
4557 BuildMI(BB, X86::ADJCALLSTACKDOWN, 1).addImm(Tmp1);
4559 case ISD::CALLSEQ_END:
4560 Select(N.getOperand(0));
// Memset-style intrinsic (case label not visible): operand 4 is alignment,
// operand 2 the byte value, operand 3 the length.
4563 Select(N.getOperand(0)); // Select the chain.
4565 (unsigned)cast<ConstantSDNode>(Node->getOperand(4))->getValue();
4566 if (Align == 0) Align = 1;
4568 // Turn the byte code into # iterations
// Constant fill value: widen it and use REP STOSW/STOSD when aligned.
4571 if (ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Node->getOperand(2))) {
4572 unsigned Val = ValC->getValue() & 255;
4574 // If the value is a constant, then we can potentially use larger sets.
4575 switch (Align & 3) {
4576 case 2: // WORD aligned
4577 CountReg = MakeReg(MVT::i32);
4578 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
4579 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/2);
// Variable length: divide byte count by 2 with a shift.
4581 unsigned ByteReg = SelectExpr(Node->getOperand(3));
4582 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
// Replicate the byte into AX for the 16-bit store pattern.
4584 BuildMI(BB, X86::MOV16ri, 1, X86::AX).addImm((Val << 8) | Val);
4585 Opcode = X86::REP_STOSW;
4587 case 0: // DWORD aligned
4588 CountReg = MakeReg(MVT::i32);
4589 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
4590 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/4);
4592 unsigned ByteReg = SelectExpr(Node->getOperand(3));
4593 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
// Replicate the byte into all four bytes of EAX.
4595 Val = (Val << 8) | Val;
4596 BuildMI(BB, X86::MOV32ri, 1, X86::EAX).addImm((Val << 16) | Val);
4597 Opcode = X86::REP_STOSD;
4599 default: // BYTE aligned
4600 CountReg = SelectExpr(Node->getOperand(3));
4601 BuildMI(BB, X86::MOV8ri, 1, X86::AL).addImm(Val);
4602 Opcode = X86::REP_STOSB;
4606 // If it's not a constant value we are storing, just fall back. We could
4607 // try to be clever to form 16 bit and 32 bit values, but we don't yet.
4608 unsigned ValReg = SelectExpr(Node->getOperand(2));
4609 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(ValReg);
4610 CountReg = SelectExpr(Node->getOperand(3));
4611 Opcode = X86::REP_STOSB;
4614 // No matter what the alignment is, we put the source in ESI, the
4615 // destination in EDI, and the count in ECX.
4616 unsigned TmpReg1 = SelectExpr(Node->getOperand(1));
4617 BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
4618 BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
4619 BuildMI(BB, Opcode, 0);
// Memcpy-style intrinsic (case label not visible): same alignment-driven
// REP MOVSB/MOVSW/MOVSD selection, no fill value this time.
4623 Select(N.getOperand(0)); // Select the chain.
4625 (unsigned)cast<ConstantSDNode>(Node->getOperand(4))->getValue();
4626 if (Align == 0) Align = 1;
4628 // Turn the byte code into # iterations
4631 switch (Align & 3) {
4632 case 2: // WORD aligned
4633 CountReg = MakeReg(MVT::i32);
4634 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
4635 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/2);
4637 unsigned ByteReg = SelectExpr(Node->getOperand(3));
4638 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
4640 Opcode = X86::REP_MOVSW;
4642 case 0: // DWORD aligned
4643 CountReg = MakeReg(MVT::i32);
4644 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
4645 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/4);
4647 unsigned ByteReg = SelectExpr(Node->getOperand(3));
4648 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
4650 Opcode = X86::REP_MOVSD;
4652 default: // BYTE aligned
4653 CountReg = SelectExpr(Node->getOperand(3));
4654 Opcode = X86::REP_MOVSB;
4658 // No matter what the alignment is, we put the source in ESI, the
4659 // destination in EDI, and the count in ECX.
4660 unsigned TmpReg1 = SelectExpr(Node->getOperand(1));
4661 unsigned TmpReg2 = SelectExpr(Node->getOperand(2));
4662 BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
4663 BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
4664 BuildMI(BB, X86::MOV32rr, 1, X86::ESI).addReg(TmpReg2);
4665 BuildMI(BB, Opcode, 0);
// llvm.writeport: emit an OUT instruction.  Port address must be i16.
4668 case ISD::WRITEPORT:
4669 if (Node->getOperand(2).getValueType() != MVT::i16) {
4670 std::cerr << "llvm.writeport: Address size is not 16 bits\n";
4673 Select(Node->getOperand(0)); // Emit the chain.
// Move the data value into AL/AX/EAX depending on its width, and pick
// the matching immediate-port and register-port OUT opcodes.
4675 Tmp1 = SelectExpr(Node->getOperand(1));
4676 switch (Node->getOperand(1).getValueType()) {
4678 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(Tmp1);
4679 Tmp2 = X86::OUT8ir; Opc = X86::OUT8rr;
4682 BuildMI(BB, X86::MOV16rr, 1, X86::AX).addReg(Tmp1);
4683 Tmp2 = X86::OUT16ir; Opc = X86::OUT16rr;
4686 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
4687 Tmp2 = X86::OUT32ir; Opc = X86::OUT32rr;
4690 std::cerr << "llvm.writeport: invalid data type for X86 target";
4694 // If the port is a single-byte constant, use the immediate form.
4695 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Node->getOperand(2)))
4696 if ((CN->getValue() & 255) == CN->getValue()) {
4697 BuildMI(BB, Tmp2, 1).addImm(CN->getValue());
4701 // Otherwise, move the I/O port address into the DX register.
4702 unsigned Reg = SelectExpr(Node->getOperand(2));
4703 BuildMI(BB, X86::MOV16rr, 1, X86::DX).addReg(Reg);
4704 BuildMI(BB, Opc, 0);
// All handled cases return from inside the switch above.
4707 assert(0 && "Should not be reached!");
4711 /// createX86PatternInstructionSelector - This pass converts an LLVM function
4712 /// into a machine code representation using pattern matching and a machine
4713 /// description file.
4715 FunctionPass *llvm::createX86PatternInstructionSelector(TargetMachine &TM) {
4716 return new ISel(TM);