1 //===-- X86ISelPattern.cpp - A pattern matching inst selector for X86 -----===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by the LLVM research group and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines a pattern matching instruction selector for X86.
12 //===----------------------------------------------------------------------===//
15 #include "X86InstrBuilder.h"
16 #include "X86RegisterInfo.h"
17 #include "llvm/Constants.h" // FIXME: REMOVE
18 #include "llvm/Function.h"
19 #include "llvm/CodeGen/MachineConstantPool.h" // FIXME: REMOVE
20 #include "llvm/CodeGen/MachineFunction.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/SelectionDAG.h"
23 #include "llvm/CodeGen/SelectionDAGISel.h"
24 #include "llvm/CodeGen/SSARegMap.h"
25 #include "llvm/Target/TargetData.h"
26 #include "llvm/Target/TargetLowering.h"
27 #include "llvm/Support/MathExtras.h"
28 #include "llvm/ADT/Statistic.h"
32 //===----------------------------------------------------------------------===//
33 // X86TargetLowering - X86 Implementation of the TargetLowering interface
// X86TargetLowering - Describes how LLVM IR is lowered onto the X86
// SelectionDAG: which register class backs each value type, which ISD
// operations the selector cannot handle directly, and which FP immediates
// may be materialized without a constant-pool load (FLD0/FLD1, with FCHS
// for the negated forms).
// NOTE(review): this chunk elides lines of the original file; access
// specifiers and the class's closing brace are not visible here.
35 class X86TargetLowering : public TargetLowering {
36 int VarArgsFrameIndex; // FrameIndex for start of varargs area.
37 int ReturnAddrIndex; // FrameIndex for return slot.
39 X86TargetLowering(TargetMachine &TM) : TargetLowering(TM) {
40 // Set up the TargetLowering object.
// Register classes for each legal integer/FP type. Note both f32 and f64
// map onto the same x87 RFP class.
41 addRegisterClass(MVT::i8, X86::R8RegisterClass);
42 addRegisterClass(MVT::i16, X86::R16RegisterClass);
43 addRegisterClass(MVT::i32, X86::R32RegisterClass);
44 addRegisterClass(MVT::f64, X86::RFPRegisterClass);
46 // FIXME: Eliminate these two classes when legalize can handle promotions
48 addRegisterClass(MVT::i1, X86::R8RegisterClass);
49 addRegisterClass(MVT::f32, X86::RFPRegisterClass);
51 computeRegisterProperties();
// Operations the pattern matcher cannot select; the legalizer must expand
// these before instruction selection sees them.
53 setOperationUnsupported(ISD::MEMMOVE, MVT::Other);
55 setOperationUnsupported(ISD::MUL, MVT::i8);
56 setOperationUnsupported(ISD::SELECT, MVT::i1);
57 setOperationUnsupported(ISD::SELECT, MVT::i8);
// FP immediates that the x87 unit can produce directly.
59 addLegalFPImmediate(+0.0); // FLD0
60 addLegalFPImmediate(+1.0); // FLD1
61 addLegalFPImmediate(-0.0); // FLD0/FCHS
62 addLegalFPImmediate(-1.0); // FLD1/FCHS
65 /// LowerArguments - This hook must be implemented to indicate how we should
66 /// lower the arguments for the specified function, into the specified DAG.
67 virtual std::vector<SDOperand>
68 LowerArguments(Function &F, SelectionDAG &DAG);
70 /// LowerCallTo - This hook lowers an abstract call to a function into an
72 virtual std::pair<SDOperand, SDOperand>
73 LowerCallTo(SDOperand Chain, const Type *RetTy, SDOperand Callee,
74 ArgListTy &Args, SelectionDAG &DAG);
// Lowers llvm.va_start; returns the vararg-area address plus the chain.
76 virtual std::pair<SDOperand, SDOperand>
77 LowerVAStart(SDOperand Chain, SelectionDAG &DAG);
// Lowers va_arg / va_next; isVANext selects which of the two behaviors.
79 virtual std::pair<SDOperand,SDOperand>
80 LowerVAArgNext(bool isVANext, SDOperand Chain, SDOperand VAList,
81 const Type *ArgTy, SelectionDAG &DAG);
// Lowers llvm.frameaddress / llvm.returnaddress (Depth > 0 unsupported —
// see the definition below).
83 virtual std::pair<SDOperand, SDOperand>
84 LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain, unsigned Depth,
// LowerArguments - Build DAG loads for each incoming stack argument of F,
// assigning each a fixed frame object at its cdecl stack offset, and record
// the frame index where varargs begin.
// NOTE(review): interior lines (the switch header, dead-argument branch, and
// the return) are elided in this chunk.
90 std::vector<SDOperand>
91 X86TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
92 std::vector<SDOperand> ArgValues;
94 // Add DAG nodes to load the arguments... On entry to a function on the X86,
95 // the stack frame looks like this:
97 // [ESP] -- return address
98 // [ESP + 4] -- first argument (leftmost lexically)
99 // [ESP + 8] -- second argument, if first argument is four bytes in size
102 MachineFunction &MF = DAG.getMachineFunction();
103 MachineFrameInfo *MFI = MF.getFrameInfo();
105 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
106 for (Function::aiterator I = F.abegin(), E = F.aend(); I != E; ++I) {
107 MVT::ValueType ObjectVT = getValueType(I->getType());
108 unsigned ArgIncrement = 4;
// Per-type stack slot size; 64-bit values consume two 4-byte slots.
111 default: assert(0 && "Unhandled argument type!");
113 case MVT::i8: ObjSize = 1; break;
114 case MVT::i16: ObjSize = 2; break;
115 case MVT::i32: ObjSize = 4; break;
116 case MVT::i64: ObjSize = ArgIncrement = 8; break;
117 case MVT::f32: ObjSize = 4; break;
118 case MVT::f64: ObjSize = ArgIncrement = 8; break;
120 // Create the frame index object for this incoming parameter...
121 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
123 // Create the SelectionDAG nodes corresponding to a load from this parameter
124 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
126 // Don't codegen dead arguments. FIXME: remove this check when we can nuke
130 ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN);
// Dead arguments are replaced by a typed zero instead of a load.
132 if (MVT::isInteger(ObjectVT))
133 ArgValue = DAG.getConstant(0, ObjectVT);
135 ArgValue = DAG.getConstantFP(0, ObjectVT);
137 ArgValues.push_back(ArgValue);
139 ArgOffset += ArgIncrement; // Move on to the next argument...
142 // If the function takes variable number of arguments, make a frame index for
143 // the start of the first vararg value... for expansion of llvm.va_start.
145 VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
146 ReturnAddrIndex = 0; // No return address slot generated yet.
// LowerCallTo - Lower an abstract call into ADJCALLSTACKDOWN, a series of
// argument stores relative to ESP, the call node itself, and ADJCALLSTACKUP.
// Returns the (result value, output chain) pair.
// NOTE(review): the byte-counting switch bodies and several case labels are
// elided in this chunk, so NumBytes' computation is only partially visible.
150 std::pair<SDOperand, SDOperand>
151 X86TargetLowering::LowerCallTo(SDOperand Chain,
152 const Type *RetTy, SDOperand Callee,
153 ArgListTy &Args, SelectionDAG &DAG) {
154 // Count how many bytes are to be pushed on the stack.
155 unsigned NumBytes = 0;
// Zero-argument fast path (guard elided): adjust the stack by 0 bytes.
159 Chain = DAG.getNode(ISD::ADJCALLSTACKDOWN, MVT::Other, Chain,
160 DAG.getConstant(0, getPointerTy()));
162 for (unsigned i = 0, e = Args.size(); i != e; ++i)
163 switch (getValueType(Args[i].second)) {
164 default: assert(0 && "Unknown value type!");
178 Chain = DAG.getNode(ISD::ADJCALLSTACKDOWN, MVT::Other, Chain,
179 DAG.getConstant(NumBytes, getPointerTy()));
181 // Arguments go on the stack in reverse order, as specified by the ABI.
182 unsigned ArgOffset = 0;
183 SDOperand StackPtr = DAG.getCopyFromReg(X86::ESP, MVT::i32);
184 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
// Compute ESP + ArgOffset, the address of this outgoing argument's slot.
186 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
187 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
189 switch (getValueType(Args[i].second)) {
190 default: assert(0 && "Unexpected ValueType for argument!");
194 // Promote the integer to 32 bits. If the input type is signed use a
195 // sign extend, otherwise use a zero extend.
196 if (Args[i].second->isSigned())
197 Args[i].first =DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Args[i].first);
199 Args[i].first =DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Args[i].first);
204 // FIXME: Note that all of these stores are independent of each other.
205 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain,
206 Args[i].first, PtrOff);
211 // FIXME: Note that all of these stores are independent of each other.
212 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain,
213 Args[i].first, PtrOff);
// Build the call's result-type list: the return value (if non-void)
// followed by the chain type.
220 std::vector<MVT::ValueType> RetVals;
221 MVT::ValueType RetTyVT = getValueType(RetTy);
222 if (RetTyVT != MVT::isVoid)
223 RetVals.push_back(RetTyVT);
224 RetVals.push_back(MVT::Other);
226 SDOperand TheCall = SDOperand(DAG.getCall(RetVals, Chain, Callee), 0);
// The chain is result 1 for value-returning calls, result 0 for void calls.
227 Chain = TheCall.getValue(RetTyVT != MVT::isVoid);
228 Chain = DAG.getNode(ISD::ADJCALLSTACKUP, MVT::Other, Chain,
229 DAG.getConstant(NumBytes, getPointerTy()));
230 return std::make_pair(TheCall, Chain);
// LowerVAStart - Lower llvm.va_start: the va_list value is simply the
// address of the VarArgsFrameIndex slot created in LowerArguments.
233 std::pair<SDOperand, SDOperand>
234 X86TargetLowering::LowerVAStart(SDOperand Chain, SelectionDAG &DAG) {
235 // vastart just returns the address of the VarArgsFrameIndex slot.
236 return std::make_pair(DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32), Chain);
// LowerVAArgNext - Lower va_arg (load the value at VAList) and va_next
// (advance VAList past the argument). Types smaller than i32 are expected
// to have been promoted already.
// NOTE(review): lines computing Amt and the isVANext branch structure are
// elided in this chunk.
239 std::pair<SDOperand,SDOperand> X86TargetLowering::
240 LowerVAArgNext(bool isVANext, SDOperand Chain, SDOperand VAList,
241 const Type *ArgTy, SelectionDAG &DAG) {
242 MVT::ValueType ArgVT = getValueType(ArgTy);
245 Result = DAG.getLoad(ArgVT, DAG.getEntryNode(), VAList);
// Advance amount: 4 bytes for i32, 8 for i64/f64 (assignments elided).
248 if (ArgVT == MVT::i32)
251 assert((ArgVT == MVT::i64 || ArgVT == MVT::f64) &&
252 "Other types should have been promoted for varargs!");
255 Result = DAG.getNode(ISD::ADD, VAList.getValueType(), VAList,
256 DAG.getConstant(Amt, VAList.getValueType()));
258 return std::make_pair(Result, Chain);
// LowerFrameReturnAddress - Lower llvm.returnaddress / llvm.frameaddress.
// Only Depth == 0 is supported; deeper queries return constant 0. The
// return-address slot is a fixed frame object at offset -4 (just below the
// incoming arguments), created lazily and cached in ReturnAddrIndex.
262 std::pair<SDOperand, SDOperand> X86TargetLowering::
263 LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
266 if (Depth) // Depths > 0 not supported yet!
267 Result = DAG.getConstant(0, getPointerTy());
269 if (ReturnAddrIndex == 0) {
270 // Set up a frame object for the return address.
271 MachineFunction &MF = DAG.getMachineFunction();
272 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
275 SDOperand RetAddrFI = DAG.getFrameIndex(ReturnAddrIndex, MVT::i32);
278 // Just load the return address
279 Result = DAG.getLoad(MVT::i32, DAG.getEntryNode(), RetAddrFI);
// Frame-address case: the frame base is 4 bytes below the retaddr slot.
281 Result = DAG.getNode(ISD::SUB, MVT::i32, RetAddrFI,
282 DAG.getConstant(4, MVT::i32));
284 return std::make_pair(Result, Chain);
293 NumFPKill("x86-codegen", "Number of FP_REG_KILL instructions added");
295 //===--------------------------------------------------------------------===//
296 /// ISel - X86 specific code to select X86 machine instructions for
297 /// SelectionDAG operations.
// ISel - The X86 SelectionDAG instruction selector. Walks the DAG for each
// basic block, emitting MachineInstrs via SelectExpr/Select, and inserts
// FP_REG_KILL where x87 virtual registers are live, since the FP stackifier
// cannot yet handle cross-block FP values.
// NOTE(review): access specifiers and some member/brace lines are elided in
// this chunk.
299 class ISel : public SelectionDAGISel {
300 /// ContainsFPCode - Every instruction we select that uses or defines a FP
301 /// register should set this to true.
304 /// X86Lowering - This object fully describes how to lower LLVM code to an
305 /// X86-specific SelectionDAG.
306 X86TargetLowering X86Lowering;
308 /// RegPressureMap - This keeps an approximate count of the number of
309 /// registers required to evaluate each node in the graph.
310 std::map<SDNode*, unsigned> RegPressureMap;
312 /// ExprMap - As shared expressions are codegen'd, we keep track of which
313 /// vreg the value is produced in, so we only emit one copy of each compiled
315 std::map<SDOperand, unsigned> ExprMap;
316 std::set<SDOperand> LoweredTokens;
319 ISel(TargetMachine &TM) : SelectionDAGISel(X86Lowering), X86Lowering(TM) {
// Returns the cached Sethi-Ullman-style register-pressure estimate for a
// node (0 for nodes not yet computed by ComputeRegPressure).
322 unsigned getRegPressure(SDOperand O) {
323 return RegPressureMap[O.Val];
325 unsigned ComputeRegPressure(SDOperand O);
327 /// InstructionSelectBasicBlock - This callback is invoked by
328 /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
329 virtual void InstructionSelectBasicBlock(SelectionDAG &DAG) {
330 // While we're doing this, keep track of whether we see any FP code for
331 // FP_REG_KILL insertion.
332 ContainsFPCode = false;
334 // Compute the RegPressureMap, which is an approximation for the number of
335 // registers required to compute each node.
336 ComputeRegPressure(DAG.getRoot());
340 // Codegen the basic block.
341 Select(DAG.getRoot());
343 // Insert FP_REG_KILL instructions into basic blocks that need them. This
344 // only occurs due to the floating point stackifier not being aggressive
345 // enough to handle arbitrary global stackification.
347 // Currently we insert an FP_REG_KILL instruction into each block that
348 // uses or defines a floating point virtual register.
350 // When the global register allocators (like linear scan) finally update
351 // live variable analysis, we can keep floating point values in registers
352 // across basic blocks. This will be a huge win, but we are waiting on
353 // the global allocators before we can do this.
355 if (ContainsFPCode && BB->succ_size()) {
356 BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0);
360 // Clear state used for selection.
362 LoweredTokens.clear();
363 RegPressureMap.clear();
366 bool isFoldableLoad(SDOperand Op);
367 void EmitFoldedLoad(SDOperand Op, X86AddressMode &AM);
370 void EmitCMP(SDOperand LHS, SDOperand RHS);
371 bool EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain, SDOperand Cond);
372 void EmitSelectCC(SDOperand Cond, MVT::ValueType SVT,
373 unsigned RTrue, unsigned RFalse, unsigned RDest);
374 unsigned SelectExpr(SDOperand N);
375 bool SelectAddress(SDOperand N, X86AddressMode &AM);
376 void Select(SDOperand N);
380 // ComputeRegPressure - Compute the RegPressureMap, which is an approximation
381 // for the number of registers required to compute each node. This is basically
382 // computing a generalized form of the Sethi-Ullman number for each node.
// Results are memoized in RegPressureMap; a nonzero cached value is returned
// immediately, so every node is visited at most once.
383 unsigned ISel::ComputeRegPressure(SDOperand O) {
385 unsigned &Result = RegPressureMap[N];
386 if (Result) return Result;
388 // FIXME: Should operations like CALL (which clobber lots o regs) have a
389 // higher fixed cost??
// Leaf nodes (assignment elided in this chunk) cost a fixed small amount.
391 if (N->getNumOperands() == 0) {
394 unsigned MaxRegUse = 0;
395 unsigned NumExtraMaxRegUsers = 0;
396 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
// Constants are free: they fold into instructions as immediates.
398 if (N->getOperand(i).getOpcode() == ISD::Constant)
401 Regs = ComputeRegPressure(N->getOperand(i));
402 if (Regs > MaxRegUse) {
404 NumExtraMaxRegUsers = 0;
405 } else if (Regs == MaxRegUse &&
406 N->getOperand(i).getValueType() != MVT::Other) {
407 ++NumExtraMaxRegUsers;
// Sethi-Ullman-style combine: max child pressure plus one per extra
// operand tied at that maximum.
411 Result = MaxRegUse+NumExtraMaxRegUsers;
413 std::cerr << " WEIGHT: " << Result << " "; N->dump(); std::cerr << "\n";
417 /// SelectAddress - Add the specified node to the specified addressing mode,
418 /// returning true if it cannot be done.
// Recursively pattern-matches frame indices, global addresses, constants,
// scaled shifts/multiplies, and adds into the x86 [base + index*scale + disp]
// addressing mode. NOTE(review): several case labels, return statements, and
// brace lines are elided in this chunk.
419 bool ISel::SelectAddress(SDOperand N, X86AddressMode &AM) {
420 switch (N.getOpcode()) {
// A frame index can be the base only if no register base is set yet.
422 case ISD::FrameIndex:
423 if (AM.BaseType == X86AddressMode::RegBase && AM.Base.Reg == 0) {
424 AM.BaseType = X86AddressMode::FrameIndexBase;
425 AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
429 case ISD::GlobalAddress:
431 AM.GV = cast<GlobalAddressSDNode>(N)->getGlobal();
// Constants fold directly into the displacement.
436 AM.Disp += cast<ConstantSDNode>(N)->getValue();
// Shift-by-1/2/3 becomes the index register with scale 2/4/8.
439 if (AM.IndexReg == 0 || AM.Scale == 1)
440 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1))) {
441 unsigned Val = CN->getValue();
442 if (Val == 1 || Val == 2 || Val == 3) {
444 SDOperand ShVal = N.Val->getOperand(0);
446 // Okay, we know that we have a scale by now. However, if the scaled
447 // value is an add of something and a constant, we can fold the
448 // constant into the disp field here.
449 if (ShVal.Val->getOpcode() == ISD::ADD &&
450 isa<ConstantSDNode>(ShVal.Val->getOperand(1))) {
451 AM.IndexReg = SelectExpr(ShVal.Val->getOperand(0));
452 ConstantSDNode *AddVal =
453 cast<ConstantSDNode>(ShVal.Val->getOperand(1));
454 AM.Disp += AddVal->getValue() << Val;
456 AM.IndexReg = SelectExpr(ShVal);
463 // X*[3,5,9] -> X+X*[2,4,8]
464 if (AM.IndexReg == 0 && AM.BaseType == X86AddressMode::RegBase &&
466 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1)))
467 if (CN->getValue() == 3 || CN->getValue() == 5 || CN->getValue() == 9) {
468 AM.Scale = unsigned(CN->getValue())-1;
470 SDOperand MulVal = N.Val->getOperand(0);
473 // Okay, we know that we have a scale by now. However, if the scaled
474 // value is an add of something and a constant, we can fold the
475 // constant into the disp field here.
476 if (MulVal.Val->getOpcode() == ISD::ADD &&
477 isa<ConstantSDNode>(MulVal.Val->getOperand(1))) {
478 Reg = SelectExpr(MulVal.Val->getOperand(0));
479 ConstantSDNode *AddVal =
480 cast<ConstantSDNode>(MulVal.Val->getOperand(1));
481 AM.Disp += AddVal->getValue() * CN->getValue();
483 Reg = SelectExpr(N.Val->getOperand(0));
// Same register serves as base and index: Reg + Reg*(Scale) == Reg*[3,5,9].
486 AM.IndexReg = AM.Base.Reg = Reg;
// ADD: try matching both operands into the mode; restore on failure.
492 X86AddressMode Backup = AM;
493 if (!SelectAddress(N.Val->getOperand(0), AM) &&
494 !SelectAddress(N.Val->getOperand(1), AM))
501 // Is the base register already occupied?
502 if (AM.BaseType != X86AddressMode::RegBase || AM.Base.Reg) {
503 // If so, check to see if the scale index register is set.
504 if (AM.IndexReg == 0) {
505 AM.IndexReg = SelectExpr(N)
510 // Otherwise, we cannot select it.
514 // Default, generate it as a register.
515 AM.BaseType = X86AddressMode::RegBase;
516 AM.Base.Reg = SelectExpr(N);
520 /// Emit2SetCCsAndLogical - Emit the following sequence of instructions,
521 /// assuming that the temporary registers are in the 8-bit register class.
// Tmp1 = SetCC1; Tmp2 = SetCC2; then combine:
525 /// DestReg = logicalop Tmp1, Tmp2
// Used by EmitSetCC for FP conditions that need two flag tests (e.g.
// ordered-equal = SETNP AND SETE) joined by an 8-bit AND/OR.
527 static void Emit2SetCCsAndLogical(MachineBasicBlock *BB, unsigned SetCC1,
528 unsigned SetCC2, unsigned LogicalOp,
530 SSARegMap *RegMap = BB->getParent()->getSSARegMap();
531 unsigned Tmp1 = RegMap->createVirtualRegister(X86::R8RegisterClass);
532 unsigned Tmp2 = RegMap->createVirtualRegister(X86::R8RegisterClass);
533 BuildMI(BB, SetCC1, 0, Tmp1);
534 BuildMI(BB, SetCC2, 0, Tmp2);
535 BuildMI(BB, LogicalOp, 2, DestReg).addReg(Tmp1).addReg(Tmp2);
538 /// EmitSetCC - Emit the code to set the specified 8-bit register to 1 if the
539 /// condition codes match the specified SetCCOpcode. Note that some conditions
540 /// require multiple instructions to generate the correct value.
// NOTE(review): the isFP branch structure, some case labels, and break/return
// lines are elided in this chunk.
541 static void EmitSetCC(MachineBasicBlock *BB, unsigned DestReg,
542 ISD::CondCode SetCCOpcode, bool isFP) {
// Integer conditions: each maps to exactly one SETcc instruction.
545 switch (SetCCOpcode) {
546 default: assert(0 && "Illegal integer SetCC!");
547 case ISD::SETEQ: Opc = X86::SETEr; break;
548 case ISD::SETGT: Opc = X86::SETGr; break;
549 case ISD::SETGE: Opc = X86::SETGEr; break;
550 case ISD::SETLT: Opc = X86::SETLr; break;
551 case ISD::SETLE: Opc = X86::SETLEr; break;
552 case ISD::SETNE: Opc = X86::SETNEr; break;
553 case ISD::SETULT: Opc = X86::SETBr; break;
554 case ISD::SETUGT: Opc = X86::SETAr; break;
555 case ISD::SETULE: Opc = X86::SETBEr; break;
556 case ISD::SETUGE: Opc = X86::SETAEr; break;
559 // On a floating point condition, the flags are set as follows:
// (table rows elided; the comparison is assumed to come from FUCOMI-style
// flag setting — see EmitCMP's use of X86::FUCOMIr)
563 // 1 | 0 | 0 | X == Y
564 // 1 | 1 | 1 | unordered
566 switch (SetCCOpcode) {
567 default: assert(0 && "Invalid FP setcc!");
570 Opc = X86::SETEr; // True if ZF = 1
574 Opc = X86::SETAr; // True if CF = 0 and ZF = 0
578 Opc = X86::SETAEr; // True if CF = 0
582 Opc = X86::SETBr; // True if CF = 1
586 Opc = X86::SETBEr; // True if CF = 1 or ZF = 1
590 Opc = X86::SETNEr; // True if ZF = 0
593 Opc = X86::SETPr; // True if PF = 1
596 Opc = X86::SETNPr; // True if PF = 0
// Conditions below need two SETcc's combined with AND/OR (see helper).
598 case ISD::SETOEQ: // !PF & ZF
599 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETEr, X86::AND8rr, DestReg);
601 case ISD::SETOLT: // !PF & CF
602 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETBr, X86::AND8rr, DestReg);
604 case ISD::SETOLE: // !PF & (CF || ZF)
605 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETBEr, X86::AND8rr, DestReg);
607 case ISD::SETUGT: // PF | (!ZF & !CF)
608 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETAr, X86::OR8rr, DestReg);
610 case ISD::SETUGE: // PF | !CF
611 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETAEr, X86::OR8rr, DestReg);
613 case ISD::SETUNE: // PF | !ZF
614 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETNEr, X86::OR8rr, DestReg);
618 BuildMI(BB, Opc, 0, DestReg);
622 /// EmitBranchCC - Emit code into BB that arranges for control to transfer to
623 /// the Dest block if the Cond condition is true. If we cannot fold this
624 /// condition into the branch, return true.
// NOTE(review): several return statements, brace lines, and the else
// branches are elided in this chunk.
626 bool ISel::EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain,
628 // FIXME: Evaluate whether it would be good to emit code like (X < Y) | (A >
629 // B) using two conditional branches instead of one condbr, two setcc's, and
631 if ((Cond.getOpcode() == ISD::OR ||
632 Cond.getOpcode() == ISD::AND) && Cond.Val->hasOneUse()) {
633 // And and or set the flags for us, so there is no need to emit a TST of the
634 // result. It is only safe to do this if there is only a single use of the
635 // AND/OR though, otherwise we don't know it will be emitted here.
638 BuildMI(BB, X86::JNE, 1).addMBB(Dest);
642 // Codegen br not C -> JE.
643 if (Cond.getOpcode() == ISD::XOR)
644 if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(Cond.Val->getOperand(1)))
645 if (NC->isAllOnesValue()) {
// Order chain vs. condition evaluation by estimated register pressure.
647 if (getRegPressure(Chain) > getRegPressure(Cond)) {
649 CondR = SelectExpr(Cond.Val->getOperand(0));
651 CondR = SelectExpr(Cond.Val->getOperand(0));
654 BuildMI(BB, X86::TEST8rr, 2).addReg(CondR).addReg(CondR);
655 BuildMI(BB, X86::JE, 1).addMBB(Dest);
659 SetCCSDNode *SetCC = dyn_cast<SetCCSDNode>(Cond);
661 return true; // Can only handle simple setcc's so far.
665 // Handle integer conditions first.
666 if (MVT::isInteger(SetCC->getOperand(0).getValueType())) {
667 switch (SetCC->getCondition()) {
668 default: assert(0 && "Illegal integer SetCC!");
669 case ISD::SETEQ: Opc = X86::JE; break;
670 case ISD::SETGT: Opc = X86::JG; break;
671 case ISD::SETGE: Opc = X86::JGE; break;
672 case ISD::SETLT: Opc = X86::JL; break;
673 case ISD::SETLE: Opc = X86::JLE; break;
674 case ISD::SETNE: Opc = X86::JNE; break;
675 case ISD::SETULT: Opc = X86::JB; break;
676 case ISD::SETUGT: Opc = X86::JA; break;
677 case ISD::SETULE: Opc = X86::JBE; break;
678 case ISD::SETUGE: Opc = X86::JAE; break;
681 EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1));
682 BuildMI(BB, Opc, 1).addMBB(Dest);
686 unsigned Opc2 = 0; // Second branch if needed.
688 // On a floating point condition, the flags are set as follows:
692 // 1 | 0 | 0 | X == Y
693 // 1 | 1 | 1 | unordered
695 switch (SetCC->getCondition()) {
696 default: assert(0 && "Invalid FP setcc!");
698 case ISD::SETEQ: Opc = X86::JE; break; // True if ZF = 1
700 case ISD::SETGT: Opc = X86::JA; break; // True if CF = 0 and ZF = 0
702 case ISD::SETGE: Opc = X86::JAE; break; // True if CF = 0
704 case ISD::SETLT: Opc = X86::JB; break; // True if CF = 1
706 case ISD::SETLE: Opc = X86::JBE; break; // True if CF = 1 or ZF = 1
708 case ISD::SETNE: Opc = X86::JNE; break; // True if ZF = 0
709 case ISD::SETUO: Opc = X86::JP; break; // True if PF = 1
710 case ISD::SETO: Opc = X86::JNP; break; // True if PF = 0
// Unordered comparisons need two branches: the primary condition plus a
// JP to catch the NaN (PF = 1) case.
711 case ISD::SETUGT: // PF = 1 | (ZF = 0 & CF = 0)
712 Opc = X86::JA; // ZF = 0 & CF = 0
713 Opc2 = X86::JP; // PF = 1
715 case ISD::SETUGE: // PF = 1 | CF = 0
716 Opc = X86::JAE; // CF = 0
717 Opc2 = X86::JP; // PF = 1
719 case ISD::SETUNE: // PF = 1 | ZF = 0
720 Opc = X86::JNE; // ZF = 0
721 Opc2 = X86::JP; // PF = 1
// Ordered AND-style conditions can't be expressed as branch | branch;
// bail out and let the caller materialize the setcc value.
723 case ISD::SETOEQ: // PF = 0 & ZF = 1
726 return true; // FIXME: Emit more efficient code for this branch.
727 case ISD::SETOLT: // PF = 0 & CF = 1
730 return true; // FIXME: Emit more efficient code for this branch.
731 case ISD::SETOLE: // PF = 0 & (CF = 1 || ZF = 1)
734 return true; // FIXME: Emit more efficient code for this branch.
738 EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1));
739 BuildMI(BB, Opc, 1).addMBB(Dest);
741 BuildMI(BB, Opc2, 1).addMBB(Dest);
745 /// EmitSelectCC - Emit code into BB that performs a select operation between
746 /// the two registers RTrue and RFalse, generating a result into RDest. Return
747 /// true if the fold cannot be performed.
// Tries to fold a setcc condition directly into a single CMOV/FCMOV via
// lookup tables indexed by the CondCode enum below; otherwise materializes
// the condition into a register, tests it, and uses CMOVE with the operands
// swapped. NOTE(review): enum/brace/break lines are elided in this chunk;
// the doc comment says "return true" but the visible signature is void.
749 void ISel::EmitSelectCC(SDOperand Cond, MVT::ValueType SVT,
750 unsigned RTrue, unsigned RFalse, unsigned RDest) {
// CondCode enumerators double as indices into the CMOVTAB* tables below,
// so the enum order and table order must stay in sync.
752 EQ, NE, LT, LE, GT, GE, B, BE, A, AE, P, NP,
754 } CondCode = NOT_SET;
756 static const unsigned CMOVTAB16[] = {
757 X86::CMOVE16rr, X86::CMOVNE16rr, X86::CMOVL16rr, X86::CMOVLE16rr,
758 X86::CMOVG16rr, X86::CMOVGE16rr, X86::CMOVB16rr, X86::CMOVBE16rr,
759 X86::CMOVA16rr, X86::CMOVAE16rr, X86::CMOVP16rr, X86::CMOVNP16rr,
761 static const unsigned CMOVTAB32[] = {
762 X86::CMOVE32rr, X86::CMOVNE32rr, X86::CMOVL32rr, X86::CMOVLE32rr,
763 X86::CMOVG32rr, X86::CMOVGE32rr, X86::CMOVB32rr, X86::CMOVBE32rr,
764 X86::CMOVA32rr, X86::CMOVAE32rr, X86::CMOVP32rr, X86::CMOVNP32rr,
// FCMOV has no signed-compare forms; zero entries mean "no single cmov".
766 static const unsigned CMOVTABFP[] = {
767 X86::FCMOVE , X86::FCMOVNE, /*missing*/0, /*missing*/0,
768 /*missing*/0, /*missing*/0, X86::FCMOVB , X86::FCMOVBE,
769 X86::FCMOVA , X86::FCMOVAE, X86::FCMOVP , X86::FCMOVNP
772 if (SetCCSDNode *SetCC = dyn_cast<SetCCSDNode>(Cond)) {
773 if (MVT::isInteger(SetCC->getOperand(0).getValueType())) {
774 switch (SetCC->getCondition()) {
775 default: assert(0 && "Unknown integer comparison!");
776 case ISD::SETEQ: CondCode = EQ; break;
777 case ISD::SETGT: CondCode = GT; break;
778 case ISD::SETGE: CondCode = GE; break;
779 case ISD::SETLT: CondCode = LT; break;
780 case ISD::SETLE: CondCode = LE; break;
781 case ISD::SETNE: CondCode = NE; break;
782 case ISD::SETULT: CondCode = B; break;
783 case ISD::SETUGT: CondCode = A; break;
784 case ISD::SETULE: CondCode = BE; break;
785 case ISD::SETUGE: CondCode = AE; break;
788 // On a floating point condition, the flags are set as follows:
792 // 1 | 0 | 0 | X == Y
793 // 1 | 1 | 1 | unordered
795 switch (SetCC->getCondition()) {
796 default: assert(0 && "Unknown FP comparison!");
798 case ISD::SETEQ: CondCode = EQ; break; // True if ZF = 1
800 case ISD::SETGT: CondCode = A; break; // True if CF = 0 and ZF = 0
802 case ISD::SETGE: CondCode = AE; break; // True if CF = 0
804 case ISD::SETLT: CondCode = B; break; // True if CF = 1
806 case ISD::SETLE: CondCode = BE; break; // True if CF = 1 or ZF = 1
808 case ISD::SETNE: CondCode = NE; break; // True if ZF = 0
809 case ISD::SETUO: CondCode = P; break; // True if PF = 1
810 case ISD::SETO: CondCode = NP; break; // True if PF = 0
811 case ISD::SETUGT: // PF = 1 | (ZF = 0 & CF = 0)
812 case ISD::SETUGE: // PF = 1 | CF = 0
813 case ISD::SETUNE: // PF = 1 | ZF = 0
814 case ISD::SETOEQ: // PF = 0 & ZF = 1
815 case ISD::SETOLT: // PF = 0 & CF = 1
816 case ISD::SETOLE: // PF = 0 & (CF = 1 || ZF = 1)
817 // We cannot emit this comparison as a single cmov.
// Fold succeeded: pick the cmov opcode for the value type.
824 if (CondCode != NOT_SET) {
826 default: assert(0 && "Cannot select this type!");
827 case MVT::i16: Opc = CMOVTAB16[CondCode]; break;
828 case MVT::i32: Opc = CMOVTAB32[CondCode]; break;
830 case MVT::f64: Opc = CMOVTABFP[CondCode]; break;
834 // Finally, if we weren't able to fold this, just emit the condition and test
836 if (CondCode == NOT_SET || Opc == 0) {
837 // Get the condition into the zero flag.
838 unsigned CondReg = SelectExpr(Cond);
839 BuildMI(BB, X86::TEST8rr, 2).addReg(CondReg).addReg(CondReg);
842 default: assert(0 && "Cannot select this type!");
843 case MVT::i16: Opc = X86::CMOVE16rr; break;
844 case MVT::i32: Opc = X86::CMOVE32rr; break;
846 case MVT::f64: Opc = X86::FCMOVE; break;
849 // FIXME: CMP R, 0 -> TEST R, R
850 EmitCMP(Cond.getOperand(0), Cond.getOperand(1));
// CMOVE moves on equal/zero, i.e. when the condition is FALSE, so the
// true/false operands must be swapped before emitting it.
851 std::swap(RTrue, RFalse);
853 BuildMI(BB, Opc, 2, RDest).addReg(RTrue).addReg(RFalse);
// EmitCMP - Emit a compare of LHS against RHS that sets EFLAGS for a later
// SETcc/Jcc/CMOV. Uses CMPri when RHS is a constant, otherwise CMPrr (or
// FUCOMIr for f64), selecting the higher-pressure operand first.
// NOTE(review): brace/return lines and some case labels are elided here.
856 void ISel::EmitCMP(SDOperand LHS, SDOperand RHS) {
// Fold a constant RHS into the compare's immediate field.
858 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(RHS)) {
860 switch (RHS.getValueType()) {
863 case MVT::i8: Opc = X86::CMP8ri; break;
864 case MVT::i16: Opc = X86::CMP16ri; break;
865 case MVT::i32: Opc = X86::CMP32ri; break;
868 unsigned Tmp1 = SelectExpr(LHS);
869 BuildMI(BB, Opc, 2).addReg(Tmp1).addImm(CN->getValue());
874 switch (LHS.getValueType()) {
875 default: assert(0 && "Cannot compare this value!");
877 case MVT::i8: Opc = X86::CMP8rr; break;
878 case MVT::i16: Opc = X86::CMP16rr; break;
879 case MVT::i32: Opc = X86::CMP32rr; break;
881 case MVT::f64: Opc = X86::FUCOMIr; break;
// Evaluate the operand with higher estimated register pressure first
// (Sethi-Ullman ordering) to minimize live registers.
884 if (getRegPressure(LHS) > getRegPressure(RHS)) {
885 Tmp1 = SelectExpr(LHS);
886 Tmp2 = SelectExpr(RHS);
888 Tmp2 = SelectExpr(RHS);
889 Tmp1 = SelectExpr(LHS);
891 BuildMI(BB, Opc, 2).addReg(Tmp1).addReg(Tmp2);
894 /// isFoldableLoad - Return true if this is a load instruction that can safely
895 /// be folded into an operation that uses it.
896 bool ISel::isFoldableLoad(SDOperand Op) {
897 if (Op.getOpcode() != ISD::LOAD ||
898 // FIXME: currently can't fold constant pool indexes.
899 isa<ConstantPoolSDNode>(Op.getOperand(1)))
902 // If this load has already been emitted, we clearly can't fold it.
903 if (ExprMap.count(Op)) return false;
// Exactly two uses: the folding operation plus the token-chain user.
// More uses would force the load to be emitted separately anyway.
905 return Op.Val->use_size() == 2;
908 /// EmitFoldedLoad - Ensure that the arguments of the load are code generated,
909 /// and compute the address being loaded into AM.
// After this returns, the caller emits the load as part of another
// instruction (memory-operand form); the load's chain result is marked
// lowered so it is never emitted as a standalone instruction.
910 void ISel::EmitFoldedLoad(SDOperand Op, X86AddressMode &AM) {
911 SDOperand Chain = Op.getOperand(0);
912 SDOperand Address = Op.getOperand(1);
// Higher-pressure side first (the Chain-first branch is elided here).
913 if (getRegPressure(Chain) > getRegPressure(Address)) {
915 SelectAddress(Address, AM);
917 SelectAddress(Address, AM);
921 // The chain for this load is now lowered.
922 LoweredTokens.insert(SDOperand(Op.Val, 1));
923 ExprMap[SDOperand(Op.Val, 1)] = 1;
926 unsigned ISel::SelectExpr(SDOperand N) {
928 unsigned Tmp1, Tmp2, Tmp3;
930 SDNode *Node = N.Val;
933 if (Node->getOpcode() == ISD::CopyFromReg)
934 // Just use the specified register as our input.
935 return dyn_cast<CopyRegSDNode>(Node)->getReg();
937 unsigned &Reg = ExprMap[N];
940 if (N.getOpcode() != ISD::CALL)
941 Reg = Result = (N.getValueType() != MVT::Other) ?
942 MakeReg(N.getValueType()) : 1;
944 // If this is a call instruction, make sure to prepare ALL of the result
945 // values as well as the chain.
946 if (Node->getNumValues() == 1)
947 Reg = Result = 1; // Void call, just a chain.
949 Result = MakeReg(Node->getValueType(0));
950 ExprMap[N.getValue(0)] = Result;
951 for (unsigned i = 1, e = N.Val->getNumValues()-1; i != e; ++i)
952 ExprMap[N.getValue(i)] = MakeReg(Node->getValueType(i));
953 ExprMap[SDOperand(Node, Node->getNumValues()-1)] = 1;
957 switch (N.getOpcode()) {
960 assert(0 && "Node not handled!\n");
961 case ISD::FrameIndex:
962 Tmp1 = cast<FrameIndexSDNode>(N)->getIndex();
963 addFrameReference(BuildMI(BB, X86::LEA32r, 4, Result), (int)Tmp1);
965 case ISD::ConstantPool:
966 Tmp1 = cast<ConstantPoolSDNode>(N)->getIndex();
967 addConstantPoolReference(BuildMI(BB, X86::LEA32r, 4, Result), Tmp1);
969 case ISD::ConstantFP:
970 ContainsFPCode = true;
971 Tmp1 = Result; // Intermediate Register
972 if (cast<ConstantFPSDNode>(N)->getValue() < 0.0 ||
973 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
974 Tmp1 = MakeReg(MVT::f64);
976 if (cast<ConstantFPSDNode>(N)->isExactlyValue(+0.0) ||
977 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
978 BuildMI(BB, X86::FLD0, 0, Tmp1);
979 else if (cast<ConstantFPSDNode>(N)->isExactlyValue(+1.0) ||
980 cast<ConstantFPSDNode>(N)->isExactlyValue(-1.0))
981 BuildMI(BB, X86::FLD1, 0, Tmp1);
983 assert(0 && "Unexpected constant!");
985 BuildMI(BB, X86::FCHS, 1, Result).addReg(Tmp1);
988 switch (N.getValueType()) {
989 default: assert(0 && "Cannot use constants of this type!");
991 case MVT::i8: Opc = X86::MOV8ri; break;
992 case MVT::i16: Opc = X86::MOV16ri; break;
993 case MVT::i32: Opc = X86::MOV32ri; break;
995 BuildMI(BB, Opc, 1,Result).addImm(cast<ConstantSDNode>(N)->getValue());
997 case ISD::GlobalAddress: {
998 GlobalValue *GV = cast<GlobalAddressSDNode>(N)->getGlobal();
999 BuildMI(BB, X86::MOV32ri, 1, Result).addGlobalAddress(GV);
1002 case ISD::ExternalSymbol: {
1003 const char *Sym = cast<ExternalSymbolSDNode>(N)->getSymbol();
1004 BuildMI(BB, X86::MOV32ri, 1, Result).addExternalSymbol(Sym);
1007 case ISD::FP_EXTEND:
1008 Tmp1 = SelectExpr(N.getOperand(0));
1009 BuildMI(BB, X86::FpMOV, 1, Result).addReg(Tmp1);
1011 case ISD::ZERO_EXTEND: {
1012 int DestIs16 = N.getValueType() == MVT::i16;
1013 int SrcIs16 = N.getOperand(0).getValueType() == MVT::i16;
1014 Tmp1 = SelectExpr(N.getOperand(0));
1016 // FIXME: This hack is here for zero extension casts from bool to i8. This
1017 // would not be needed if bools were promoted by Legalize.
1018 if (N.getValueType() == MVT::i8) {
1019 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(Tmp1);
1023 static const unsigned Opc[3] = {
1024 X86::MOVZX32rr8, X86::MOVZX32rr16, X86::MOVZX16rr8
1026 BuildMI(BB, Opc[SrcIs16+DestIs16*2], 1, Result).addReg(Tmp1);
1029 case ISD::SIGN_EXTEND: {
1030 int DestIs16 = N.getValueType() == MVT::i16;
1031 int SrcIs16 = N.getOperand(0).getValueType() == MVT::i16;
1033 // FIXME: Legalize should promote bools to i8!
1034 assert(N.getOperand(0).getValueType() != MVT::i1 &&
1035 "Sign extend from bool not implemented!");
1037 static const unsigned Opc[3] = {
1038 X86::MOVSX32rr8, X86::MOVSX32rr16, X86::MOVSX16rr8
1040 Tmp1 = SelectExpr(N.getOperand(0));
1041 BuildMI(BB, Opc[SrcIs16+DestIs16*2], 1, Result).addReg(Tmp1);
1045 // Handle cast of LARGER int to SMALLER int using a move to EAX followed by
1046 // a move out of AX or AL.
1047 switch (N.getOperand(0).getValueType()) {
1048 default: assert(0 && "Unknown truncate!");
1049 case MVT::i8: Tmp2 = X86::AL; Opc = X86::MOV8rr; break;
1050 case MVT::i16: Tmp2 = X86::AX; Opc = X86::MOV16rr; break;
1051 case MVT::i32: Tmp2 = X86::EAX; Opc = X86::MOV32rr; break;
1053 Tmp1 = SelectExpr(N.getOperand(0));
1054 BuildMI(BB, Opc, 1, Tmp2).addReg(Tmp1);
1056 switch (N.getValueType()) {
1057 default: assert(0 && "Unknown truncate!");
1059 case MVT::i8: Tmp2 = X86::AL; Opc = X86::MOV8rr; break;
1060 case MVT::i16: Tmp2 = X86::AX; Opc = X86::MOV16rr; break;
1062 BuildMI(BB, Opc, 1, Result).addReg(Tmp2);
1066 // Truncate from double to float by storing to memory as float,
1067 // then reading it back into a register.
1069 // Create as stack slot to use.
1070 // FIXME: This should automatically be made by the Legalizer!
1071 Tmp1 = TLI.getTargetData().getFloatAlignment();
1072 Tmp2 = BB->getParent()->getFrameInfo()->CreateStackObject(4, Tmp1);
1074 // Codegen the input.
1075 Tmp1 = SelectExpr(N.getOperand(0));
1077 // Emit the store, then the reload.
1078 addFrameReference(BuildMI(BB, X86::FST32m, 5), Tmp2).addReg(Tmp1);
1079 addFrameReference(BuildMI(BB, X86::FLD32m, 5, Result), Tmp2);
1082 case ISD::SINT_TO_FP:
1083 case ISD::UINT_TO_FP: {
1084 // FIXME: Most of this grunt work should be done by legalize!
1085 ContainsFPCode = true;
1087 // Promote the integer to a type supported by FLD. We do this because there
1088 // are no unsigned FLD instructions, so we must promote an unsigned value to
1089 // a larger signed value, then use FLD on the larger value.
1091 MVT::ValueType PromoteType = MVT::Other;
1092 MVT::ValueType SrcTy = N.getOperand(0).getValueType();
1093 unsigned PromoteOpcode = 0;
1094 unsigned RealDestReg = Result;
1098 // We don't have the facilities for directly loading byte sized data from
1099 // memory (even signed). Promote it to 16 bits.
1100 PromoteType = MVT::i16;
1101 PromoteOpcode = Node->getOpcode() == ISD::SINT_TO_FP ?
1102 X86::MOVSX16rr8 : X86::MOVZX16rr8;
1105 if (Node->getOpcode() == ISD::UINT_TO_FP) {
1106 PromoteType = MVT::i32;
1107 PromoteOpcode = X86::MOVZX32rr16;
1111 // Don't fild into the real destination.
1112 if (Node->getOpcode() == ISD::UINT_TO_FP)
1113 Result = MakeReg(Node->getValueType(0));
1117 Tmp1 = SelectExpr(N.getOperand(0)); // Get the operand register
1119 if (PromoteType != MVT::Other) {
1120 Tmp2 = MakeReg(PromoteType);
1121 BuildMI(BB, PromoteOpcode, 1, Tmp2).addReg(Tmp1);
1122 SrcTy = PromoteType;
1126 // Spill the integer to memory and reload it from there.
1127 unsigned Size = MVT::getSizeInBits(SrcTy)/8;
1128 MachineFunction *F = BB->getParent();
1129 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
1133 // FIXME: this won't work for cast [u]long to FP
1134 addFrameReference(BuildMI(BB, X86::MOV32mr, 5),
1135 FrameIdx).addReg(Tmp1);
1136 addFrameReference(BuildMI(BB, X86::MOV32mr, 5),
1137 FrameIdx, 4).addReg(Tmp1+1);
1138 addFrameReference(BuildMI(BB, X86::FILD64m, 5, Result), FrameIdx);
1141 addFrameReference(BuildMI(BB, X86::MOV32mr, 5),
1142 FrameIdx).addReg(Tmp1);
1143 addFrameReference(BuildMI(BB, X86::FILD32m, 5, Result), FrameIdx);
1146 addFrameReference(BuildMI(BB, X86::MOV16mr, 5),
1147 FrameIdx).addReg(Tmp1);
1148 addFrameReference(BuildMI(BB, X86::FILD16m, 5, Result), FrameIdx);
1150 default: break; // No promotion required.
1153 if (Node->getOpcode() == ISD::UINT_TO_FP && SrcTy == MVT::i32) {
1154 // If this is a cast from uint -> double, we need to be careful when if
1155 // the "sign" bit is set. If so, we don't want to make a negative number,
1156 // we want to make a positive number. Emit code to add an offset if the
1159 // Compute whether the sign bit is set by shifting the reg right 31 bits.
1160 unsigned IsNeg = MakeReg(MVT::i32);
1161 BuildMI(BB, X86::SHR32ri, 2, IsNeg).addReg(Tmp1).addImm(31);
1163 // Create a CP value that has the offset in one word and 0 in the other.
1164 static ConstantInt *TheOffset = ConstantUInt::get(Type::ULongTy,
1165 0x4f80000000000000ULL);
1166 unsigned CPI = F->getConstantPool()->getConstantPoolIndex(TheOffset);
1167 BuildMI(BB, X86::FADD32m, 5, RealDestReg).addReg(Result)
1168 .addConstantPoolIndex(CPI).addZImm(4).addReg(IsNeg).addSImm(0);
1170 } else if (Node->getOpcode() == ISD::UINT_TO_FP && SrcTy == MVT::i64) {
1171 // We need special handling for unsigned 64-bit integer sources. If the
1172 // input number has the "sign bit" set, then we loaded it incorrectly as a
1173 // negative 64-bit number. In this case, add an offset value.
1175 // Emit a test instruction to see if the dynamic input value was signed.
1176 BuildMI(BB, X86::TEST32rr, 2).addReg(Tmp1+1).addReg(Tmp1+1);
1178 // If the sign bit is set, get a pointer to an offset, otherwise get a
1179 // pointer to a zero.
1180 MachineConstantPool *CP = F->getConstantPool();
1181 unsigned Zero = MakeReg(MVT::i32);
1182 Constant *Null = Constant::getNullValue(Type::UIntTy);
1183 addConstantPoolReference(BuildMI(BB, X86::LEA32r, 5, Zero),
1184 CP->getConstantPoolIndex(Null));
1185 unsigned Offset = MakeReg(MVT::i32);
1186 Constant *OffsetCst = ConstantUInt::get(Type::UIntTy, 0x5f800000);
1188 addConstantPoolReference(BuildMI(BB, X86::LEA32r, 5, Offset),
1189 CP->getConstantPoolIndex(OffsetCst));
1190 unsigned Addr = MakeReg(MVT::i32);
1191 BuildMI(BB, X86::CMOVS32rr, 2, Addr).addReg(Zero).addReg(Offset);
1193 // Load the constant for an add. FIXME: this could make an 'fadd' that
1194 // reads directly from memory, but we don't support these yet.
1195 unsigned ConstReg = MakeReg(MVT::f64);
1196 addDirectMem(BuildMI(BB, X86::FLD32m, 4, ConstReg), Addr);
1198 BuildMI(BB, X86::FpADD, 2, RealDestReg).addReg(ConstReg).addReg(Result);
1202 case ISD::FP_TO_SINT:
1203 case ISD::FP_TO_UINT: {
1204 // FIXME: Most of this grunt work should be done by legalize!
1205 Tmp1 = SelectExpr(N.getOperand(0)); // Get the operand register
1207 // Change the floating point control register to use "round towards zero"
1208 // mode when truncating to an integer value.
1210 MachineFunction *F = BB->getParent();
1211 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
1212 addFrameReference(BuildMI(BB, X86::FNSTCW16m, 4), CWFrameIdx);
1214 // Load the old value of the high byte of the control word...
1215 unsigned HighPartOfCW = MakeReg(MVT::i8);
1216 addFrameReference(BuildMI(BB, X86::MOV8rm, 4, HighPartOfCW),
1219 // Set the high part to be round to zero...
1220 addFrameReference(BuildMI(BB, X86::MOV8mi, 5),
1221 CWFrameIdx, 1).addImm(12);
1223 // Reload the modified control word now...
1224 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
1226 // Restore the memory image of control word to original value
1227 addFrameReference(BuildMI(BB, X86::MOV8mr, 5),
1228 CWFrameIdx, 1).addReg(HighPartOfCW);
1230 // We don't have the facilities for directly storing byte sized data to
1231 // memory. Promote it to 16 bits. We also must promote unsigned values to
1232 // larger classes because we only have signed FP stores.
1233 MVT::ValueType StoreClass = Node->getValueType(0);
1234 if (StoreClass == MVT::i8 || Node->getOpcode() == ISD::FP_TO_UINT)
1235 switch (StoreClass) {
1236 case MVT::i8: StoreClass = MVT::i16; break;
1237 case MVT::i16: StoreClass = MVT::i32; break;
1238 case MVT::i32: StoreClass = MVT::i64; break;
1239 // The following treatment of cLong may not be perfectly right,
1240 // but it survives chains of casts of the form
1241 // double->ulong->double.
1242 case MVT::i64: StoreClass = MVT::i64; break;
1243 default: assert(0 && "Unknown store class!");
1246 // Spill the integer to memory and reload it from there.
1247 unsigned Size = MVT::getSizeInBits(StoreClass)/8;
1248 int FrameIdx = F->getFrameInfo()->CreateStackObject(Size, Size);
1250 switch (StoreClass) {
1251 default: assert(0 && "Unknown store class!");
1253 addFrameReference(BuildMI(BB, X86::FIST16m, 5), FrameIdx).addReg(Tmp1);
1256 addFrameReference(BuildMI(BB, X86::FIST32m, 5), FrameIdx).addReg(Tmp1);
1259 addFrameReference(BuildMI(BB, X86::FISTP64m, 5), FrameIdx).addReg(Tmp1);
1263 switch (Node->getValueType(0)) {
1265 assert(0 && "Unknown integer type!");
1267 // FIXME: this isn't gunna work.
1268 addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Result), FrameIdx);
1269 addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Result+1), FrameIdx, 4);
1271 addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Result), FrameIdx);
1274 addFrameReference(BuildMI(BB, X86::MOV16rm, 4, Result), FrameIdx);
1277 addFrameReference(BuildMI(BB, X86::MOV8rm, 4, Result), FrameIdx);
1281 // Reload the original control word now.
1282 addFrameReference(BuildMI(BB, X86::FLDCW16m, 4), CWFrameIdx);
1286 Op0 = N.getOperand(0);
1287 Op1 = N.getOperand(1);
1289 if (isFoldableLoad(Op0))
1290 std::swap(Op0, Op1);
1292 if (isFoldableLoad(Op1)) {
1293 switch (N.getValueType()) {
1294 default: assert(0 && "Cannot add this type!");
1296 case MVT::i8: Opc = X86::ADD8rm; break;
1297 case MVT::i16: Opc = X86::ADD16rm; break;
1298 case MVT::i32: Opc = X86::ADD32rm; break;
1299 case MVT::f32: Opc = X86::FADD32m; break;
1300 case MVT::f64: Opc = X86::FADD64m; break;
1303 if (getRegPressure(Op0) > getRegPressure(Op1)) {
1304 Tmp1 = SelectExpr(Op0);
1305 EmitFoldedLoad(Op1, AM);
1307 EmitFoldedLoad(Op1, AM);
1308 Tmp1 = SelectExpr(Op0);
1310 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
1314 // See if we can codegen this as an LEA to fold operations together.
1315 if (N.getValueType() == MVT::i32) {
1317 if (!SelectAddress(Op0, AM) && !SelectAddress(Op1, AM)) {
1318 // If this is not just an add, emit the LEA. For a simple add (like
1319 // reg+reg or reg+imm), we just emit an add. It might be a good idea to
1320 // leave this as LEA, then peephole it to 'ADD' after two address elim
1322 if (AM.Scale != 1 || AM.BaseType == X86AddressMode::FrameIndexBase ||
1323 AM.GV || (AM.Base.Reg && AM.IndexReg && AM.Disp)) {
1324 addFullAddress(BuildMI(BB, X86::LEA32r, 4, Result), AM);
1330 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
1332 if (CN->getValue() == 1) { // add X, 1 -> inc X
1333 switch (N.getValueType()) {
1334 default: assert(0 && "Cannot integer add this type!");
1335 case MVT::i8: Opc = X86::INC8r; break;
1336 case MVT::i16: Opc = X86::INC16r; break;
1337 case MVT::i32: Opc = X86::INC32r; break;
1339 } else if (CN->isAllOnesValue()) { // add X, -1 -> dec X
1340 switch (N.getValueType()) {
1341 default: assert(0 && "Cannot integer add this type!");
1342 case MVT::i8: Opc = X86::DEC8r; break;
1343 case MVT::i16: Opc = X86::DEC16r; break;
1344 case MVT::i32: Opc = X86::DEC32r; break;
1349 Tmp1 = SelectExpr(Op0);
1350 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
1354 switch (N.getValueType()) {
1355 default: assert(0 && "Cannot add this type!");
1356 case MVT::i8: Opc = X86::ADD8ri; break;
1357 case MVT::i16: Opc = X86::ADD16ri; break;
1358 case MVT::i32: Opc = X86::ADD32ri; break;
1361 Tmp1 = SelectExpr(Op0);
1362 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
1367 switch (N.getValueType()) {
1368 default: assert(0 && "Cannot add this type!");
1369 case MVT::i8: Opc = X86::ADD8rr; break;
1370 case MVT::i16: Opc = X86::ADD16rr; break;
1371 case MVT::i32: Opc = X86::ADD32rr; break;
1373 case MVT::f64: Opc = X86::FpADD; break;
1376 if (getRegPressure(Op0) > getRegPressure(Op1)) {
1377 Tmp1 = SelectExpr(Op0);
1378 Tmp2 = SelectExpr(Op1);
1380 Tmp2 = SelectExpr(Op1);
1381 Tmp1 = SelectExpr(Op0);
1384 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
1391 static const unsigned SUBTab[] = {
1392 X86::SUB8ri, X86::SUB16ri, X86::SUB32ri, 0, 0,
1393 X86::SUB8rm, X86::SUB16rm, X86::SUB32rm, X86::FSUB32m, X86::FSUB64m,
1394 X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, X86::FpSUB , X86::FpSUB,
1396 static const unsigned MULTab[] = {
1397 0, X86::IMUL16rri, X86::IMUL32rri, 0, 0,
1398 0, X86::IMUL16rm , X86::IMUL32rm, X86::FMUL32m, X86::FMUL64m,
1399 0, X86::IMUL16rr , X86::IMUL32rr, X86::FpMUL , X86::FpMUL,
1401 static const unsigned ANDTab[] = {
1402 X86::AND8ri, X86::AND16ri, X86::AND32ri, 0, 0,
1403 X86::AND8rm, X86::AND16rm, X86::AND32rm, 0, 0,
1404 X86::AND8rr, X86::AND16rr, X86::AND32rr, 0, 0,
1406 static const unsigned ORTab[] = {
1407 X86::OR8ri, X86::OR16ri, X86::OR32ri, 0, 0,
1408 X86::OR8rm, X86::OR16rm, X86::OR32rm, 0, 0,
1409 X86::OR8rr, X86::OR16rr, X86::OR32rr, 0, 0,
1411 static const unsigned XORTab[] = {
1412 X86::XOR8ri, X86::XOR16ri, X86::XOR32ri, 0, 0,
1413 X86::XOR8rm, X86::XOR16rm, X86::XOR32rm, 0, 0,
1414 X86::XOR8rr, X86::XOR16rr, X86::XOR32rr, 0, 0,
1417 Op0 = Node->getOperand(0);
1418 Op1 = Node->getOperand(1);
1420 if (Node->getOpcode() == ISD::SUB && MVT::isInteger(N.getValueType()))
1421 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(0)))
1422 if (CN->isNullValue()) { // 0 - N -> neg N
1423 switch (N.getValueType()) {
1424 default: assert(0 && "Cannot sub this type!");
1426 case MVT::i8: Opc = X86::NEG8r; break;
1427 case MVT::i16: Opc = X86::NEG16r; break;
1428 case MVT::i32: Opc = X86::NEG32r; break;
1430 Tmp1 = SelectExpr(N.getOperand(1));
1431 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
1435 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
1436 if (CN->isAllOnesValue() && Node->getOpcode() == ISD::XOR) {
1437 switch (N.getValueType()) {
1438 default: assert(0 && "Cannot add this type!");
1440 case MVT::i8: Opc = X86::NOT8r; break;
1441 case MVT::i16: Opc = X86::NOT16r; break;
1442 case MVT::i32: Opc = X86::NOT32r; break;
1444 Tmp1 = SelectExpr(Op0);
1445 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
1449 switch (N.getValueType()) {
1450 default: assert(0 && "Cannot xor this type!");
1452 case MVT::i8: Opc = 0; break;
1453 case MVT::i16: Opc = 1; break;
1454 case MVT::i32: Opc = 2; break;
1456 switch (Node->getOpcode()) {
1457 default: assert(0 && "Unreachable!");
1458 case ISD::SUB: Opc = SUBTab[Opc]; break;
1459 case ISD::MUL: Opc = MULTab[Opc]; break;
1460 case ISD::AND: Opc = ANDTab[Opc]; break;
1461 case ISD::OR: Opc = ORTab[Opc]; break;
1462 case ISD::XOR: Opc = XORTab[Opc]; break;
1464 if (Opc) { // Can't fold MUL:i8 R, imm
1465 Tmp1 = SelectExpr(Op0);
1466 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
1471 if (isFoldableLoad(Op0))
1472 if (Node->getOpcode() != ISD::SUB) {
1473 std::swap(Op0, Op1);
1475 // Emit 'reverse' subract, with a memory operand.
1476 switch (N.getValueType()) {
1477 default: Opc = 0; break;
1478 case MVT::f32: Opc = X86::FSUBR32m; break;
1479 case MVT::f64: Opc = X86::FSUBR64m; break;
1483 if (getRegPressure(Op0) > getRegPressure(Op1)) {
1484 EmitFoldedLoad(Op0, AM);
1485 Tmp1 = SelectExpr(Op1);
1487 Tmp1 = SelectExpr(Op1);
1488 EmitFoldedLoad(Op0, AM);
1490 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
1495 if (isFoldableLoad(Op1)) {
1496 switch (N.getValueType()) {
1497 default: assert(0 && "Cannot operate on this type!");
1499 case MVT::i8: Opc = 5; break;
1500 case MVT::i16: Opc = 6; break;
1501 case MVT::i32: Opc = 7; break;
1502 case MVT::f32: Opc = 8; break;
1503 case MVT::f64: Opc = 9; break;
1505 switch (Node->getOpcode()) {
1506 default: assert(0 && "Unreachable!");
1507 case ISD::SUB: Opc = SUBTab[Opc]; break;
1508 case ISD::MUL: Opc = MULTab[Opc]; break;
1509 case ISD::AND: Opc = ANDTab[Opc]; break;
1510 case ISD::OR: Opc = ORTab[Opc]; break;
1511 case ISD::XOR: Opc = XORTab[Opc]; break;
1515 if (getRegPressure(Op0) > getRegPressure(Op1)) {
1516 Tmp1 = SelectExpr(Op0);
1517 EmitFoldedLoad(Op1, AM);
1519 EmitFoldedLoad(Op1, AM);
1520 Tmp1 = SelectExpr(Op0);
1523 addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM);
1525 assert(Node->getOpcode() == ISD::MUL &&
1526 N.getValueType() == MVT::i8 && "Unexpected situation!");
1527 // Must use the MUL instruction, which forces use of AL.
1528 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(Tmp1);
1529 addFullAddress(BuildMI(BB, X86::MUL8m, 1), AM);
1530 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
1535 if (getRegPressure(Op0) > getRegPressure(Op1)) {
1536 Tmp1 = SelectExpr(Op0);
1537 Tmp2 = SelectExpr(Op1);
1539 Tmp2 = SelectExpr(Op1);
1540 Tmp1 = SelectExpr(Op0);
1543 switch (N.getValueType()) {
1544 default: assert(0 && "Cannot add this type!");
1546 case MVT::i8: Opc = 10; break;
1547 case MVT::i16: Opc = 11; break;
1548 case MVT::i32: Opc = 12; break;
1549 case MVT::f32: Opc = 13; break;
1550 case MVT::f64: Opc = 14; break;
1552 switch (Node->getOpcode()) {
1553 default: assert(0 && "Unreachable!");
1554 case ISD::SUB: Opc = SUBTab[Opc]; break;
1555 case ISD::MUL: Opc = MULTab[Opc]; break;
1556 case ISD::AND: Opc = ANDTab[Opc]; break;
1557 case ISD::OR: Opc = ORTab[Opc]; break;
1558 case ISD::XOR: Opc = XORTab[Opc]; break;
1561 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
1563 assert(Node->getOpcode() == ISD::MUL &&
1564 N.getValueType() == MVT::i8 && "Unexpected situation!");
1565 // Must use the MUL instruction, which forces use of AL.
1566 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(Tmp1);
1567 BuildMI(BB, X86::MUL8r, 1).addReg(Tmp2);
1568 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
1573 if (N.getValueType() != MVT::i1 && N.getValueType() != MVT::i8) {
1574 if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
1575 Tmp2 = SelectExpr(N.getOperand(1));
1576 Tmp3 = SelectExpr(N.getOperand(2));
1578 Tmp3 = SelectExpr(N.getOperand(2));
1579 Tmp2 = SelectExpr(N.getOperand(1));
1581 EmitSelectCC(N.getOperand(0), N.getValueType(), Tmp2, Tmp3, Result);
1584 // FIXME: This should not be implemented here, it should be in the generic
1586 if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
1587 Tmp2 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16,
1589 Tmp3 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16,
1592 Tmp3 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16,
1594 Tmp2 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16,
1597 unsigned TmpReg = MakeReg(MVT::i16);
1598 EmitSelectCC(N.getOperand(0), MVT::i16, Tmp2, Tmp3, TmpReg);
1599 // FIXME: need subregs to do better than this!
1600 BuildMI(BB, X86::MOV16rr, 1, X86::AX).addReg(TmpReg);
1601 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
1609 if (N.getOpcode() == ISD::SDIV)
1610 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1611 // FIXME: These special cases should be handled by the lowering impl!
1612 unsigned RHS = CN->getValue();
1618 if (RHS && (RHS & (RHS-1)) == 0) { // Signed division by power of 2?
1619 unsigned Log = log2(RHS);
1620 unsigned TmpReg = MakeReg(N.getValueType());
1621 unsigned SAROpc, SHROpc, ADDOpc, NEGOpc;
1622 switch (N.getValueType()) {
1623 default: assert("Unknown type to signed divide!");
1625 SAROpc = X86::SAR8ri;
1626 SHROpc = X86::SHR8ri;
1627 ADDOpc = X86::ADD8rr;
1628 NEGOpc = X86::NEG8r;
1631 SAROpc = X86::SAR16ri;
1632 SHROpc = X86::SHR16ri;
1633 ADDOpc = X86::ADD16rr;
1634 NEGOpc = X86::NEG16r;
1637 SAROpc = X86::SAR32ri;
1638 SHROpc = X86::SHR32ri;
1639 ADDOpc = X86::ADD32rr;
1640 NEGOpc = X86::NEG32r;
1643 Tmp1 = SelectExpr(N.getOperand(0));
1644 BuildMI(BB, SAROpc, 2, TmpReg).addReg(Tmp1).addImm(Log-1);
1645 unsigned TmpReg2 = MakeReg(N.getValueType());
1646 BuildMI(BB, SHROpc, 2, TmpReg2).addReg(TmpReg).addImm(32-Log);
1647 unsigned TmpReg3 = MakeReg(N.getValueType());
1648 BuildMI(BB, ADDOpc, 2, TmpReg3).addReg(Tmp1).addReg(TmpReg2);
1650 unsigned TmpReg4 = isNeg ? MakeReg(N.getValueType()) : Result;
1651 BuildMI(BB, SAROpc, 2, TmpReg4).addReg(TmpReg3).addImm(Log);
1653 BuildMI(BB, NEGOpc, 1, Result).addReg(TmpReg4);
1658 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1659 Tmp1 = SelectExpr(N.getOperand(0));
1660 Tmp2 = SelectExpr(N.getOperand(1));
1662 Tmp2 = SelectExpr(N.getOperand(1));
1663 Tmp1 = SelectExpr(N.getOperand(0));
1666 bool isSigned = N.getOpcode() == ISD::SDIV || N.getOpcode() == ISD::SREM;
1667 bool isDiv = N.getOpcode() == ISD::SDIV || N.getOpcode() == ISD::UDIV;
1668 unsigned LoReg, HiReg, DivOpcode, MovOpcode, ClrOpcode, SExtOpcode;
1669 switch (N.getValueType()) {
1670 default: assert(0 && "Cannot sdiv this type!");
1672 DivOpcode = isSigned ? X86::IDIV8r : X86::DIV8r;
1675 MovOpcode = X86::MOV8rr;
1676 ClrOpcode = X86::MOV8ri;
1677 SExtOpcode = X86::CBW;
1680 DivOpcode = isSigned ? X86::IDIV16r : X86::DIV16r;
1683 MovOpcode = X86::MOV16rr;
1684 ClrOpcode = X86::MOV16ri;
1685 SExtOpcode = X86::CWD;
1688 DivOpcode = isSigned ? X86::IDIV32r : X86::DIV32r;
1691 MovOpcode = X86::MOV32rr;
1692 ClrOpcode = X86::MOV32ri;
1693 SExtOpcode = X86::CDQ;
1695 case MVT::i64: assert(0 && "FIXME: implement i64 DIV/REM libcalls!");
1698 if (N.getOpcode() == ISD::SDIV)
1699 BuildMI(BB, X86::FpDIV, 2, Result).addReg(Tmp1).addReg(Tmp2);
1701 assert(0 && "FIXME: Emit frem libcall to fmod!");
1705 // Set up the low part.
1706 BuildMI(BB, MovOpcode, 1, LoReg).addReg(Tmp1);
1709 // Sign extend the low part into the high part.
1710 BuildMI(BB, SExtOpcode, 0);
1712 // Zero out the high part, effectively zero extending the input.
1713 BuildMI(BB, ClrOpcode, 1, HiReg).addImm(0);
1716 // Emit the DIV/IDIV instruction.
1717 BuildMI(BB, DivOpcode, 1).addReg(Tmp2);
1719 // Get the result of the divide or rem.
1720 BuildMI(BB, MovOpcode, 1, Result).addReg(isDiv ? LoReg : HiReg);
1725 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1726 if (CN->getValue() == 1) { // X = SHL Y, 1 -> X = ADD Y, Y
1727 switch (N.getValueType()) {
1728 default: assert(0 && "Cannot shift this type!");
1729 case MVT::i8: Opc = X86::ADD8rr; break;
1730 case MVT::i16: Opc = X86::ADD16rr; break;
1731 case MVT::i32: Opc = X86::ADD32rr; break;
1733 Tmp1 = SelectExpr(N.getOperand(0));
1734 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp1);
1738 switch (N.getValueType()) {
1739 default: assert(0 && "Cannot shift this type!");
1740 case MVT::i8: Opc = X86::SHL8ri; break;
1741 case MVT::i16: Opc = X86::SHL16ri; break;
1742 case MVT::i32: Opc = X86::SHL32ri; break;
1744 Tmp1 = SelectExpr(N.getOperand(0));
1745 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
1749 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1750 Tmp1 = SelectExpr(N.getOperand(0));
1751 Tmp2 = SelectExpr(N.getOperand(1));
1753 Tmp2 = SelectExpr(N.getOperand(1));
1754 Tmp1 = SelectExpr(N.getOperand(0));
1757 switch (N.getValueType()) {
1758 default: assert(0 && "Cannot shift this type!");
1759 case MVT::i8 : Opc = X86::SHL8rCL; break;
1760 case MVT::i16: Opc = X86::SHL16rCL; break;
1761 case MVT::i32: Opc = X86::SHL32rCL; break;
1763 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
1764 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
1767 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1768 switch (N.getValueType()) {
1769 default: assert(0 && "Cannot shift this type!");
1770 case MVT::i8: Opc = X86::SHR8ri; break;
1771 case MVT::i16: Opc = X86::SHR16ri; break;
1772 case MVT::i32: Opc = X86::SHR32ri; break;
1774 Tmp1 = SelectExpr(N.getOperand(0));
1775 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
1779 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1780 Tmp1 = SelectExpr(N.getOperand(0));
1781 Tmp2 = SelectExpr(N.getOperand(1));
1783 Tmp2 = SelectExpr(N.getOperand(1));
1784 Tmp1 = SelectExpr(N.getOperand(0));
1787 switch (N.getValueType()) {
1788 default: assert(0 && "Cannot shift this type!");
1789 case MVT::i8 : Opc = X86::SHR8rCL; break;
1790 case MVT::i16: Opc = X86::SHR16rCL; break;
1791 case MVT::i32: Opc = X86::SHR32rCL; break;
1793 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
1794 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
1797 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1798 switch (N.getValueType()) {
1799 default: assert(0 && "Cannot shift this type!");
1800 case MVT::i8: Opc = X86::SAR8ri; break;
1801 case MVT::i16: Opc = X86::SAR16ri; break;
1802 case MVT::i32: Opc = X86::SAR32ri; break;
1804 Tmp1 = SelectExpr(N.getOperand(0));
1805 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
1809 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1810 Tmp1 = SelectExpr(N.getOperand(0));
1811 Tmp2 = SelectExpr(N.getOperand(1));
1813 Tmp2 = SelectExpr(N.getOperand(1));
1814 Tmp1 = SelectExpr(N.getOperand(0));
1817 switch (N.getValueType()) {
1818 default: assert(0 && "Cannot shift this type!");
1819 case MVT::i8 : Opc = X86::SAR8rCL; break;
1820 case MVT::i16: Opc = X86::SAR16rCL; break;
1821 case MVT::i32: Opc = X86::SAR32rCL; break;
1823 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
1824 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
1828 EmitCMP(N.getOperand(0), N.getOperand(1));
1829 EmitSetCC(BB, Result, cast<SetCCSDNode>(N)->getCondition(),
1830 MVT::isFloatingPoint(N.getOperand(1).getValueType()));
1833 // Make sure we generate both values.
1835 ExprMap[N.getValue(1)] = 1; // Generate the token
1837 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
1839 switch (Node->getValueType(0)) {
1840 default: assert(0 && "Cannot load this type!");
1842 case MVT::i8: Opc = X86::MOV8rm; break;
1843 case MVT::i16: Opc = X86::MOV16rm; break;
1844 case MVT::i32: Opc = X86::MOV32rm; break;
1845 case MVT::f32: Opc = X86::FLD32m; ContainsFPCode = true; break;
1846 case MVT::f64: Opc = X86::FLD64m; ContainsFPCode = true; break;
1849 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N.getOperand(1))){
1850 Select(N.getOperand(0));
1851 addConstantPoolReference(BuildMI(BB, Opc, 4, Result), CP->getIndex());
1854 EmitFoldedLoad(N, AM);
1855 addFullAddress(BuildMI(BB, Opc, 4, Result), AM);
1859 case ISD::DYNAMIC_STACKALLOC:
1860 // Generate both result values.
1862 ExprMap[N.getValue(1)] = 1; // Generate the token
1864 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
1866 // FIXME: We are currently ignoring the requested alignment for handling
1867 // greater than the stack alignment. This will need to be revisited at some
1868 // point. Align = N.getOperand(2);
1870 if (!isa<ConstantSDNode>(N.getOperand(2)) ||
1871 cast<ConstantSDNode>(N.getOperand(2))->getValue() != 0) {
1872 std::cerr << "Cannot allocate stack object with greater alignment than"
1873 << " the stack alignment yet!";
1877 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1878 Select(N.getOperand(0));
1879 BuildMI(BB, X86::SUB32ri, 2, X86::ESP).addReg(X86::ESP)
1880 .addImm(CN->getValue());
1882 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1883 Select(N.getOperand(0));
1884 Tmp1 = SelectExpr(N.getOperand(1));
1886 Tmp1 = SelectExpr(N.getOperand(1));
1887 Select(N.getOperand(0));
1890 // Subtract size from stack pointer, thereby allocating some space.
1891 BuildMI(BB, X86::SUB32rr, 2, X86::ESP).addReg(X86::ESP).addReg(Tmp1);
1894 // Put a pointer to the space into the result register, by copying the stack
1896 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::ESP);
1900 // The chain for this call is now lowered.
1901 LoweredTokens.insert(N.getValue(Node->getNumValues()-1));
1903 if (GlobalAddressSDNode *GASD =
1904 dyn_cast<GlobalAddressSDNode>(N.getOperand(1))) {
1905 Select(N.getOperand(0));
1906 BuildMI(BB, X86::CALLpcrel32, 1).addGlobalAddress(GASD->getGlobal(),true);
1907 } else if (ExternalSymbolSDNode *ESSDN =
1908 dyn_cast<ExternalSymbolSDNode>(N.getOperand(1))) {
1909 Select(N.getOperand(0));
1910 BuildMI(BB, X86::CALLpcrel32,
1911 1).addExternalSymbol(ESSDN->getSymbol(), true);
1913 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
1914 Select(N.getOperand(0));
1915 Tmp1 = SelectExpr(N.getOperand(1));
1917 Tmp1 = SelectExpr(N.getOperand(1));
1918 Select(N.getOperand(0));
1921 BuildMI(BB, X86::CALL32r, 1).addReg(Tmp1);
1923 switch (Node->getValueType(0)) {
1924 default: assert(0 && "Unknown value type for call result!");
1925 case MVT::Other: return 1;
1928 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
1931 BuildMI(BB, X86::MOV16rr, 1, Result).addReg(X86::AX);
1934 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::EAX);
1935 if (Node->getValueType(1) == MVT::i32)
1936 BuildMI(BB, X86::MOV32rr, 1, Result+1).addReg(X86::EDX);
1939 case MVT::f64: // Floating-point return values live in %ST(0)
1940 ContainsFPCode = true;
1941 BuildMI(BB, X86::FpGETRESULT, 1, Result);
1944 return Result+N.ResNo;
1950 void ISel::Select(SDOperand N) {
1951 unsigned Tmp1, Tmp2, Opc;
1953 // FIXME: Disable for our current expansion model!
1954 if (/*!N->hasOneUse() &&*/ !LoweredTokens.insert(N).second)
1955 return; // Already selected.
1957 SDNode *Node = N.Val;
1959 switch (Node->getOpcode()) {
1961 Node->dump(); std::cerr << "\n";
1962 assert(0 && "Node not handled yet!");
1963 case ISD::EntryToken: return; // Noop
1964 case ISD::CopyToReg:
1965 Select(N.getOperand(0));
1966 Tmp1 = SelectExpr(N.getOperand(1));
1967 Tmp2 = cast<CopyRegSDNode>(N)->getReg();
1970 switch (N.getOperand(1).getValueType()) {
1971 default: assert(0 && "Invalid type for operation!");
1973 case MVT::i8: Opc = X86::MOV8rr; break;
1974 case MVT::i16: Opc = X86::MOV16rr; break;
1975 case MVT::i32: Opc = X86::MOV32rr; break;
1977 case MVT::f64: Opc = X86::FpMOV; ContainsFPCode = true; break;
1979 BuildMI(BB, Opc, 1, Tmp2).addReg(Tmp1);
1983 switch (N.getNumOperands()) {
1985 assert(0 && "Unknown return instruction!");
1987 assert(N.getOperand(1).getValueType() == MVT::i32 &&
1988 N.getOperand(2).getValueType() == MVT::i32 &&
1989 "Unknown two-register value!");
1990 if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
1991 Tmp1 = SelectExpr(N.getOperand(1));
1992 Tmp2 = SelectExpr(N.getOperand(2));
1994 Tmp2 = SelectExpr(N.getOperand(2));
1995 Tmp1 = SelectExpr(N.getOperand(1));
1997 Select(N.getOperand(0));
1999 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
2000 BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(Tmp2);
2001 // Declare that EAX & EDX are live on exit.
2002 BuildMI(BB, X86::IMPLICIT_USE, 3).addReg(X86::EAX).addReg(X86::EDX)
2006 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
2007 Select(N.getOperand(0));
2008 Tmp1 = SelectExpr(N.getOperand(1));
2010 Tmp1 = SelectExpr(N.getOperand(1));
2011 Select(N.getOperand(0));
2013 switch (N.getOperand(1).getValueType()) {
2014 default: assert(0 && "All other types should have been promoted!!");
2016 BuildMI(BB, X86::FpSETRESULT, 1).addReg(Tmp1);
2017 // Declare that top-of-stack is live on exit
2018 BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::ST0).addReg(X86::ESP);
2021 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
2022 BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::EAX).addReg(X86::ESP);
2027 Select(N.getOperand(0));
2030 BuildMI(BB, X86::RET, 0); // Just emit a 'ret' instruction
2033 Select(N.getOperand(0));
2034 MachineBasicBlock *Dest =
2035 cast<BasicBlockSDNode>(N.getOperand(1))->getBasicBlock();
2036 BuildMI(BB, X86::JMP, 1).addMBB(Dest);
2041 MachineBasicBlock *Dest =
2042 cast<BasicBlockSDNode>(N.getOperand(2))->getBasicBlock();
2044 // Try to fold a setcc into the branch. If this fails, emit a test/jne
2046 if (EmitBranchCC(Dest, N.getOperand(0), N.getOperand(1))) {
2047 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
2048 Select(N.getOperand(0));
2049 Tmp1 = SelectExpr(N.getOperand(1));
2051 Tmp1 = SelectExpr(N.getOperand(1));
2052 Select(N.getOperand(0));
2054 BuildMI(BB, X86::TEST8rr, 2).addReg(Tmp1).addReg(Tmp1);
2055 BuildMI(BB, X86::JNE, 1).addMBB(Dest);
2062 case ISD::DYNAMIC_STACKALLOC:
2066 // Select the address.
2069 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
2071 switch (CN->getValueType(0)) {
2072 default: assert(0 && "Invalid type for operation!");
2074 case MVT::i8: Opc = X86::MOV8mi; break;
2075 case MVT::i16: Opc = X86::MOV16mi; break;
2076 case MVT::i32: Opc = X86::MOV32mi; break;
2078 case MVT::f64: break;
2081 if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(2))) {
2082 Select(N.getOperand(0));
2083 SelectAddress(N.getOperand(2), AM);
2085 SelectAddress(N.getOperand(2), AM);
2086 Select(N.getOperand(0));
2088 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addImm(CN->getValue());
2092 switch (N.getOperand(1).getValueType()) {
2093 default: assert(0 && "Cannot store this type!");
2095 case MVT::i8: Opc = X86::MOV8mr; break;
2096 case MVT::i16: Opc = X86::MOV16mr; break;
2097 case MVT::i32: Opc = X86::MOV32mr; break;
2098 case MVT::f32: Opc = X86::FST32m; break;
2099 case MVT::f64: Opc = X86::FST64m; break;
2102 std::vector<std::pair<unsigned, unsigned> > RP;
2103 RP.push_back(std::make_pair(getRegPressure(N.getOperand(0)), 0));
2104 RP.push_back(std::make_pair(getRegPressure(N.getOperand(1)), 1));
2105 RP.push_back(std::make_pair(getRegPressure(N.getOperand(2)), 2));
2106 std::sort(RP.begin(), RP.end());
2108 for (unsigned i = 0; i != 3; ++i)
2109 switch (RP[2-i].second) {
2110 default: assert(0 && "Unknown operand number!");
2111 case 0: Select(N.getOperand(0)); break;
2112 case 1: Tmp1 = SelectExpr(N.getOperand(1)); break;
2113 case 2: SelectAddress(N.getOperand(2), AM); break;
2116 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addReg(Tmp1);
2119 case ISD::ADJCALLSTACKDOWN:
2120 case ISD::ADJCALLSTACKUP:
2121 Select(N.getOperand(0));
2122 Tmp1 = cast<ConstantSDNode>(N.getOperand(1))->getValue();
2124 Opc = N.getOpcode() == ISD::ADJCALLSTACKDOWN ? X86::ADJCALLSTACKDOWN :
2125 X86::ADJCALLSTACKUP;
2126 BuildMI(BB, Opc, 1).addImm(Tmp1);
2129 Select(N.getOperand(0)); // Select the chain.
2131 (unsigned)cast<ConstantSDNode>(Node->getOperand(4))->getValue();
2132 if (Align == 0) Align = 1;
2134 // Turn the byte code into # iterations
2137 if (ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Node->getOperand(2))) {
2138 unsigned Val = ValC->getValue() & 255;
2140 // If the value is a constant, then we can potentially use larger sets.
2141 switch (Align & 3) {
2142 case 2: // WORD aligned
2143 CountReg = MakeReg(MVT::i32);
2144 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
2145 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/2);
2147 unsigned ByteReg = SelectExpr(Node->getOperand(3));
2148 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
2150 BuildMI(BB, X86::MOV16ri, 1, X86::AX).addImm((Val << 8) | Val);
2151 Opcode = X86::REP_STOSW;
2153 case 0: // DWORD aligned
2154 CountReg = MakeReg(MVT::i32);
2155 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
2156 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/4);
2158 unsigned ByteReg = SelectExpr(Node->getOperand(3));
2159 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
2161 Val = (Val << 8) | Val;
2162 BuildMI(BB, X86::MOV32ri, 1, X86::EAX).addImm((Val << 16) | Val);
2163 Opcode = X86::REP_STOSD;
2165 default: // BYTE aligned
2166 CountReg = SelectExpr(Node->getOperand(3));
2167 BuildMI(BB, X86::MOV8ri, 1, X86::AL).addImm(Val);
2168 Opcode = X86::REP_STOSB;
2172 // If it's not a constant value we are storing, just fall back. We could
2173 // try to be clever to form 16 bit and 32 bit values, but we don't yet.
2174 unsigned ValReg = SelectExpr(Node->getOperand(2));
2175 BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(ValReg);
2176 CountReg = SelectExpr(Node->getOperand(3));
2177 Opcode = X86::REP_STOSB;
2180 // No matter what the alignment is, we put the source in ESI, the
2181 // destination in EDI, and the count in ECX.
2182 unsigned TmpReg1 = SelectExpr(Node->getOperand(1));
2183 BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
2184 BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
2185 BuildMI(BB, Opcode, 0);
2189 Select(N.getOperand(0)); // Select the chain.
2191 (unsigned)cast<ConstantSDNode>(Node->getOperand(4))->getValue();
2192 if (Align == 0) Align = 1;
2194 // Turn the byte code into # iterations
2197 switch (Align & 3) {
2198 case 2: // WORD aligned
2199 CountReg = MakeReg(MVT::i32);
2200 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
2201 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/2);
2203 unsigned ByteReg = SelectExpr(Node->getOperand(3));
2204 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
2206 Opcode = X86::REP_MOVSW;
2208 case 0: // DWORD aligned
2209 CountReg = MakeReg(MVT::i32);
2210 if (ConstantSDNode *I = dyn_cast<ConstantSDNode>(Node->getOperand(3))) {
2211 BuildMI(BB, X86::MOV32ri, 1, CountReg).addImm(I->getValue()/4);
2213 unsigned ByteReg = SelectExpr(Node->getOperand(3));
2214 BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
2216 Opcode = X86::REP_MOVSD;
2218 default: // BYTE aligned
2219 CountReg = SelectExpr(Node->getOperand(3));
2220 Opcode = X86::REP_MOVSB;
2224 // No matter what the alignment is, we put the source in ESI, the
2225 // destination in EDI, and the count in ECX.
2226 unsigned TmpReg1 = SelectExpr(Node->getOperand(1));
2227 unsigned TmpReg2 = SelectExpr(Node->getOperand(2));
2228 BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
2229 BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
2230 BuildMI(BB, X86::MOV32rr, 1, X86::ESI).addReg(TmpReg2);
2231 BuildMI(BB, Opcode, 0);
2234 assert(0 && "Should not be reached!");
2238 /// createX86PatternInstructionSelector - This pass converts an LLVM function
2239 /// into a machine code representation using pattern matching and a machine
2240 /// description file.
2242 FunctionPass *llvm::createX86PatternInstructionSelector(TargetMachine &TM) {
2243 return new ISel(TM);