1 //===-- X86ISelPattern.cpp - A pattern matching inst selector for X86 -----===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by the LLVM research group and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines a pattern matching instruction selector for X86.
12 //===----------------------------------------------------------------------===//
15 #include "X86InstrBuilder.h"
16 #include "X86RegisterInfo.h"
17 #include "llvm/Function.h"
18 #include "llvm/CodeGen/MachineFunction.h"
19 #include "llvm/CodeGen/MachineFrameInfo.h"
20 #include "llvm/CodeGen/SelectionDAG.h"
21 #include "llvm/CodeGen/SelectionDAGISel.h"
22 #include "llvm/CodeGen/SSARegMap.h"
23 #include "llvm/Target/TargetData.h"
24 #include "llvm/Target/TargetLowering.h"
25 #include "llvm/Support/MathExtras.h"
26 #include "llvm/ADT/Statistic.h"
30 //===----------------------------------------------------------------------===//
31 // X86TargetLowering - X86 Implementation of the TargetLowering interface
33 class X86TargetLowering : public TargetLowering {
34 int VarArgsFrameIndex; // FrameIndex for start of varargs area.
35 int ReturnAddrIndex; // FrameIndex for return slot.
// Constructor: registers the register classes and value-type legality
// rules that the legalizer and instruction selector consult for X86.
37 X86TargetLowering(TargetMachine &TM) : TargetLowering(TM) {
38 // Set up the TargetLowering object.
// Map each value type the selector can handle to the X86 register
// class that holds it (8/16/32-bit GPRs, x87 FP stack for f64).
39 addRegisterClass(MVT::i8, X86::R8RegisterClass);
40 addRegisterClass(MVT::i16, X86::R16RegisterClass);
41 addRegisterClass(MVT::i32, X86::R32RegisterClass);
42 addRegisterClass(MVT::f64, X86::RFPRegisterClass);
44 // FIXME: Eliminate these two classes when legalize can handle promotions
46 addRegisterClass(MVT::i1, X86::R8RegisterClass);
47 addRegisterClass(MVT::f32, X86::RFPRegisterClass);
// Derive legal-type/register properties from the classes added above.
49 computeRegisterProperties();
// Operations the selector has no pattern for; the legalizer must
// expand or promote these before selection.
51 setOperationUnsupported(ISD::MUL, MVT::i8);
52 setOperationUnsupported(ISD::SELECT, MVT::i1);
53 setOperationUnsupported(ISD::SELECT, MVT::i8);
// FP immediates the x87 unit can materialize without a constant-pool
// load: FLD0/FLD1, plus FCHS for the negated forms.
55 addLegalFPImmediate(+0.0); // FLD0
56 addLegalFPImmediate(+1.0); // FLD1
57 addLegalFPImmediate(-0.0); // FLD0/FCHS
58 addLegalFPImmediate(-1.0); // FLD1/FCHS
61 /// LowerArguments - This hook must be implemented to indicate how we should
62 /// lower the arguments for the specified function, into the specified DAG.
63 virtual std::vector<SDOperand>
64 LowerArguments(Function &F, SelectionDAG &DAG);
66 /// LowerCallTo - This hook lowers an abstract call to a function into an
68 virtual std::pair<SDOperand, SDOperand>
69 LowerCallTo(SDOperand Chain, const Type *RetTy, SDOperand Callee,
70 ArgListTy &Args, SelectionDAG &DAG);
/// LowerVAStart - Lowers llvm.va_start; returns the address of the
/// varargs frame slot plus the (possibly updated) chain.
72 virtual std::pair<SDOperand, SDOperand>
73 LowerVAStart(SDOperand Chain, SelectionDAG &DAG);
/// LowerVAArgNext - Lowers va_arg/va_next: either loads the current
/// vararg value or advances the va_list pointer (see isVANext).
75 virtual std::pair<SDOperand,SDOperand>
76 LowerVAArgNext(bool isVANext, SDOperand Chain, SDOperand VAList,
77 const Type *ArgTy, SelectionDAG &DAG);
/// LowerFrameReturnAddress - Lowers llvm.frameaddress/llvm.returnaddress
/// (isFrameAddr selects which) for the given call-frame Depth.
79 virtual std::pair<SDOperand, SDOperand>
80 LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain, unsigned Depth,
/// LowerArguments - Build DAG nodes that load each incoming formal argument
/// of F from its fixed stack slot, per the X86 C calling convention
/// (everything passed on the stack above the return address). Also records
/// VarArgsFrameIndex for later va_start expansion. Returns one SDOperand
/// per formal argument.
86 std::vector<SDOperand>
87 X86TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
88 std::vector<SDOperand> ArgValues;
90 // Add DAG nodes to load the arguments... On entry to a function on the X86,
91 // the stack frame looks like this:
93 // [ESP] -- return address
94 // [ESP + 4] -- first argument (leftmost lexically)
95 // [ESP + 8] -- second argument, if first argument is four bytes in size
98 MachineFunction &MF = DAG.getMachineFunction();
99 MachineFrameInfo *MFI = MF.getFrameInfo();
101 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
102 for (Function::aiterator I = F.abegin(), E = F.aend(); I != E; ++I) {
103 MVT::ValueType ObjectVT = getValueType(I->getType());
104 unsigned ArgIncrement = 4;
// Compute the stack size of this argument; 64-bit values (i64/f64)
// also bump the slot increment from 4 to 8 bytes.
107 default: assert(0 && "Unhandled argument type!");
109 case MVT::i8: ObjSize = 1; break;
110 case MVT::i16: ObjSize = 2; break;
111 case MVT::i32: ObjSize = 4; break;
112 case MVT::i64: ObjSize = ArgIncrement = 8; break;
113 case MVT::f32: ObjSize = 4; break;
114 case MVT::f64: ObjSize = ArgIncrement = 8; break;
116 // Create the frame index object for this incoming parameter...
117 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
119 // Create the SelectionDAG nodes corresponding to a load from this parameter
120 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
122 // Don't codegen dead arguments. FIXME: remove this check when we can nuke
// Live argument: load it from the fixed stack slot.
126 ArgValue = DAG.getLoad(ObjectVT, DAG.getEntryNode(), FIN);
// Dead argument: substitute a typed zero so the value is still defined.
128 if (MVT::isInteger(ObjectVT))
129 ArgValue = DAG.getConstant(0, ObjectVT);
131 ArgValue = DAG.getConstantFP(0, ObjectVT);
133 ArgValues.push_back(ArgValue);
135 ArgOffset += ArgIncrement; // Move on to the next argument...
138 // If the function takes variable number of arguments, make a frame index for
139 // the start of the first vararg value... for expansion of llvm.va_start.
141 VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset);
142 ReturnAddrIndex = 0; // No return address slot generated yet.
/// LowerCallTo - Lower an abstract call into the X86 calling sequence:
/// ADJCALLSTACKDOWN, stores of promoted arguments to the outgoing argument
/// area below ESP, the call node itself, and ADJCALLSTACKUP. Returns the
/// (call result, output chain) pair.
146 std::pair<SDOperand, SDOperand>
147 X86TargetLowering::LowerCallTo(SDOperand Chain,
148 const Type *RetTy, SDOperand Callee,
149 ArgListTy &Args, SelectionDAG &DAG) {
150 // Count how many bytes are to be pushed on the stack.
151 unsigned NumBytes = 0;
// No arguments: still emit a (zero-sized) call-stack adjustment so the
// ADJCALLSTACKDOWN/UP pair brackets every call.
155 Chain = DAG.getNode(ISD::ADJCALLSTACKDOWN, MVT::Other, Chain,
156 DAG.getConstant(0, getPointerTy()));
// Sum the per-argument stack sizes into NumBytes (size cases elided here).
158 for (unsigned i = 0, e = Args.size(); i != e; ++i)
159 switch (getValueType(Args[i].second)) {
160 default: assert(0 && "Unknown value type!");
174 Chain = DAG.getNode(ISD::ADJCALLSTACKDOWN, MVT::Other, Chain,
175 DAG.getConstant(NumBytes, getPointerTy()));
177 // Arguments go on the stack in reverse order, as specified by the ABI.
178 unsigned ArgOffset = 0;
// Outgoing argument addresses are computed relative to ESP.
179 SDOperand StackPtr = DAG.getCopyFromReg(X86::ESP, MVT::i32);
180 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
182 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
183 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
185 switch (getValueType(Args[i].second)) {
186 default: assert(0 && "Unexpected ValueType for argument!");
190 // Promote the integer to 32 bits. If the input type is signed use a
191 // sign extend, otherwise use a zero extend.
192 if (Args[i].second->isSigned())
193 Args[i].first =DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Args[i].first);
195 Args[i].first =DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Args[i].first);
200 // FIXME: Note that all of these stores are independent of each other.
201 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain,
202 Args[i].first, PtrOff);
207 // FIXME: Note that all of these stores are independent of each other.
208 Chain = DAG.getNode(ISD::STORE, MVT::Other, Chain,
209 Args[i].first, PtrOff);
// Build the call node's result list: the return value (if non-void)
// followed by the chain.
216 std::vector<MVT::ValueType> RetVals;
217 MVT::ValueType RetTyVT = getValueType(RetTy);
218 if (RetTyVT != MVT::isVoid)
219 RetVals.push_back(RetTyVT);
220 RetVals.push_back(MVT::Other);
222 SDOperand TheCall = SDOperand(DAG.getCall(RetVals, Chain, Callee), 0);
// The chain is result 0 for a void call, result 1 otherwise.
223 Chain = TheCall.getValue(RetTyVT != MVT::isVoid);
224 Chain = DAG.getNode(ISD::ADJCALLSTACKUP, MVT::Other, Chain,
225 DAG.getConstant(NumBytes, getPointerTy()));
226 return std::make_pair(TheCall, Chain);
/// LowerVAStart - Lower llvm.va_start. The frame index for the varargs
/// area was recorded by LowerArguments; va_start is simply its address.
229 std::pair<SDOperand, SDOperand>
230 X86TargetLowering::LowerVAStart(SDOperand Chain, SelectionDAG &DAG) {
231 // vastart just returns the address of the VarArgsFrameIndex slot.
232 return std::make_pair(DAG.getFrameIndex(VarArgsFrameIndex, MVT::i32), Chain);
/// LowerVAArgNext - Lower va_arg/va_next. For va_arg (isVANext false) the
/// result is a load of the current vararg value; for va_next the result is
/// the VAList pointer advanced past one argument. Only 32- and 64-bit
/// types are expected here; smaller types were promoted for varargs.
235 std::pair<SDOperand,SDOperand> X86TargetLowering::
236 LowerVAArgNext(bool isVANext, SDOperand Chain, SDOperand VAList,
237 const Type *ArgTy, SelectionDAG &DAG) {
238 MVT::ValueType ArgVT = getValueType(ArgTy);
// va_arg: load the value at the current list pointer.
241 Result = DAG.getLoad(ArgVT, DAG.getEntryNode(), VAList);
// va_next: compute the advance amount (Amt) from the argument size —
// 4 bytes for i32, 8 for i64/f64 (assignments elided in this view).
244 if (ArgVT == MVT::i32)
247 assert((ArgVT == MVT::i64 || ArgVT == MVT::f64) &&
248 "Other types should have been promoted for varargs!");
251 Result = DAG.getNode(ISD::ADD, VAList.getValueType(), VAList,
252 DAG.getConstant(Amt, VAList.getValueType()));
254 return std::make_pair(Result, Chain);
/// LowerFrameReturnAddress - Lower llvm.returnaddress/llvm.frameaddress.
/// Only Depth==0 is supported; deeper frames yield a constant 0. The return
/// address lives in a fixed 4-byte slot at offset -4 (just below the frame),
/// which is lazily created and cached in ReturnAddrIndex.
258 std::pair<SDOperand, SDOperand> X86TargetLowering::
259 LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
262 if (Depth) // Depths > 0 not supported yet!
263 Result = DAG.getConstant(0, getPointerTy());
265 if (ReturnAddrIndex == 0) {
266 // Set up a frame object for the return address.
267 MachineFunction &MF = DAG.getMachineFunction();
268 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(4, -4);
271 SDOperand RetAddrFI = DAG.getFrameIndex(ReturnAddrIndex, MVT::i32);
274 // Just load the return address
275 Result = DAG.getLoad(MVT::i32, DAG.getEntryNode(), RetAddrFI);
// Frame address: the caller's frame starts 4 bytes below the return
// address slot, so subtract 4 from the slot's address.
277 Result = DAG.getNode(ISD::SUB, MVT::i32, RetAddrFI,
278 DAG.getConstant(4, MVT::i32));
280 return std::make_pair(Result, Chain);
289 NumFPKill("x86-codegen", "Number of FP_REG_KILL instructions added");
291 //===--------------------------------------------------------------------===//
292 /// ISel - X86 specific code to select X86 machine instructions for
293 /// SelectionDAG operations.
295 class ISel : public SelectionDAGISel {
296 /// ContainsFPCode - Every instruction we select that uses or defines a FP
297 /// register should set this to true.
300 /// X86Lowering - This object fully describes how to lower LLVM code to an
301 /// X86-specific SelectionDAG.
302 X86TargetLowering X86Lowering;
305 /// ExprMap - As shared expressions are codegen'd, we keep track of which
306 /// vreg the value is produced in, so we only emit one copy of each compiled
308 std::map<SDOperand, unsigned> ExprMap;
// Tokens (chain values) that have already been selected, so side-effecting
// nodes are emitted exactly once.
309 std::set<SDOperand> LoweredTokens;
// NOTE(review): X86Lowering is passed to the base class before it is
// itself constructed (member-init order) — relies on SelectionDAGISel
// only storing the reference; verify against the base class.
312 ISel(TargetMachine &TM) : SelectionDAGISel(X86Lowering), X86Lowering(TM) {
315 /// InstructionSelectBasicBlock - This callback is invoked by
316 /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
317 virtual void InstructionSelectBasicBlock(SelectionDAG &DAG) {
318 // While we're doing this, keep track of whether we see any FP code for
319 // FP_REG_KILL insertion.
320 ContainsFPCode = false;
322 // Codegen the basic block.
323 Select(DAG.getRoot());
325 // Insert FP_REG_KILL instructions into basic blocks that need them. This
326 // only occurs due to the floating point stackifier not being aggressive
327 // enough to handle arbitrary global stackification.
329 // Currently we insert an FP_REG_KILL instruction into each block that
330 // uses or defines a floating point virtual register.
332 // When the global register allocators (like linear scan) finally update
333 // live variable analysis, we can keep floating point values in registers
334 // across basic blocks. This will be a huge win, but we are waiting on
335 // the global allocators before we can do this.
// Only blocks with successors need the kill; FP values can't survive a
// fall-off-the-end block anyway.
337 if (ContainsFPCode && BB->succ_size()) {
338 BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0);
342 // Clear state used for selection.
344 LoweredTokens.clear();
// Selection helpers (definitions follow below in this file).
347 void EmitCMP(SDOperand LHS, SDOperand RHS);
348 bool EmitBranchCC(MachineBasicBlock *Dest, SDOperand Cond);
349 unsigned SelectExpr(SDOperand N);
350 bool SelectAddress(SDOperand N, X86AddressMode &AM);
351 void Select(SDOperand N);
355 /// SelectAddress - Add the specified node to the specified addressing mode,
356 /// returning true if it cannot be done.
/// SelectAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done. Folds frame indices, global
/// addresses, constant displacements, scaled-index shifts, and ADDs of
/// sub-addresses into a single X86AddressMode; anything else falls back to
/// materializing the value in a register used as the base.
357 bool ISel::SelectAddress(SDOperand N, X86AddressMode &AM) {
358 switch (N.getOpcode()) {
360 case ISD::FrameIndex:
// A frame index can be the base only if no register base is taken yet.
361 if (AM.BaseType == X86AddressMode::RegBase && AM.Base.Reg == 0) {
362 AM.BaseType = X86AddressMode::FrameIndexBase;
363 AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
367 case ISD::GlobalAddress:
369 AM.GV = cast<GlobalAddressSDNode>(N)->getGlobal();
// Constant: fold into the displacement field.
374 AM.Disp += cast<ConstantSDNode>(N)->getValue();
// Shift by 1/2/3 becomes a scaled index (scale 2/4/8) when the index
// slot is still available.
377 if (AM.IndexReg == 0 || AM.Scale == 1)
378 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1))) {
379 unsigned Val = CN->getValue();
380 if (Val == 1 || Val == 2 || Val == 3) {
382 AM.IndexReg = SelectExpr(N.Val->getOperand(0));
// ADD: try folding both operands; Backup restores AM if either fails.
389 X86AddressMode Backup = AM;
390 if (!SelectAddress(N.Val->getOperand(0), AM) &&
391 !SelectAddress(N.Val->getOperand(1), AM))
398 if (AM.BaseType != X86AddressMode::RegBase ||
402 // Default, generate it as a register.
403 AM.BaseType = X86AddressMode::RegBase;
404 AM.Base.Reg = SelectExpr(N);
408 /// Emit2SetCCsAndLogical - Emit the following sequence of instructions,
409 /// assuming that the temporary registers are in the 8-bit register class.
413 /// DestReg = logicalop Tmp1, Tmp2
/// Emit2SetCCsAndLogical - Emit two SETcc instructions into fresh 8-bit
/// virtual registers and combine them with the given logical opcode:
///   Tmp1 = SetCC1; Tmp2 = SetCC2; DestReg = LogicalOp Tmp1, Tmp2
/// Used for FP conditions that need two flag tests (e.g. parity + zero).
415 static void Emit2SetCCsAndLogical(MachineBasicBlock *BB, unsigned SetCC1,
416 unsigned SetCC2, unsigned LogicalOp,
418 SSARegMap *RegMap = BB->getParent()->getSSARegMap();
419 unsigned Tmp1 = RegMap->createVirtualRegister(X86::R8RegisterClass);
420 unsigned Tmp2 = RegMap->createVirtualRegister(X86::R8RegisterClass);
421 BuildMI(BB, SetCC1, 0, Tmp1);
422 BuildMI(BB, SetCC2, 0, Tmp2);
423 BuildMI(BB, LogicalOp, 2, DestReg).addReg(Tmp1).addReg(Tmp2);
426 /// EmitSetCC - Emit the code to set the specified 8-bit register to 1 if the
427 /// condition codes match the specified SetCCOpcode. Note that some conditions
428 /// require multiple instructions to generate the correct value.
/// EmitSetCC - Emit the code to set the specified 8-bit register to 1 if the
/// condition codes match the specified SetCCOpcode. Note that some conditions
/// require multiple instructions to generate the correct value (handled via
/// Emit2SetCCsAndLogical). Integer conditions map directly to one SETcc;
/// FP conditions are interpreted against FUCOMI-style ZF/PF/CF flags.
429 static void EmitSetCC(MachineBasicBlock *BB, unsigned DestReg,
430 ISD::CondCode SetCCOpcode, bool isFP) {
// Integer comparison: single SETcc chosen from the condition code.
433 switch (SetCCOpcode) {
434 default: assert(0 && "Illegal integer SetCC!");
435 case ISD::SETEQ: Opc = X86::SETEr; break;
436 case ISD::SETGT: Opc = X86::SETGr; break;
437 case ISD::SETGE: Opc = X86::SETGEr; break;
438 case ISD::SETLT: Opc = X86::SETLr; break;
439 case ISD::SETLE: Opc = X86::SETLEr; break;
440 case ISD::SETNE: Opc = X86::SETNEr; break;
441 case ISD::SETULT: Opc = X86::SETBr; break;
442 case ISD::SETUGT: Opc = X86::SETAr; break;
443 case ISD::SETULE: Opc = X86::SETBEr; break;
444 case ISD::SETUGE: Opc = X86::SETAEr; break;
447 // On a floating point condition, the flags are set as follows:
451 // 1 | 0 | 0 | X == Y
452 // 1 | 1 | 1 | unordered
454 switch (SetCCOpcode) {
455 default: assert(0 && "Invalid FP setcc!");
458 Opc = X86::SETEr; // True if ZF = 1
462 Opc = X86::SETAr; // True if CF = 0 and ZF = 0
466 Opc = X86::SETAEr; // True if CF = 0
470 Opc = X86::SETBr; // True if CF = 1
474 Opc = X86::SETBEr; // True if CF = 1 or ZF = 1
478 Opc = X86::SETNEr; // True if ZF = 0
481 Opc = X86::SETPr; // True if PF = 1
484 Opc = X86::SETNPr; // True if PF = 0
// Ordered/unordered conditions below need two flag tests combined with
// AND/OR; these emit directly and return (return elided in this view).
486 case ISD::SETOEQ: // !PF & ZF
487 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETEr, X86::AND8rr, DestReg);
489 case ISD::SETOLT: // !PF & CF
490 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETBr, X86::AND8rr, DestReg);
492 case ISD::SETOLE: // !PF & (CF || ZF)
493 Emit2SetCCsAndLogical(BB, X86::SETNPr, X86::SETBEr, X86::AND8rr, DestReg);
495 case ISD::SETUGT: // PF | (!ZF & !CF)
496 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETAr, X86::OR8rr, DestReg);
498 case ISD::SETUGE: // PF | !CF
499 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETAEr, X86::OR8rr, DestReg);
501 case ISD::SETUNE: // PF | !ZF
502 Emit2SetCCsAndLogical(BB, X86::SETPr, X86::SETNEr, X86::OR8rr, DestReg);
// Single-instruction cases fall through to here.
506 BuildMI(BB, Opc, 0, DestReg);
510 /// EmitBranchCC - Emit code into BB that arranges for control to transfer to
511 /// the Dest block if the Cond condition is true. If we cannot fold this
512 /// condition into the branch, return true.
/// EmitBranchCC - Emit code into BB that arranges for control to transfer to
/// the Dest block if the Cond condition is true. If we cannot fold this
/// condition into the branch, return true. Handles single-use AND/OR flags
/// reuse, "br (xor C, -1)" as an inverted branch, and setcc conditions
/// (integer: one Jcc; FP: one or two Jcc's against FUCOMI flags).
514 bool ISel::EmitBranchCC(MachineBasicBlock *Dest, SDOperand Cond) {
515 // FIXME: Evaluate whether it would be good to emit code like (X < Y) | (A >
516 // B) using two conditional branches instead of one condbr, two setcc's, and
518 if ((Cond.getOpcode() == ISD::OR ||
519 Cond.getOpcode() == ISD::AND) && Cond.Val->hasOneUse()) {
520 // And and or set the flags for us, so there is no need to emit a TST of the
521 // result. It is only safe to do this if there is only a single use of the
522 // AND/OR though, otherwise we don't know it will be emitted here.
524 BuildMI(BB, X86::JNE, 1).addMBB(Dest);
528 // Codegen br not C -> JE.
529 if (Cond.getOpcode() == ISD::XOR)
530 if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(Cond.Val->getOperand(1)))
531 if (NC->isAllOnesValue()) {
// XOR with all-ones is a boolean NOT: branch on the inverse (JE).
532 unsigned CondR = SelectExpr(Cond.Val->getOperand(0));
533 BuildMI(BB, X86::TEST8rr, 2).addReg(CondR).addReg(CondR);
534 BuildMI(BB, X86::JE, 1).addMBB(Dest);
538 SetCCSDNode *SetCC = dyn_cast<SetCCSDNode>(Cond);
540 return true; // Can only handle simple setcc's so far.
544 // Handle integer conditions first.
545 if (MVT::isInteger(SetCC->getOperand(0).getValueType())) {
546 switch (SetCC->getCondition()) {
547 default: assert(0 && "Illegal integer SetCC!");
548 case ISD::SETEQ: Opc = X86::JE; break;
549 case ISD::SETGT: Opc = X86::JG; break;
550 case ISD::SETGE: Opc = X86::JGE; break;
551 case ISD::SETLT: Opc = X86::JL; break;
552 case ISD::SETLE: Opc = X86::JLE; break;
553 case ISD::SETNE: Opc = X86::JNE; break;
554 case ISD::SETULT: Opc = X86::JB; break;
555 case ISD::SETUGT: Opc = X86::JA; break;
556 case ISD::SETULE: Opc = X86::JBE; break;
557 case ISD::SETUGE: Opc = X86::JAE; break;
559 EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1));
560 BuildMI(BB, Opc, 1).addMBB(Dest);
// FP comparison path from here down.
564 ContainsFPCode = true;
565 unsigned Opc2 = 0; // Second branch if needed.
567 // On a floating point condition, the flags are set as follows:
571 // 1 | 0 | 0 | X == Y
572 // 1 | 1 | 1 | unordered
574 switch (SetCC->getCondition()) {
575 default: assert(0 && "Invalid FP setcc!");
577 case ISD::SETEQ: Opc = X86::JE; break; // True if ZF = 1
579 case ISD::SETGT: Opc = X86::JA; break; // True if CF = 0 and ZF = 0
581 case ISD::SETGE: Opc = X86::JAE; break; // True if CF = 0
583 case ISD::SETLT: Opc = X86::JB; break; // True if CF = 1
585 case ISD::SETLE: Opc = X86::JBE; break; // True if CF = 1 or ZF = 1
587 case ISD::SETNE: Opc = X86::JNE; break; // True if ZF = 0
588 case ISD::SETUO: Opc = X86::JP; break; // True if PF = 1
589 case ISD::SETO: Opc = X86::JNP; break; // True if PF = 0
// OR-of-flags conditions use two branches to the same destination.
590 case ISD::SETUGT: // PF = 1 | (ZF = 0 & CF = 0)
591 Opc = X86::JA; // ZF = 0 & CF = 0
592 Opc2 = X86::JP; // PF = 1
594 case ISD::SETUGE: // PF = 1 | CF = 0
595 Opc = X86::JAE; // CF = 0
596 Opc2 = X86::JP; // PF = 1
598 case ISD::SETUNE: // PF = 1 | ZF = 0
599 Opc = X86::JNE; // ZF = 0
600 Opc2 = X86::JP; // PF = 1
// AND-of-flags conditions cannot be expressed as branches to one
// destination; punt back to the caller.
602 case ISD::SETOEQ: // PF = 0 & ZF = 1
605 return true; // FIXME: Emit more efficient code for this branch.
606 case ISD::SETOLT: // PF = 0 & CF = 1
609 return true; // FIXME: Emit more efficient code for this branch.
610 case ISD::SETOLE: // PF = 0 & (CF = 1 || ZF = 1)
613 return true; // FIXME: Emit more efficient code for this branch.
616 EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1));
617 BuildMI(BB, Opc, 1).addMBB(Dest);
// Second branch for the two-branch (OR) conditions, when Opc2 was set.
619 BuildMI(BB, Opc2, 1).addMBB(Dest);
/// EmitCMP - Emit a comparison of LHS and RHS that sets the EFLAGS consumed
/// by a following SETcc or Jcc. Uses CMPri when RHS is a constant integer,
/// otherwise a register-register compare (FUCOMIr for f64, which also marks
/// the block as containing FP code).
623 void ISel::EmitCMP(SDOperand LHS, SDOperand RHS) {
624 unsigned Tmp1 = SelectExpr(LHS), Opc;
625 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(RHS)) {
// Constant RHS: fold the immediate into the compare.
627 switch (RHS.getValueType()) {
630 case MVT::i8: Opc = X86::CMP8ri; break;
631 case MVT::i16: Opc = X86::CMP16ri; break;
632 case MVT::i32: Opc = X86::CMP32ri; break;
635 BuildMI(BB, Opc, 2).addReg(Tmp1).addImm(CN->getValue());
// Register-register compare.
640 switch (LHS.getValueType()) {
641 default: assert(0 && "Cannot compare this value!");
643 case MVT::i8: Opc = X86::CMP8rr; break;
644 case MVT::i16: Opc = X86::CMP16rr; break;
645 case MVT::i32: Opc = X86::CMP32rr; break;
647 case MVT::f64: Opc = X86::FUCOMIr; ContainsFPCode = true; break;
649 unsigned Tmp2 = SelectExpr(RHS);
650 BuildMI(BB, Opc, 2).addReg(Tmp1).addReg(Tmp2);
653 unsigned ISel::SelectExpr(SDOperand N) {
655 unsigned Tmp1, Tmp2, Tmp3;
658 SDNode *Node = N.Val;
660 if (N.getOpcode() == ISD::CopyFromReg)
661 // Just use the specified register as our input.
662 return dyn_cast<CopyRegSDNode>(N)->getReg();
664 // If there are multiple uses of this expression, memorize the
665 // register it is code generated in, instead of emitting it multiple
667 // FIXME: Disabled for our current selection model.
668 if (1 || !Node->hasOneUse()) {
669 unsigned &Reg = ExprMap[N];
672 if (N.getOpcode() != ISD::CALL)
673 Reg = Result = (N.getValueType() != MVT::Other) ?
674 MakeReg(N.getValueType()) : 1;
676 // If this is a call instruction, make sure to prepare ALL of the result
677 // values as well as the chain.
678 if (Node->getNumValues() == 1)
679 Reg = Result = 1; // Void call, just a chain.
681 Result = MakeReg(Node->getValueType(0));
682 ExprMap[N.getValue(0)] = Result;
683 for (unsigned i = 1, e = N.Val->getNumValues()-1; i != e; ++i)
684 ExprMap[N.getValue(i)] = MakeReg(Node->getValueType(i));
685 ExprMap[SDOperand(Node, Node->getNumValues()-1)] = 1;
689 Result = MakeReg(N.getValueType());
692 switch (N.getOpcode()) {
695 assert(0 && "Node not handled!\n");
696 case ISD::FrameIndex:
697 Tmp1 = cast<FrameIndexSDNode>(N)->getIndex();
698 addFrameReference(BuildMI(BB, X86::LEA32r, 4, Result), (int)Tmp1);
700 case ISD::ConstantPool:
701 Tmp1 = cast<ConstantPoolSDNode>(N)->getIndex();
702 addConstantPoolReference(BuildMI(BB, X86::LEA32r, 4, Result), Tmp1);
704 case ISD::ConstantFP:
705 ContainsFPCode = true;
706 Tmp1 = Result; // Intermediate Register
707 if (cast<ConstantFPSDNode>(N)->getValue() < 0.0 ||
708 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
709 Tmp1 = MakeReg(MVT::f64);
711 if (cast<ConstantFPSDNode>(N)->isExactlyValue(+0.0) ||
712 cast<ConstantFPSDNode>(N)->isExactlyValue(-0.0))
713 BuildMI(BB, X86::FLD0, 0, Tmp1);
714 else if (cast<ConstantFPSDNode>(N)->isExactlyValue(+1.0) ||
715 cast<ConstantFPSDNode>(N)->isExactlyValue(-1.0))
716 BuildMI(BB, X86::FLD1, 0, Tmp1);
718 assert(0 && "Unexpected constant!");
720 BuildMI(BB, X86::FCHS, 1, Result).addReg(Tmp1);
723 switch (N.getValueType()) {
724 default: assert(0 && "Cannot use constants of this type!");
726 case MVT::i8: Opc = X86::MOV8ri; break;
727 case MVT::i16: Opc = X86::MOV16ri; break;
728 case MVT::i32: Opc = X86::MOV32ri; break;
730 BuildMI(BB, Opc, 1,Result).addImm(cast<ConstantSDNode>(N)->getValue());
732 case ISD::GlobalAddress: {
733 GlobalValue *GV = cast<GlobalAddressSDNode>(N)->getGlobal();
734 BuildMI(BB, X86::MOV32ri, 1, Result).addGlobalAddress(GV);
737 case ISD::ExternalSymbol: {
738 const char *Sym = cast<ExternalSymbolSDNode>(N)->getSymbol();
739 BuildMI(BB, X86::MOV32ri, 1, Result).addExternalSymbol(Sym);
743 Tmp1 = SelectExpr(N.getOperand(0));
744 BuildMI(BB, X86::FpMOV, 1, Result).addReg(Tmp1);
745 ContainsFPCode = true;
747 case ISD::ZERO_EXTEND: {
748 int DestIs16 = N.getValueType() == MVT::i16;
749 int SrcIs16 = N.getOperand(0).getValueType() == MVT::i16;
751 static const unsigned Opc[3] = {
752 X86::MOVZX32rr8, X86::MOVZX32rr16, X86::MOVZX16rr8
754 Tmp1 = SelectExpr(N.getOperand(0));
755 BuildMI(BB, Opc[SrcIs16+DestIs16*2], 1, Result).addReg(Tmp1);
758 case ISD::SIGN_EXTEND: {
759 int DestIs16 = N.getValueType() == MVT::i16;
760 int SrcIs16 = N.getOperand(0).getValueType() == MVT::i16;
762 static const unsigned Opc[3] = {
763 X86::MOVSX32rr8, X86::MOVSX32rr16, X86::MOVSX16rr8
765 Tmp1 = SelectExpr(N.getOperand(0));
766 BuildMI(BB, Opc[SrcIs16+DestIs16*2], 1, Result).addReg(Tmp1);
770 // Handle cast of LARGER int to SMALLER int using a move to EAX followed by
771 // a move out of AX or AL.
772 switch (N.getOperand(0).getValueType()) {
773 default: assert(0 && "Unknown truncate!");
774 case MVT::i8: Tmp2 = X86::AL; Opc = X86::MOV8rr; break;
775 case MVT::i16: Tmp2 = X86::AX; Opc = X86::MOV16rr; break;
776 case MVT::i32: Tmp2 = X86::EAX; Opc = X86::MOV32rr; break;
778 Tmp1 = SelectExpr(N.getOperand(0));
779 BuildMI(BB, Opc, 1, Tmp2).addReg(Tmp1);
781 switch (N.getValueType()) {
782 default: assert(0 && "Unknown truncate!");
784 case MVT::i8: Tmp2 = X86::AL; Opc = X86::MOV8rr; break;
785 case MVT::i16: Tmp2 = X86::AX; Opc = X86::MOV16rr; break;
787 BuildMI(BB, Opc, 1, Result).addReg(Tmp2);
791 // Truncate from double to float by storing to memory as float,
792 // then reading it back into a register.
794 // Create as stack slot to use.
795 Tmp1 = TLI.getTargetData().getFloatAlignment();
796 Tmp2 = BB->getParent()->getFrameInfo()->CreateStackObject(4, Tmp1);
798 // Codegen the input.
799 Tmp1 = SelectExpr(N.getOperand(0));
801 // Emit the store, then the reload.
802 addFrameReference(BuildMI(BB, X86::FST32m, 5), Tmp2).addReg(Tmp1);
803 addFrameReference(BuildMI(BB, X86::FLD32m, 5, Result), Tmp2);
804 ContainsFPCode = true;
807 // See if we can codegen this as an LEA to fold operations together.
808 if (N.getValueType() == MVT::i32) {
810 if (!SelectAddress(N.getOperand(0), AM) &&
811 !SelectAddress(N.getOperand(1), AM)) {
812 // If this is not just an add, emit the LEA. For a simple add (like
813 // reg+reg or reg+imm), we just emit an add. If might be a good idea to
814 // leave this as LEA, then peephole it to 'ADD' after two address elim
816 if (AM.Scale != 1 || AM.BaseType == X86AddressMode::FrameIndexBase ||
817 AM.Base.Reg && AM.IndexReg && (AM.Disp || AM.GV)) {
818 addFullAddress(BuildMI(BB, X86::LEA32r, 4, Result), AM);
823 Tmp1 = SelectExpr(N.getOperand(0));
824 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
826 if (CN->getValue() == 1) { // add X, 1 -> inc X
827 switch (N.getValueType()) {
828 default: assert(0 && "Cannot integer add this type!");
829 case MVT::i8: Opc = X86::INC8r; break;
830 case MVT::i16: Opc = X86::INC16r; break;
831 case MVT::i32: Opc = X86::INC32r; break;
833 } else if (CN->isAllOnesValue()) { // add X, -1 -> dec X
834 switch (N.getValueType()) {
835 default: assert(0 && "Cannot integer add this type!");
836 case MVT::i8: Opc = X86::DEC8r; break;
837 case MVT::i16: Opc = X86::DEC16r; break;
838 case MVT::i32: Opc = X86::DEC32r; break;
843 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
847 switch (N.getValueType()) {
848 default: assert(0 && "Cannot add this type!");
849 case MVT::i8: Opc = X86::ADD8ri; break;
850 case MVT::i16: Opc = X86::ADD16ri; break;
851 case MVT::i32: Opc = X86::ADD32ri; break;
854 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
859 Tmp2 = SelectExpr(N.getOperand(1));
860 switch (N.getValueType()) {
861 default: assert(0 && "Cannot add this type!");
862 case MVT::i8: Opc = X86::ADD8rr; break;
863 case MVT::i16: Opc = X86::ADD16rr; break;
864 case MVT::i32: Opc = X86::ADD32rr; break;
866 case MVT::f64: Opc = X86::FpADD; ContainsFPCode = true; break;
868 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
871 if (MVT::isInteger(N.getValueType()))
872 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(0)))
873 if (CN->isNullValue()) { // 0 - N -> neg N
874 switch (N.getValueType()) {
875 default: assert(0 && "Cannot sub this type!");
877 case MVT::i8: Opc = X86::NEG8r; break;
878 case MVT::i16: Opc = X86::NEG16r; break;
879 case MVT::i32: Opc = X86::NEG32r; break;
881 Tmp1 = SelectExpr(N.getOperand(1));
882 BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
886 Tmp1 = SelectExpr(N.getOperand(0));
887 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
888 switch (N.getValueType()) {
889 default: assert(0 && "Cannot sub this type!");
891 case MVT::i8: Opc = X86::SUB8ri; break;
892 case MVT::i16: Opc = X86::SUB16ri; break;
893 case MVT::i32: Opc = X86::SUB32ri; break;
895 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
898 Tmp2 = SelectExpr(N.getOperand(1));
899 switch (N.getValueType()) {
900 default: assert(0 && "Cannot add this type!");
902 case MVT::i8: Opc = X86::SUB8rr; break;
903 case MVT::i16: Opc = X86::SUB16rr; break;
904 case MVT::i32: Opc = X86::SUB32rr; break;
906 case MVT::f64: Opc = X86::FpSUB; ContainsFPCode = true; break;
908 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
912 Tmp1 = SelectExpr(N.getOperand(0));
913 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
914 switch (N.getValueType()) {
915 default: assert(0 && "Cannot add this type!");
917 case MVT::i8: Opc = X86::AND8ri; break;
918 case MVT::i16: Opc = X86::AND16ri; break;
919 case MVT::i32: Opc = X86::AND32ri; break;
921 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
924 Tmp2 = SelectExpr(N.getOperand(1));
925 switch (N.getValueType()) {
926 default: assert(0 && "Cannot add this type!");
928 case MVT::i8: Opc = X86::AND8rr; break;
929 case MVT::i16: Opc = X86::AND16rr; break;
930 case MVT::i32: Opc = X86::AND32rr; break;
932 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
935 Tmp1 = SelectExpr(N.getOperand(0));
936 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
937 switch (N.getValueType()) {
938 default: assert(0 && "Cannot add this type!");
940 case MVT::i8: Opc = X86::OR8ri; break;
941 case MVT::i16: Opc = X86::OR16ri; break;
942 case MVT::i32: Opc = X86::OR32ri; break;
944 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
947 Tmp2 = SelectExpr(N.getOperand(1));
948 switch (N.getValueType()) {
949 default: assert(0 && "Cannot add this type!");
951 case MVT::i8: Opc = X86::OR8rr; break;
952 case MVT::i16: Opc = X86::OR16rr; break;
953 case MVT::i32: Opc = X86::OR32rr; break;
955 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
958 Tmp1 = SelectExpr(N.getOperand(0));
959 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
960 switch (N.getValueType()) {
961 default: assert(0 && "Cannot add this type!");
963 case MVT::i8: Opc = X86::XOR8ri; break;
964 case MVT::i16: Opc = X86::XOR16ri; break;
965 case MVT::i32: Opc = X86::XOR32ri; break;
967 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
970 Tmp2 = SelectExpr(N.getOperand(1));
971 switch (N.getValueType()) {
972 default: assert(0 && "Cannot add this type!");
974 case MVT::i8: Opc = X86::XOR8rr; break;
975 case MVT::i16: Opc = X86::XOR16rr; break;
976 case MVT::i32: Opc = X86::XOR32rr; break;
978 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
982 Tmp1 = SelectExpr(N.getOperand(0));
983 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
985 switch (N.getValueType()) {
986 default: assert(0 && "Cannot multiply this type!");
988 case MVT::i16: Opc = X86::IMUL16rri; break;
989 case MVT::i32: Opc = X86::IMUL32rri; break;
992 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
996 Tmp2 = SelectExpr(N.getOperand(1));
997 switch (N.getValueType()) {
998 default: assert(0 && "Cannot add this type!");
999 case MVT::i8: assert(0 && "FIXME: MUL i8 not implemented yet!");
1000 case MVT::i16: Opc = X86::IMUL16rr; break;
1001 case MVT::i32: Opc = X86::IMUL32rr; break;
1003 case MVT::f64: Opc = X86::FpMUL; ContainsFPCode = true; break;
1005 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
1009 // FIXME: implement folding of setcc into select.
1010 if (N.getValueType() != MVT::i1 && N.getValueType() != MVT::i8) {
1011 Tmp2 = SelectExpr(N.getOperand(1));
1012 Tmp3 = SelectExpr(N.getOperand(2));
1013 Tmp1 = SelectExpr(N.getOperand(0));
1015 switch (N.getValueType()) {
1016 default: assert(0 && "Cannot select this type!");
1017 case MVT::i16: Opc = X86::CMOVE16rr; break;
1018 case MVT::i32: Opc = X86::CMOVE32rr; break;
1020 case MVT::f64: Opc = X86::FCMOVE; ContainsFPCode = true; break;
1023 // Get the condition into the zero flag.
1024 BuildMI(BB, X86::TEST8rr, 2).addReg(Tmp1).addReg(Tmp1);
1025 BuildMI(BB, Opc, 2, Result).addReg(Tmp2).addReg(Tmp3);
1028 // FIXME: This should not be implemented here, it should be in the generic
1030 Tmp2 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16,
1032 Tmp3 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16,
1034 Tmp1 = SelectExpr(N.getOperand(0));
1035 BuildMI(BB, X86::TEST8rr, 2).addReg(Tmp1).addReg(Tmp1);
1036 // FIXME: need subregs to do better than this!
1037 unsigned TmpReg = MakeReg(MVT::i16);
1038 BuildMI(BB, X86::CMOVE16rr, 2, TmpReg).addReg(Tmp2).addReg(Tmp3);
1039 BuildMI(BB, X86::MOV16rr, 1, X86::AX).addReg(TmpReg);
1040 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
// --- ISD::SDIV by a power-of-2 constant: strength-reduce to shifts ---
// Classic round-toward-zero sequence:
//   t1 = x >>s (Log-1)        ; 0 or all-ones depending on sign
//   t2 = t1 >>u (width-Log)   ; bias: 0 for x>=0, (2^Log - 1) for x<0
//   t3 = x + t2
//   q  = t3 >>s Log           ; negate afterwards if divisor was negative
1048 Tmp1 = SelectExpr(N.getOperand(0));
1050 if (N.getOpcode() == ISD::SDIV)
1051 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1052 // FIXME: These special cases should be handled by the lowering impl!
1053 unsigned RHS = CN->getValue();
// NOTE(review): RHS==1 also passes this test, making Log==0 and the shift
// immediates below Log-1 == -1 and 32-Log == 32 — confirm divide-by-1 is
// folded away before reaching here.
1059 if (RHS && (RHS & (RHS-1)) == 0) { // Signed division by power of 2?
1060 unsigned Log = log2(RHS);
1061 unsigned TmpReg = MakeReg(N.getValueType());
1062 unsigned SAROpc, SHROpc, ADDOpc, NEGOpc;
1063 switch (N.getValueType()) {
// BUG(review): assert on a string literal is always true — should be
// assert(0 && "Unknown type to signed divide!"); this default silently
// falls through with the opcode locals uninitialized.
1064 default: assert("Unknown type to signed divide!");
1066 SAROpc = X86::SAR8ri;
1067 SHROpc = X86::SHR8ri;
1068 ADDOpc = X86::ADD8rr;
1069 NEGOpc = X86::NEG8r;
1072 SAROpc = X86::SAR16ri;
1073 SHROpc = X86::SHR16ri;
1074 ADDOpc = X86::ADD16rr;
1075 NEGOpc = X86::NEG16r;
1078 SAROpc = X86::SAR32ri;
1079 SHROpc = X86::SHR32ri;
1080 ADDOpc = X86::ADD32rr;
1081 NEGOpc = X86::NEG32r;
// Propagate the sign bit into the low Log bits.
1084 BuildMI(BB, SAROpc, 2, TmpReg).addReg(Tmp1).addImm(Log-1);
1085 unsigned TmpReg2 = MakeReg(N.getValueType());
// BUG(review): "32-Log" hard-codes 32-bit width; for the i8/i16 cases above
// this should be 8-Log / 16-Log respectively.
1086 BuildMI(BB, SHROpc, 2, TmpReg2).addReg(TmpReg).addImm(32-Log);
1087 unsigned TmpReg3 = MakeReg(N.getValueType());
// Add the rounding bias to the dividend.
1088 BuildMI(BB, ADDOpc, 2, TmpReg3).addReg(Tmp1).addReg(TmpReg2);
// isNeg (divisor < 0) is computed on a line elided from this excerpt; when
// set, shift into a temp and negate into Result.
1090 unsigned TmpReg4 = isNeg ? MakeReg(N.getValueType()) : Result;
1091 BuildMI(BB, SAROpc, 2, TmpReg4).addReg(TmpReg3).addImm(Log);
1093 BuildMI(BB, NEGOpc, 1, Result).addReg(TmpReg4);
// --- General DIV/REM selection ---
// Integer divide on x86 is a one-operand instruction operating on the
// implicit AX / DX:AX (or AL/AH) register pair: set up Lo, extend or clear
// Hi, issue (I)DIV, then copy the quotient (Lo) or remainder (Hi) out.
1098 Tmp2 = SelectExpr(N.getOperand(1));
1100 bool isSigned = N.getOpcode() == ISD::SDIV || N.getOpcode() == ISD::SREM;
1101 bool isDiv = N.getOpcode() == ISD::SDIV || N.getOpcode() == ISD::UDIV;
1102 unsigned LoReg, HiReg, DivOpcode, MovOpcode, ClrOpcode, SExtOpcode;
1103 switch (N.getValueType()) {
1104 default: assert(0 && "Cannot sdiv this type!");
// i8: LoReg/HiReg (AL/AH) are assigned on lines elided from this excerpt.
1106 DivOpcode = isSigned ? X86::IDIV8r : X86::DIV8r;
1109 MovOpcode = X86::MOV8rr;
1110 ClrOpcode = X86::MOV8ri;
1111 SExtOpcode = X86::CBW;   // sign-extend AL into AX
1114 DivOpcode = isSigned ? X86::IDIV16r : X86::DIV16r;
1117 MovOpcode = X86::MOV16rr;
1118 ClrOpcode = X86::MOV16ri;
1119 SExtOpcode = X86::CWD;   // sign-extend AX into DX:AX
1122 DivOpcode = isSigned ? X86::IDIV32r : X86::DIV32r;
1125 MovOpcode = X86::MOV32rr;
1126 ClrOpcode = X86::MOV32ri;
1127 SExtOpcode = X86::CDQ;   // sign-extend EAX into EDX:EAX
1129 case MVT::i64: assert(0 && "FIXME: implement i64 DIV/REM libcalls!");
// FP: real divide is a normal two-operand pseudo; FREM needs an fmod libcall.
1132 ContainsFPCode = true;
1133 if (N.getOpcode() == ISD::SDIV)
1134 BuildMI(BB, X86::FpDIV, 2, Result).addReg(Tmp1).addReg(Tmp2);
1136 assert(0 && "FIXME: Emit frem libcall to fmod!");
1140 // Set up the low part.
1141 BuildMI(BB, MovOpcode, 1, LoReg).addReg(Tmp1);
1144 // Sign extend the low part into the high part.
1145 BuildMI(BB, SExtOpcode, 0);
1147 // Zero out the high part, effectively zero extending the input.
1148 BuildMI(BB, ClrOpcode, 1, HiReg).addImm(0);
1151 // Emit the DIV/IDIV instruction.
1152 BuildMI(BB, DivOpcode, 1).addReg(Tmp2);
1154 // Get the result of the divide or rem.
1155 BuildMI(BB, MovOpcode, 1, Result).addReg(isDiv ? LoReg : HiReg);
// --- Shift selection (SHL / SRL / SRA) ---
// Each of the three shifts follows the same two-path pattern:
//  * constant amount -> immediate-form opcode (SxL/SxR<w>ri)
//  * variable amount -> copy the amount into CL, use the rCL form
//    (the hardware reads the count from CL implicitly).
1160 Tmp1 = SelectExpr(N.getOperand(0));
1161 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1162 switch (N.getValueType()) {
1163 default: assert(0 && "Cannot shift this type!");
1164 case MVT::i8: Opc = X86::SHL8ri; break;
1165 case MVT::i16: Opc = X86::SHL16ri; break;
1166 case MVT::i32: Opc = X86::SHL32ri; break;
1168 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
1171 Tmp2 = SelectExpr(N.getOperand(1));
1172 switch (N.getValueType()) {
1173 default: assert(0 && "Cannot shift this type!");
1174 case MVT::i8 : Opc = X86::SHL8rCL; break;
1175 case MVT::i16: Opc = X86::SHL16rCL; break;
1176 case MVT::i32: Opc = X86::SHL32rCL; break;
// Load the count into the physical CL register for the rCL form.
1178 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2)
1179 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
// --- ISD::SRL (logical shift right) ---
1182 Tmp1 = SelectExpr(N.getOperand(0));
1183 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1184 switch (N.getValueType()) {
1185 default: assert(0 && "Cannot shift this type!");
1186 case MVT::i8: Opc = X86::SHR8ri; break;
1187 case MVT::i16: Opc = X86::SHR16ri; break;
1188 case MVT::i32: Opc = X86::SHR32ri; break;
1190 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
1193 Tmp2 = SelectExpr(N.getOperand(1));
1194 switch (N.getValueType()) {
1195 default: assert(0 && "Cannot shift this type!");
1196 case MVT::i8 : Opc = X86::SHR8rCL; break;
1197 case MVT::i16: Opc = X86::SHR16rCL; break;
1198 case MVT::i32: Opc = X86::SHR32rCL; break;
1200 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
1201 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
// --- ISD::SRA (arithmetic shift right) ---
1204 Tmp1 = SelectExpr(N.getOperand(0));
1205 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1206 switch (N.getValueType()) {
1207 default: assert(0 && "Cannot shift this type!");
1208 case MVT::i8: Opc = X86::SAR8ri; break;
1209 case MVT::i16: Opc = X86::SAR16ri; break;
1210 case MVT::i32: Opc = X86::SAR32ri; break;
1212 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addImm(CN->getValue());
1215 Tmp2 = SelectExpr(N.getOperand(1));
1216 switch (N.getValueType()) {
1217 default: assert(0 && "Cannot shift this type!");
1218 case MVT::i8 : Opc = X86::SAR8rCL; break;
1219 case MVT::i16: Opc = X86::SAR16rCL; break;
1220 case MVT::i32: Opc = X86::SAR32rCL; break;
1222 BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
1223 BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
// --- ISD::SETCC selection ---
// Compare the two operands (EmitCMP sets EFLAGS), then materialize the
// boolean with a setcc-family instruction via EmitSetCC.
1227 if (MVT::isFloatingPoint(N.getOperand(0).getValueType()))
1228 ContainsFPCode = true;
1229 EmitCMP(N.getOperand(0), N.getOperand(1));
// NOTE(review): FP-ness is taken from operand(0) above but operand(1) here —
// presumably both operands always have the same type; verify.
1230 EmitSetCC(BB, Result, cast<SetCCSDNode>(N)->getCondition(),
1231 MVT::isFloatingPoint(N.getOperand(1).getValueType()));
// --- ISD::LOAD selection ---
1234 // The chain for this load is now lowered.
1235 LoweredTokens.insert(SDOperand(Node, 1));
1236 Select(N.getOperand(0));
1238 // Make sure we generate both values.
1240 ExprMap[N.getValue(1)] = 1; // Generate the token
1242 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
1244 switch (Node->getValueType(0)) {
1245 default: assert(0 && "Cannot load this type!");
1247 case MVT::i8: Opc = X86::MOV8rm; break;
1248 case MVT::i16: Opc = X86::MOV16rm; break;
1249 case MVT::i32: Opc = X86::MOV32rm; break;
1250 case MVT::f32: Opc = X86::FLD32m; ContainsFPCode = true; break;
1251 case MVT::f64: Opc = X86::FLD64m; ContainsFPCode = true; break;
// Constant-pool loads get a dedicated addressing form; everything else goes
// through the generic address-mode matcher (4 address operands).
1253 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N.getOperand(1))){
1254 addConstantPoolReference(BuildMI(BB, Opc, 4, Result), CP->getIndex());
1257 SelectAddress(N.getOperand(1), AM);
1258 addFullAddress(BuildMI(BB, Opc, 4, Result), AM);
// --- ISD::DYNAMIC_STACKALLOC selection ---
// Allocate stack space by subtracting the (constant or dynamic) size from
// ESP, then return a copy of the new ESP as the result pointer.
1262 case ISD::DYNAMIC_STACKALLOC:
1263 Select(N.getOperand(0));
1265 // Generate both result values.
1267 ExprMap[N.getValue(1)] = 1; // Generate the token
1269 Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
1271 // FIXME: We are currently ignoring the requested alignment for handling
1272 // greater than the stack alignment. This will need to be revisited at some
1273 // point. Align = N.getOperand(2);
// Reject any non-zero alignment request (operand 2).
// NOTE(review): this only prints to cerr — the visible code does not abort,
// so codegen appears to continue after the diagnostic; confirm intended.
1275 if (!isa<ConstantSDNode>(N.getOperand(2)) ||
1276 cast<ConstantSDNode>(N.getOperand(2))->getValue() != 0) {
1277 std::cerr << "Cannot allocate stack object with greater alignment than"
1278 << " the stack alignment yet!";
1282 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1283 BuildMI(BB, X86::SUB32ri, 2, X86::ESP).addReg(X86::ESP)
1284 .addImm(CN->getValue());
1286 Tmp1 = SelectExpr(N.getOperand(1));
1288 // Subtract size from stack pointer, thereby allocating some space.
1289 BuildMI(BB, X86::SUB32rr, 2, X86::ESP).addReg(X86::ESP).addReg(Tmp1);
1292 // Put a pointer to the space into the result register, by copying the stack
1294 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::ESP);
// --- ISD::CALL selection ---
// Emit the call (direct to a global, direct to an external symbol, or
// indirect through a register), then copy the return value out of the
// fixed ABI return register into a virtual register.
1298 // The chain for this call is now lowered.
1299 LoweredTokens.insert(N.getValue(Node->getNumValues()-1));
1301 Select(N.getOperand(0));
1302 if (GlobalAddressSDNode *GASD =
1303 dyn_cast<GlobalAddressSDNode>(N.getOperand(1))) {
// NOTE(review): the trailing 'true' flag's meaning isn't visible here —
// presumably "isPCRelative/call target"; confirm against addGlobalAddress.
1304 BuildMI(BB, X86::CALLpcrel32, 1).addGlobalAddress(GASD->getGlobal(),true);
1305 } else if (ExternalSymbolSDNode *ESSDN =
1306 dyn_cast<ExternalSymbolSDNode>(N.getOperand(1))) {
1307 BuildMI(BB, X86::CALLpcrel32,
1308 1).addExternalSymbol(ESSDN->getSymbol(), true);
1310 Tmp1 = SelectExpr(N.getOperand(1));
1311 BuildMI(BB, X86::CALL32r, 1).addReg(Tmp1);
// Copy the result from the ABI return register(s): AL/AX/EAX (+EDX for a
// second i32 piece), or ST(0) for floating point via FpGETRESULT.
1313 switch (Node->getValueType(0)) {
1314 default: assert(0 && "Unknown value type for call result!");
1315 case MVT::Other: return 1;  // void call: only the chain result exists
1318 BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
1321 BuildMI(BB, X86::MOV16rr, 1, Result).addReg(X86::AX);
1324 BuildMI(BB, X86::MOV32rr, 1, Result).addReg(X86::EAX);
// A second i32 result (e.g. the high half of an i64) comes back in EDX and
// lands in the next consecutive virtual register.
1325 if (Node->getValueType(1) == MVT::i32)
1326 BuildMI(BB, X86::MOV32rr, 1, Result+1).addReg(X86::EDX);
1329 case MVT::f64: // Floating-point return values live in %ST(0)
1330 ContainsFPCode = true;
1331 BuildMI(BB, X86::FpGETRESULT, 1, Result);
// Multi-value nodes: results occupy consecutive registers; index by ResNo.
1334 return Result+N.ResNo;
/// Select - Select instructions for the side-effecting / chain-producing
/// nodes of the DAG (stores, returns, branches, calls stack adjustment...).
/// Value-producing subtrees are delegated to SelectExpr.
1340 void ISel::Select(SDOperand N) {
1341 unsigned Tmp1, Tmp2, Opc;
1343 // FIXME: Disable for our current expansion model!
// LoweredTokens de-duplicates chain nodes so each is selected exactly once.
1344 if (/*!N->hasOneUse() &&*/ !LoweredTokens.insert(N).second)
1345 return; // Already selected.
1347 switch (N.getOpcode()) {
1349 N.Val->dump(); std::cerr << "\n";
1350 assert(0 && "Node not handled yet!");
1351 case ISD::EntryToken: return; // Noop
// CopyToReg: select the chain, then the value, then copy it into the
// requested (virtual or physical) register with a type-appropriate move.
1352 case ISD::CopyToReg:
1353 Select(N.getOperand(0));
1354 Tmp1 = SelectExpr(N.getOperand(1));
1355 Tmp2 = cast<CopyRegSDNode>(N)->getReg();
1358 switch (N.getOperand(1).getValueType()) {
1359 default: assert(0 && "Invalid type for operation!");
1361 case MVT::i8: Opc = X86::MOV8rr; break;
1362 case MVT::i16: Opc = X86::MOV16rr; break;
1363 case MVT::i32: Opc = X86::MOV32rr; break;
1365 case MVT::f64: Opc = X86::FpMOV; break;
1367 BuildMI(BB, Opc, 1, Tmp2).addReg(Tmp1);
// Return: copy return value(s) into the ABI registers (EAX[/EDX], or
// ST(0) for FP) and mark them live-out via IMPLICIT_USE before RET.
1371 Select(N.getOperand(0));
1372 switch (N.getNumOperands()) {
1374 assert(0 && "Unknown return instruction!");
// Two-value return: a pair of i32s (e.g. a split i64) in EAX:EDX.
1376 assert(N.getOperand(1).getValueType() == MVT::i32 &&
1377 N.getOperand(2).getValueType() == MVT::i32 &&
1378 "Unknown two-register value!");
1379 Tmp1 = SelectExpr(N.getOperand(1));
1380 Tmp2 = SelectExpr(N.getOperand(2));
1381 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
1382 BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(Tmp2);
1383 // Declare that EAX & EDX are live on exit.
1384 BuildMI(BB, X86::IMPLICIT_USE, 3).addReg(X86::EAX).addReg(X86::EDX)
// Single-value return.
1388 Tmp1 = SelectExpr(N.getOperand(1));
1389 switch (N.getOperand(1).getValueType()) {
1390 default: assert(0 && "All other types should have been promoted!!");
1392 BuildMI(BB, X86::FpSETRESULT, 1).addReg(Tmp1);
1393 // Declare that top-of-stack is live on exit
1394 BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::ST0).addReg(X86::ESP);
1397 BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
1398 BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::EAX).addReg(X86::ESP);
1405 BuildMI(BB, X86::RET, 0); // Just emit a 'ret' instruction
// Unconditional branch.
1408 Select(N.getOperand(0));
1409 MachineBasicBlock *Dest =
1410 cast<BasicBlockSDNode>(N.getOperand(1))->getBasicBlock();
1411 BuildMI(BB, X86::JMP, 1).addMBB(Dest);
// Conditional branch: operand(1) is the condition, operand(2) the target.
1416 Select(N.getOperand(0));
1417 MachineBasicBlock *Dest =
1418 cast<BasicBlockSDNode>(N.getOperand(2))->getBasicBlock();
1419 // Try to fold a setcc into the branch. If this fails, emit a test/jne
// Note: EmitBranchCC returning nonzero means folding FAILED, so the
// fallback TEST/JNE on the materialized i1 runs inside this 'if'.
1421 if (EmitBranchCC(Dest, N.getOperand(1))) {
1422 Tmp1 = SelectExpr(N.getOperand(1));
1423 BuildMI(BB, X86::TEST8rr, 2).addReg(Tmp1).addReg(Tmp1);
1424 BuildMI(BB, X86::JNE, 1).addMBB(Dest);
// Value-producing chain nodes are handled by SelectExpr (case bodies for
// LOAD/CALL/etc. are elided from this excerpt).
1430 case ISD::DYNAMIC_STACKALLOC:
// Store: select chain, match the address, then either store an immediate
// directly to memory (mi forms) or a register (mr forms).
1434 Select(N.getOperand(0));
1435 // Select the address.
1437 SelectAddress(N.getOperand(2), AM);
1439 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1441 switch (CN->getValueType(0)) {
1442 default: assert(0 && "Invalid type for operation!");
1444 case MVT::i8: Opc = X86::MOV8mi; break;
1445 case MVT::i16: Opc = X86::MOV16mi; break;
1446 case MVT::i32: Opc = X86::MOV32mi; break;
// f64 constants can't be stored as immediates: fall out to the
// register-store path below.
1448 case MVT::f64: break;
// 4 address operands + 1 immediate operand.
1451 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addImm(CN->getValue());
1455 Tmp1 = SelectExpr(N.getOperand(1));
1457 switch (N.getOperand(1).getValueType()) {
1458 default: assert(0 && "Cannot store this type!");
1460 case MVT::i8: Opc = X86::MOV8mr; break;
1461 case MVT::i16: Opc = X86::MOV16mr; break;
1462 case MVT::i32: Opc = X86::MOV32mr; break;
1463 case MVT::f32: Opc = X86::FST32m; ContainsFPCode = true; break;
1464 case MVT::f64: Opc = X86::FST64m; ContainsFPCode = true; break;
1466 addFullAddress(BuildMI(BB, Opc, 4+1), AM).addReg(Tmp1);
// Call-stack bracket pseudos: carry the byte count as an immediate so the
// prolog/epilog inserter can fold it into the frame.
1469 case ISD::ADJCALLSTACKDOWN:
1470 case ISD::ADJCALLSTACKUP:
1471 Select(N.getOperand(0));
1472 Tmp1 = cast<ConstantSDNode>(N.getOperand(1))->getValue();
1474 Opc = N.getOpcode() == ISD::ADJCALLSTACKDOWN ? X86::ADJCALLSTACKDOWN :
1475 X86::ADJCALLSTACKUP;
1476 BuildMI(BB, Opc, 1).addImm(Tmp1);
1479 assert(0 && "Should not be reached!");
1483 /// createX86PatternInstructionSelector - This pass converts an LLVM function
1484 /// into a machine code representation using pattern matching and a machine
1485 /// description file.
// Factory entry point: hands ownership of a fresh ISel pass to the caller
// (the pass manager deletes it).
1487 FunctionPass *llvm::createX86PatternInstructionSelector(TargetMachine &TM) {
1488 return new ISel(TM);