X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;ds=sidebyside;f=lib%2FTarget%2FX86%2FX86ISelPattern.cpp;h=a75854e5592787c1953ec46fcf45f3c1523393c1;hb=90d1be7eefdc98d12ea5ddd13715e2bfc8679351;hp=c7919bdb5adb1f8c343209fde8a03157b4c2a531;hpb=837caa722376d3f9539d3e44e39ce7cd5a62b733;p=oota-llvm.git diff --git a/lib/Target/X86/X86ISelPattern.cpp b/lib/Target/X86/X86ISelPattern.cpp index c7919bdb5ad..a75854e5592 100644 --- a/lib/Target/X86/X86ISelPattern.cpp +++ b/lib/Target/X86/X86ISelPattern.cpp @@ -27,6 +27,7 @@ #include "llvm/Support/MathExtras.h" #include "llvm/ADT/Statistic.h" #include +#include using namespace llvm; //===----------------------------------------------------------------------===// @@ -38,6 +39,12 @@ namespace { public: X86TargetLowering(TargetMachine &TM) : TargetLowering(TM) { // Set up the TargetLowering object. + + // X86 is wierd, it always uses i8 for shift amounts and setcc results. + setShiftAmountType(MVT::i8); + setSetCCResultType(MVT::i8); + + // Set up the register classes. addRegisterClass(MVT::i8, X86::R8RegisterClass); addRegisterClass(MVT::i16, X86::R16RegisterClass); addRegisterClass(MVT::i32, X86::R32RegisterClass); @@ -45,16 +52,23 @@ namespace { // FIXME: Eliminate these two classes when legalize can handle promotions // well. - addRegisterClass(MVT::i1, X86::R8RegisterClass); - addRegisterClass(MVT::f32, X86::RFPRegisterClass); +/**/ addRegisterClass(MVT::i1, X86::R8RegisterClass); +/**/ //addRegisterClass(MVT::f32, X86::RFPRegisterClass); + + setOperationAction(ISD::MEMMOVE , MVT::Other, Expand); + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Expand); + setOperationAction(ISD::ZERO_EXTEND_INREG, MVT::i16 , Expand); + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand); + setOperationAction(ISD::ZERO_EXTEND_INREG, MVT::i1 , Expand); + setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand); + setOperationAction(ISD::SEXTLOAD , MVT::i1 , Expand); + setOperationAction(ISD::SREM , MVT::f64 , Expand); + + // These should be promoted to a larger select which is supported. +/**/ setOperationAction(ISD::SELECT , MVT::i1 , Promote); + setOperationAction(ISD::SELECT , MVT::i8 , Promote); computeRegisterProperties(); - - setOperationUnsupported(ISD::MEMMOVE, MVT::Other); - - setOperationUnsupported(ISD::MUL, MVT::i8); - setOperationUnsupported(ISD::SELECT, MVT::i1); - setOperationUnsupported(ISD::SELECT, MVT::i8); addLegalFPImmediate(+0.0); // FLD0 addLegalFPImmediate(+1.0); // FLD1 @@ -180,7 +194,8 @@ X86TargetLowering::LowerCallTo(SDOperand Chain, // Arguments go on the stack in reverse order, as specified by the ABI. unsigned ArgOffset = 0; - SDOperand StackPtr = DAG.getCopyFromReg(X86::ESP, MVT::i32); + SDOperand StackPtr = DAG.getCopyFromReg(X86::ESP, MVT::i32, + DAG.getEntryNode()); for (unsigned i = 0, e = Args.size(); i != e; ++i) { unsigned ArgReg; SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy()); @@ -326,48 +341,13 @@ namespace { /// InstructionSelectBasicBlock - This callback is invoked by /// SelectionDAGISel when it has created a SelectionDAG for us to codegen. - virtual void InstructionSelectBasicBlock(SelectionDAG &DAG) { - // While we're doing this, keep track of whether we see any FP code for - // FP_REG_KILL insertion. - ContainsFPCode = false; - - // Compute the RegPressureMap, which is an approximation for the number of - // registers required to compute each node. 
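// [Editorial sketch -- not part of this patch.] The RegPressureMap computed
// here is a generalized Sethi-Ullman numbering. On a pure binary expression
// tree the classic recurrence looks like this ('ExprTree' is a hypothetical
// stand-in type, used only to make the recurrence concrete):
//
//   unsigned SethiUllman(const ExprTree *N) {
//     if (N->isLeaf()) return 1;           // A leaf occupies one register.
//     unsigned L = SethiUllman(N->getLeft());
//     unsigned R = SethiUllman(N->getRight());
//     // If both subtrees need K registers, whichever is evaluated first
//     // pins an extra register to hold its result while the other runs.
//     return L == R ? L + 1 : std::max(L, R);
//   }
//
// Evaluating the higher-pressure subtree first is what keeps the cost at
// max(L, R) rather than L + R; the getRegPressure() comparisons throughout
// this file apply exactly that rule.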
- ComputeRegPressure(DAG.getRoot()); + virtual void InstructionSelectBasicBlock(SelectionDAG &DAG); - //DAG.viewGraph(); - - // Codegen the basic block. - Select(DAG.getRoot()); - - // Insert FP_REG_KILL instructions into basic blocks that need them. This - // only occurs due to the floating point stackifier not being aggressive - // enough to handle arbitrary global stackification. - // - // Currently we insert an FP_REG_KILL instruction into each block that - // uses or defines a floating point virtual register. - // - // When the global register allocators (like linear scan) finally update - // live variable analysis, we can keep floating point values in registers - // across basic blocks. This will be a huge win, but we are waiting on - // the global allocators before we can do this. - // - if (ContainsFPCode && BB->succ_size()) { - BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0); - ++NumFPKill; - } - - // Clear state used for selection. - ExprMap.clear(); - LoweredTokens.clear(); - RegPressureMap.clear(); - } - - bool isFoldableLoad(SDOperand Op); + bool isFoldableLoad(SDOperand Op, SDOperand OtherOp); void EmitFoldedLoad(SDOperand Op, X86AddressMode &AM); + bool TryToFoldLoadOpStore(SDNode *Node); - - void EmitCMP(SDOperand LHS, SDOperand RHS); + void EmitCMP(SDOperand LHS, SDOperand RHS, bool isOnlyUse); bool EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain, SDOperand Cond); void EmitSelectCC(SDOperand Cond, MVT::ValueType SVT, unsigned RTrue, unsigned RFalse, unsigned RDest); @@ -377,6 +357,72 @@ namespace { }; } +/// InstructionSelectBasicBlock - This callback is invoked by SelectionDAGISel +/// when it has created a SelectionDAG for us to codegen. +void ISel::InstructionSelectBasicBlock(SelectionDAG &DAG) { + // While we're doing this, keep track of whether we see any FP code for + // FP_REG_KILL insertion. + ContainsFPCode = false; + + // Scan the PHI nodes that already are inserted into this basic block. If any + // of them is a PHI of a floating point value, we need to insert an + // FP_REG_KILL. + SSARegMap *RegMap = BB->getParent()->getSSARegMap(); + for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); + I != E; ++I) { + assert(I->getOpcode() == X86::PHI && + "Isn't just PHI nodes?"); + if (RegMap->getRegClass(I->getOperand(0).getReg()) == + X86::RFPRegisterClass) { + ContainsFPCode = true; + break; + } + } + + // Compute the RegPressureMap, which is an approximation for the number of + // registers required to compute each node. + ComputeRegPressure(DAG.getRoot()); + + // Codegen the basic block. + Select(DAG.getRoot()); + + // Finally, look at all of the successors of this block. If any contain a PHI + // node of FP type, we need to insert an FP_REG_KILL in this block. + for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(), + E = BB->succ_end(); SI != E && !ContainsFPCode; ++SI) + for (MachineBasicBlock::iterator I = (*SI)->begin(), E = (*SI)->end(); + I != E && I->getOpcode() == X86::PHI; ++I) { + if (RegMap->getRegClass(I->getOperand(0).getReg()) == + X86::RFPRegisterClass) { + ContainsFPCode = true; + break; + } + } + + // Insert FP_REG_KILL instructions into basic blocks that need them. This + // only occurs due to the floating point stackifier not being aggressive + // enough to handle arbitrary global stackification. + // + // Currently we insert an FP_REG_KILL instruction into each block that uses or + // defines a floating point virtual register. 
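// [Editorial note -- sketch, not part of this patch.] The two PHI scans in
// this function both ask one question: does a PHI here or in a successor
// produce an x87 value? A hypothetical helper expressing the same test with
// the APIs already used above:
//
//   static bool HasFPPHI(MachineBasicBlock &MBB, SSARegMap &RegMap) {
//     for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
//          I != E && I->getOpcode() == X86::PHI; ++I)
//       if (RegMap.getRegClass(I->getOperand(0).getReg()) ==
//           X86::RFPRegisterClass)
//         return true;              // An FP value is live across an edge.
//     return false;
//   }
//
// The x87 register file is a stack, so until liveness is tracked across
// blocks, any such value must be killed at the end of the defining block.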
+ // + // When the global register allocators (like linear scan) finally update live + // variable analysis, we can keep floating point values in registers across + // basic blocks. This will be a huge win, but we are waiting on the global + // allocators before we can do this. + // + if (ContainsFPCode && BB->succ_size()) { + BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0); + ++NumFPKill; + } + + // Clear state used for selection. + ExprMap.clear(); + LoweredTokens.clear(); + RegPressureMap.clear(); +} + + // ComputeRegPressure - Compute the RegPressureMap, which is an approximation // for the number of registers required to compute each node. This is basically // computing a generalized form of the Sethi-Ullman number for each node. @@ -407,9 +453,13 @@ unsigned ISel::ComputeRegPressure(SDOperand O) { ++NumExtraMaxRegUsers; } } - - Result = MaxRegUse+NumExtraMaxRegUsers; + + if (O.getOpcode() != ISD::TokenFactor) + Result = MaxRegUse+NumExtraMaxRegUsers; + else + Result = std::max(MaxRegUse-1, 1); } + //std::cerr << " WEIGHT: " << Result << " "; N->dump(); std::cerr << "\n"; return Result; } @@ -436,7 +486,11 @@ bool ISel::SelectAddress(SDOperand N, X86AddressMode &AM) { AM.Disp += cast(N)->getValue(); return false; case ISD::SHL: - if (AM.IndexReg == 0 || AM.Scale == 1) + // We might have folded the load into this shift, so don't regen the value + // if so. + if (ExprMap.count(N)) break; + + if (AM.IndexReg == 0 && AM.Scale == 1) if (ConstantSDNode *CN = dyn_cast(N.Val->getOperand(1))) { unsigned Val = CN->getValue(); if (Val == 1 || Val == 2 || Val == 3) { @@ -446,13 +500,13 @@ bool ISel::SelectAddress(SDOperand N, X86AddressMode &AM) { // Okay, we know that we have a scale by now. However, if the scaled // value is an add of something and a constant, we can fold the // constant into the disp field here. - if (ShVal.Val->getOpcode() == ISD::ADD && + if (ShVal.Val->getOpcode() == ISD::ADD && !ExprMap.count(ShVal) && isa(ShVal.Val->getOperand(1))) { AM.IndexReg = SelectExpr(ShVal.Val->getOperand(0)); ConstantSDNode *AddVal = cast(ShVal.Val->getOperand(1)); AM.Disp += AddVal->getValue() << Val; - } else { + } else { AM.IndexReg = SelectExpr(ShVal); } return false; @@ -460,6 +514,10 @@ bool ISel::SelectAddress(SDOperand N, X86AddressMode &AM) { } break; case ISD::MUL: + // We might have folded the load into this mul, so don't regen the value if + // so. + if (ExprMap.count(N)) break; + // X*[3,5,9] -> X+X*[2,4,8] if (AM.IndexReg == 0 && AM.BaseType == X86AddressMode::RegBase && AM.Base.Reg == 0) @@ -473,7 +531,7 @@ bool ISel::SelectAddress(SDOperand N, X86AddressMode &AM) { // Okay, we know that we have a scale by now. However, if the scaled // value is an add of something and a constant, we can fold the // constant into the disp field here. - if (MulVal.Val->getOpcode() == ISD::ADD && + if (MulVal.Val->getOpcode() == ISD::ADD && !ExprMap.count(MulVal) && isa(MulVal.Val->getOperand(1))) { Reg = SelectExpr(MulVal.Val->getOperand(0)); ConstantSDNode *AddVal = @@ -489,11 +547,19 @@ bool ISel::SelectAddress(SDOperand N, X86AddressMode &AM) { break; case ISD::ADD: { + // We might have folded the load into this mul, so don't regen the value if + // so. 
+ if (ExprMap.count(N)) break; + X86AddressMode Backup = AM; if (!SelectAddress(N.Val->getOperand(0), AM) && !SelectAddress(N.Val->getOperand(1), AM)) return false; AM = Backup; + if (!SelectAddress(N.Val->getOperand(1), AM) && + !SelectAddress(N.Val->getOperand(0), AM)) + return false; + AM = Backup; break; } } @@ -678,7 +744,7 @@ bool ISel::EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain, case ISD::SETUGE: Opc = X86::JAE; break; } Select(Chain); - EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1)); + EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1), SetCC->hasOneUse()); BuildMI(BB, Opc, 1).addMBB(Dest); return false; } @@ -735,7 +801,7 @@ bool ISel::EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain, } Select(Chain); - EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1)); + EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1), SetCC->hasOneUse()); BuildMI(BB, Opc, 1).addMBB(Dest); if (Opc2) BuildMI(BB, Opc2, 1).addMBB(Dest); @@ -847,16 +913,32 @@ void ISel::EmitSelectCC(SDOperand Cond, MVT::ValueType SVT, } } else { // FIXME: CMP R, 0 -> TEST R, R - EmitCMP(Cond.getOperand(0), Cond.getOperand(1)); + EmitCMP(Cond.getOperand(0), Cond.getOperand(1), Cond.Val->hasOneUse()); std::swap(RTrue, RFalse); } BuildMI(BB, Opc, 2, RDest).addReg(RTrue).addReg(RFalse); } -void ISel::EmitCMP(SDOperand LHS, SDOperand RHS) { +void ISel::EmitCMP(SDOperand LHS, SDOperand RHS, bool HasOneUse) { unsigned Opc; if (ConstantSDNode *CN = dyn_cast(RHS)) { Opc = 0; + if (HasOneUse && isFoldableLoad(LHS, RHS)) { + switch (RHS.getValueType()) { + default: break; + case MVT::i1: + case MVT::i8: Opc = X86::CMP8mi; break; + case MVT::i16: Opc = X86::CMP16mi; break; + case MVT::i32: Opc = X86::CMP32mi; break; + } + if (Opc) { + X86AddressMode AM; + EmitFoldedLoad(LHS, AM); + addFullAddress(BuildMI(BB, Opc, 5), AM).addImm(CN->getValue()); + return; + } + } + switch (RHS.getValueType()) { default: break; case MVT::i1: @@ -869,6 +951,32 @@ void ISel::EmitCMP(SDOperand LHS, SDOperand RHS) { BuildMI(BB, Opc, 2).addReg(Tmp1).addImm(CN->getValue()); return; } + } else if (ConstantFPSDNode *CN = dyn_cast(RHS)) { + if (CN->isExactlyValue(+0.0) || + CN->isExactlyValue(-0.0)) { + unsigned Reg = SelectExpr(LHS); + BuildMI(BB, X86::FTST, 1).addReg(Reg); + BuildMI(BB, X86::FNSTSW8r, 0); + BuildMI(BB, X86::SAHF, 1); + } + } + + Opc = 0; + if (HasOneUse && isFoldableLoad(LHS, RHS)) { + switch (RHS.getValueType()) { + default: break; + case MVT::i1: + case MVT::i8: Opc = X86::CMP8mr; break; + case MVT::i16: Opc = X86::CMP16mr; break; + case MVT::i32: Opc = X86::CMP32mr; break; + } + if (Opc) { + X86AddressMode AM; + EmitFoldedLoad(LHS, AM); + unsigned Reg = SelectExpr(RHS); + addFullAddress(BuildMI(BB, Opc, 5), AM).addReg(Reg); + return; + } } switch (LHS.getValueType()) { @@ -891,20 +999,58 @@ void ISel::EmitCMP(SDOperand LHS, SDOperand RHS) { BuildMI(BB, Opc, 2).addReg(Tmp1).addReg(Tmp2); } +/// NodeTransitivelyUsesValue - Return true if N or any of its uses uses Op. +/// The DAG cannot have cycles in it, by definition, so the visited set is not +/// needed to prevent infinite loops. The DAG CAN, however, have unbounded +/// reuse, so it prevents exponential cases. +/// +static bool NodeTransitivelyUsesValue(SDOperand N, SDOperand Op, + std::set &Visited) { + if (N == Op) return true; // Found it. + SDNode *Node = N.Val; + if (Node->getNumOperands() == 0) return false; // Leaf? + if (!Visited.insert(Node).second) return false; // Already visited? + + // Recurse for the first N-1 operands. 
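// [Editorial note] To be precise, the loop below visits operands 1..N-1 and
// the tail call at the end handles operand 0. With the Visited set this is
// a plain DFS reachability query; a minimal standalone form of the idea:
//
//   static bool Reaches(SDNode *From, SDNode *To, std::set<SDNode*> &Seen) {
//     if (From == To) return true;
//     if (!Seen.insert(From).second) return false;   // Already explored.
//     for (unsigned i = 0, e = From->getNumOperands(); i != e; ++i)
//       if (Reaches(From->getOperand(i).Val, To, Seen))
//         return true;
//     return false;
//   }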
+ for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) + if (NodeTransitivelyUsesValue(Node->getOperand(i), Op, Visited)) + return true; + + // Tail recurse for the last operand. + return NodeTransitivelyUsesValue(Node->getOperand(0), Op, Visited); +} + /// isFoldableLoad - Return true if this is a load instruction that can safely /// be folded into an operation that uses it. -bool ISel::isFoldableLoad(SDOperand Op) { +bool ISel::isFoldableLoad(SDOperand Op, SDOperand OtherOp) { if (Op.getOpcode() != ISD::LOAD || // FIXME: currently can't fold constant pool indexes. isa(Op.getOperand(1))) return false; // If this load has already been emitted, we clearly can't fold it. - if (ExprMap.count(Op)) return false; - - return Op.Val->use_size() == 2; + assert(Op.ResNo == 0 && "Not a use of the value of the load?"); + if (ExprMap.count(Op.getValue(1))) return false; + assert(!ExprMap.count(Op.getValue(0)) && "Value in map but not token chain?"); + assert(!LoweredTokens.count(Op.getValue(1)) && + "Token lowered but value not in map?"); + + // If there is not just one use of its value, we cannot fold. + if (!Op.Val->hasNUsesOfValue(1, 0)) return false; + + // Finally, we cannot fold the load into the operation if this would induce a + // cycle into the resultant dag. To check for this, see if OtherOp (the other + // operand of the operation we are folding the load into) can possible use the + // chain node defined by the load. + if (OtherOp.Val && !Op.Val->hasNUsesOfValue(0, 1)) { // Has uses of chain? + std::set Visited; + if (NodeTransitivelyUsesValue(OtherOp, Op.getValue(1), Visited)) + return false; + } + return true; } + /// EmitFoldedLoad - Ensure that the arguments of the load are code generated, /// and compute the address being loaded into AM. void ISel::EmitFoldedLoad(SDOperand Op, X86AddressMode &AM) { @@ -919,8 +1065,11 @@ void ISel::EmitFoldedLoad(SDOperand Op, X86AddressMode &AM) { } // The chain for this load is now lowered. - LoweredTokens.insert(SDOperand(Op.Val, 1)); + assert(ExprMap.count(SDOperand(Op.Val, 1)) == 0 && + "Load emitted more than once?"); ExprMap[SDOperand(Op.Val, 1)] = 1; + if (!LoweredTokens.insert(Op.getValue(1)).second) + assert(0 && "Load emitted more than once!"); } unsigned ISel::SelectExpr(SDOperand N) { @@ -930,9 +1079,12 @@ unsigned ISel::SelectExpr(SDOperand N) { SDNode *Node = N.Val; SDOperand Op0, Op1; - if (Node->getOpcode() == ISD::CopyFromReg) + if (Node->getOpcode() == ISD::CopyFromReg) { + // FIXME: Handle copy from physregs! + // Just use the specified register as our input. - return dyn_cast(Node)->getReg(); + return dyn_cast(Node)->getReg(); + } unsigned &Reg = ExprMap[N]; if (Reg) return Reg; @@ -1011,18 +1163,31 @@ unsigned ISel::SelectExpr(SDOperand N) { case ISD::ZERO_EXTEND: { int DestIs16 = N.getValueType() == MVT::i16; int SrcIs16 = N.getOperand(0).getValueType() == MVT::i16; - Tmp1 = SelectExpr(N.getOperand(0)); // FIXME: This hack is here for zero extension casts from bool to i8. This // would not be needed if bools were promoted by Legalize. 
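// [Editorial sketch -- a constructed example, not from the patch.] The
// cycle check in isFoldableLoad() above rejects shapes like the following:
//
//   t, ch  = load P              // the load produces <value, chain>
//   st     = store ch, X, Q      // a store chained after the load
//   u, ch2 = load st, R          // a second load chained after the store
//   v      = add t, u            // folding 'load P' into this add would
//                                // make one instruction that must issue
//                                // before the store (as the load) and
//                                // after it (as the add) -- a cycle.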
if (N.getValueType() == MVT::i8) { + Tmp1 = SelectExpr(N.getOperand(0)); BuildMI(BB, X86::MOV8rr, 1, Result).addReg(Tmp1); return Result; } + if (isFoldableLoad(N.getOperand(0), SDOperand())) { + static const unsigned Opc[3] = { + X86::MOVZX32rm8, X86::MOVZX32rm16, X86::MOVZX16rm8 + }; + + X86AddressMode AM; + EmitFoldedLoad(N.getOperand(0), AM); + addFullAddress(BuildMI(BB, Opc[SrcIs16+DestIs16*2], 4, Result), AM); + + return Result; + } + static const unsigned Opc[3] = { X86::MOVZX32rr8, X86::MOVZX32rr16, X86::MOVZX16rr8 }; + Tmp1 = SelectExpr(N.getOperand(0)); BuildMI(BB, Opc[SrcIs16+DestIs16*2], 1, Result).addReg(Tmp1); return Result; } @@ -1034,6 +1199,17 @@ unsigned ISel::SelectExpr(SDOperand N) { assert(N.getOperand(0).getValueType() != MVT::i1 && "Sign extend from bool not implemented!"); + if (isFoldableLoad(N.getOperand(0), SDOperand())) { + static const unsigned Opc[3] = { + X86::MOVSX32rm8, X86::MOVSX32rm16, X86::MOVSX16rm8 + }; + + X86AddressMode AM; + EmitFoldedLoad(N.getOperand(0), AM); + addFullAddress(BuildMI(BB, Opc[SrcIs16+DestIs16*2], 4, Result), AM); + return Result; + } + static const unsigned Opc[3] = { X86::MOVSX32rr8, X86::MOVSX32rr16, X86::MOVSX16rr8 }; @@ -1042,6 +1218,20 @@ unsigned ISel::SelectExpr(SDOperand N) { return Result; } case ISD::TRUNCATE: + // Fold TRUNCATE (LOAD P) into a smaller load from P. + if (isFoldableLoad(N.getOperand(0), SDOperand())) { + switch (N.getValueType()) { + default: assert(0 && "Unknown truncate!"); + case MVT::i1: + case MVT::i8: Opc = X86::MOV8rm; break; + case MVT::i16: Opc = X86::MOV16rm; break; + } + X86AddressMode AM; + EmitFoldedLoad(N.getOperand(0), AM); + addFullAddress(BuildMI(BB, Opc, 4, Result), AM); + return Result; + } + // Handle cast of LARGER int to SMALLER int using a move to EAX followed by // a move out of AX or AL. switch (N.getOperand(0).getValueType()) { @@ -1130,6 +1320,7 @@ unsigned ISel::SelectExpr(SDOperand N) { switch (SrcTy) { case MVT::i64: + assert(0 && "Cast ulong to FP not implemented yet!"); // FIXME: this won't work for cast [u]long to FP addFrameReference(BuildMI(BB, X86::MOV32mr, 5), FrameIdx).addReg(Tmp1); @@ -1150,7 +1341,7 @@ unsigned ISel::SelectExpr(SDOperand N) { default: break; // No promotion required. } - if (Node->getOpcode() == ISD::UINT_TO_FP && SrcTy == MVT::i32) { + if (Node->getOpcode() == ISD::UINT_TO_FP && Result != RealDestReg) { // If this is a cast from uint -> double, we need to be careful when if // the "sign" bit is set. If so, we don't want to make a negative number, // we want to make a positive number. Emit code to add an offset if the @@ -1265,6 +1456,7 @@ unsigned ISel::SelectExpr(SDOperand N) { assert(0 && "Unknown integer type!"); case MVT::i64: // FIXME: this isn't gunna work. 
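// [Editorial sketch -- not part of this patch.] For reference, the
// uint -> double fixup selected above amounts to this exact source-level
// transformation (exact because double carries a 53-bit mantissa):
//
//   double UIntToDouble(unsigned x) {
//     double d = (double)(int)x;   // FILD consumes a *signed* 32-bit int.
//     if ((int)x < 0)              // Top bit set: FILD produced x - 2^32,
//       d += 4294967296.0;         // so add 2^32 back in.
//     return d;
//   }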
+ assert(0 && "Cast FP to long not implemented yet!"); addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Result), FrameIdx); addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Result+1), FrameIdx, 4); case MVT::i32: @@ -1286,10 +1478,13 @@ unsigned ISel::SelectExpr(SDOperand N) { Op0 = N.getOperand(0); Op1 = N.getOperand(1); - if (isFoldableLoad(Op0)) + if (isFoldableLoad(Op0, Op1)) { std::swap(Op0, Op1); + goto FoldAdd; + } - if (isFoldableLoad(Op1)) { + if (isFoldableLoad(Op1, Op0)) { + FoldAdd: switch (N.getValueType()) { default: assert(0 && "Cannot add this type!"); case MVT::i1: @@ -1300,13 +1495,8 @@ unsigned ISel::SelectExpr(SDOperand N) { case MVT::f64: Opc = X86::FADD64m; break; } X86AddressMode AM; - if (getRegPressure(Op0) > getRegPressure(Op1)) { - Tmp1 = SelectExpr(Op0); - EmitFoldedLoad(Op1, AM); - } else { - EmitFoldedLoad(Op1, AM); - Tmp1 = SelectExpr(Op0); - } + EmitFoldedLoad(Op1, AM); + Tmp1 = SelectExpr(Op0); addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM); return Result; } @@ -1387,7 +1577,7 @@ unsigned ISel::SelectExpr(SDOperand N) { case ISD::MUL: case ISD::AND: case ISD::OR: - case ISD::XOR: + case ISD::XOR: { static const unsigned SUBTab[] = { X86::SUB8ri, X86::SUB16ri, X86::SUB32ri, 0, 0, X86::SUB8rm, X86::SUB16rm, X86::SUB32rm, X86::FSUB32m, X86::FSUB64m, @@ -1434,16 +1624,37 @@ unsigned ISel::SelectExpr(SDOperand N) { if (ConstantSDNode *CN = dyn_cast(Op1)) { if (CN->isAllOnesValue() && Node->getOpcode() == ISD::XOR) { + Opc = 0; switch (N.getValueType()) { default: assert(0 && "Cannot add this type!"); - case MVT::i1: + case MVT::i1: break; // Not supported, don't invert upper bits! case MVT::i8: Opc = X86::NOT8r; break; case MVT::i16: Opc = X86::NOT16r; break; case MVT::i32: Opc = X86::NOT32r; break; } - Tmp1 = SelectExpr(Op0); - BuildMI(BB, Opc, 1, Result).addReg(Tmp1); - return Result; + if (Opc) { + Tmp1 = SelectExpr(Op0); + BuildMI(BB, Opc, 1, Result).addReg(Tmp1); + return Result; + } + } + + // Fold common multiplies into LEA instructions. + if (Node->getOpcode() == ISD::MUL && N.getValueType() == MVT::i32) { + switch ((int)CN->getValue()) { + default: break; + case 3: + case 5: + case 9: + X86AddressMode AM; + // Remove N from exprmap so SelectAddress doesn't get confused. + ExprMap.erase(N); + SelectAddress(N, AM); + // Restore it to the map. + ExprMap[N] = Result; + addFullAddress(BuildMI(BB, X86::LEA32r, 4, Result), AM); + return Result; + } } switch (N.getValueType()) { @@ -1468,9 +1679,10 @@ unsigned ISel::SelectExpr(SDOperand N) { } } - if (isFoldableLoad(Op0)) + if (isFoldableLoad(Op0, Op1)) if (Node->getOpcode() != ISD::SUB) { std::swap(Op0, Op1); + goto FoldOps; } else { // Emit 'reverse' subract, with a memory operand. 
switch (N.getValueType()) { @@ -1480,19 +1692,15 @@ unsigned ISel::SelectExpr(SDOperand N) { } if (Opc) { X86AddressMode AM; - if (getRegPressure(Op0) > getRegPressure(Op1)) { - EmitFoldedLoad(Op0, AM); - Tmp1 = SelectExpr(Op1); - } else { - Tmp1 = SelectExpr(Op1); - EmitFoldedLoad(Op0, AM); - } + EmitFoldedLoad(Op0, AM); + Tmp1 = SelectExpr(Op1); addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM); return Result; } } - if (isFoldableLoad(Op1)) { + if (isFoldableLoad(Op1, Op0)) { + FoldOps: switch (N.getValueType()) { default: assert(0 && "Cannot operate on this type!"); case MVT::i1: @@ -1512,13 +1720,8 @@ unsigned ISel::SelectExpr(SDOperand N) { } X86AddressMode AM; - if (getRegPressure(Op0) > getRegPressure(Op1)) { - Tmp1 = SelectExpr(Op0); - EmitFoldedLoad(Op1, AM); - } else { - EmitFoldedLoad(Op1, AM); - Tmp1 = SelectExpr(Op0); - } + EmitFoldedLoad(Op1, AM); + Tmp1 = SelectExpr(Op0); if (Opc) { addFullAddress(BuildMI(BB, Opc, 5, Result).addReg(Tmp1), AM); } else { @@ -1568,44 +1771,25 @@ unsigned ISel::SelectExpr(SDOperand N) { BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL); } return Result; - + } case ISD::SELECT: - if (N.getValueType() != MVT::i1 && N.getValueType() != MVT::i8) { - if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) { - Tmp2 = SelectExpr(N.getOperand(1)); - Tmp3 = SelectExpr(N.getOperand(2)); - } else { - Tmp3 = SelectExpr(N.getOperand(2)); - Tmp2 = SelectExpr(N.getOperand(1)); - } - EmitSelectCC(N.getOperand(0), N.getValueType(), Tmp2, Tmp3, Result); - return Result; + if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) { + Tmp2 = SelectExpr(N.getOperand(1)); + Tmp3 = SelectExpr(N.getOperand(2)); } else { - // FIXME: This should not be implemented here, it should be in the generic - // code! - if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) { - Tmp2 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16, - N.getOperand(1))); - Tmp3 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16, - N.getOperand(2))); - } else { - Tmp3 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16, - N.getOperand(2))); - Tmp2 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16, - N.getOperand(1))); - } - unsigned TmpReg = MakeReg(MVT::i16); - EmitSelectCC(N.getOperand(0), MVT::i16, Tmp2, Tmp3, TmpReg); - // FIXME: need subregs to do better than this! - BuildMI(BB, X86::MOV16rr, 1, X86::AX).addReg(TmpReg); - BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL); - return Result; + Tmp3 = SelectExpr(N.getOperand(2)); + Tmp2 = SelectExpr(N.getOperand(1)); } + EmitSelectCC(N.getOperand(0), N.getValueType(), Tmp2, Tmp3, Result); + return Result; case ISD::SDIV: case ISD::UDIV: case ISD::SREM: case ISD::UREM: { + assert((N.getOpcode() != ISD::SREM || MVT::isInteger(N.getValueType())) && + "We don't support this operator!"); + if (N.getOpcode() == ISD::SDIV) if (ConstantSDNode *CN = dyn_cast(N.getOperand(1))) { // FIXME: These special cases should be handled by the lowering impl! @@ -1686,7 +1870,7 @@ unsigned ISel::SelectExpr(SDOperand N) { break; case MVT::i32: DivOpcode = isSigned ? 
X86::IDIV32r : X86::DIV32r; - LoReg =X86::EAX; + LoReg = X86::EAX; HiReg = X86::EDX; MovOpcode = X86::MOV32rr; ClrOpcode = X86::MOV32ri; @@ -1695,10 +1879,7 @@ unsigned ISel::SelectExpr(SDOperand N) { case MVT::i64: assert(0 && "FIXME: implement i64 DIV/REM libcalls!"); case MVT::f32: case MVT::f64: - if (N.getOpcode() == ISD::SDIV) - BuildMI(BB, X86::FpDIV, 2, Result).addReg(Tmp1).addReg(Tmp2); - else - assert(0 && "FIXME: Emit frem libcall to fmod!"); + BuildMI(BB, X86::FpDIV, 2, Result).addReg(Tmp1).addReg(Tmp2); return Result; } @@ -1825,11 +2006,11 @@ unsigned ISel::SelectExpr(SDOperand N) { return Result; case ISD::SETCC: - EmitCMP(N.getOperand(0), N.getOperand(1)); + EmitCMP(N.getOperand(0), N.getOperand(1), Node->hasOneUse()); EmitSetCC(BB, Result, cast(N)->getCondition(), MVT::isFloatingPoint(N.getOperand(1).getValueType())); return Result; - case ISD::LOAD: { + case ISD::LOAD: // Make sure we generate both values. if (Result != 1) ExprMap[N.getValue(1)] = 1; // Generate the token @@ -1851,11 +2032,122 @@ unsigned ISel::SelectExpr(SDOperand N) { addConstantPoolReference(BuildMI(BB, Opc, 4, Result), CP->getIndex()); } else { X86AddressMode AM; - EmitFoldedLoad(N, AM); + + SDOperand Chain = N.getOperand(0); + SDOperand Address = N.getOperand(1); + if (getRegPressure(Chain) > getRegPressure(Address)) { + Select(Chain); + SelectAddress(Address, AM); + } else { + SelectAddress(Address, AM); + Select(Chain); + } + addFullAddress(BuildMI(BB, Opc, 4, Result), AM); } return Result; + + case ISD::EXTLOAD: // Arbitrarily codegen extloads as MOVZX* + case ISD::ZEXTLOAD: { + // Make sure we generate both values. + if (Result != 1) + ExprMap[N.getValue(1)] = 1; // Generate the token + else + Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType()); + + if (ConstantPoolSDNode *CP = dyn_cast(N.getOperand(1))) + if (Node->getValueType(0) == MVT::f64) { + assert(cast(Node)->getExtraValueType() == MVT::f32 && + "Bad EXTLOAD!"); + addConstantPoolReference(BuildMI(BB, X86::FLD32m, 4, Result), + CP->getIndex()); + return Result; + } + + X86AddressMode AM; + if (getRegPressure(Node->getOperand(0)) > + getRegPressure(Node->getOperand(1))) { + Select(Node->getOperand(0)); // chain + SelectAddress(Node->getOperand(1), AM); + } else { + SelectAddress(Node->getOperand(1), AM); + Select(Node->getOperand(0)); // chain + } + + switch (Node->getValueType(0)) { + default: assert(0 && "Unknown type to sign extend to."); + case MVT::f64: + assert(cast(Node)->getExtraValueType() == MVT::f32 && + "Bad EXTLOAD!"); + addFullAddress(BuildMI(BB, X86::FLD32m, 5, Result), AM); + break; + case MVT::i32: + switch (cast(Node)->getExtraValueType()) { + default: + assert(0 && "Bad zero extend!"); + case MVT::i1: + case MVT::i8: + addFullAddress(BuildMI(BB, X86::MOVZX32rm8, 5, Result), AM); + break; + case MVT::i16: + addFullAddress(BuildMI(BB, X86::MOVZX32rm16, 5, Result), AM); + break; + } + break; + case MVT::i16: + assert(cast(Node)->getExtraValueType() <= MVT::i8 && + "Bad zero extend!"); + addFullAddress(BuildMI(BB, X86::MOVSX16rm8, 5, Result), AM); + break; + case MVT::i8: + assert(cast(Node)->getExtraValueType() == MVT::i1 && + "Bad zero extend!"); + addFullAddress(BuildMI(BB, X86::MOV8rm, 5, Result), AM); + break; + } + return Result; } + case ISD::SEXTLOAD: { + // Make sure we generate both values. 
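// [Editorial sketch] Backing up to the DIV/REM lowering earlier in this
// function: the LoReg/HiReg pairs (AL/AH, AX/DX, EAX/EDX) follow the fixed
// x86 convention that the dividend lives in the high:low register pair and
// DIV/IDIV leave quotient and remainder there:
//
//   signed:    movl %edi, %eax
//              cltd                     // sign-extend EAX into EDX
//              idivl %esi               // EAX = quotient, EDX = remainder
//
//   unsigned:  movl %edi, %eax
//              xorl %edx, %edx          // zero the high half instead
//              divl %esi
//
// (%edi/%esi here are arbitrary illustration registers.)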
+ if (Result != 1) + ExprMap[N.getValue(1)] = 1; // Generate the token + else + Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType()); + + X86AddressMode AM; + if (getRegPressure(Node->getOperand(0)) > + getRegPressure(Node->getOperand(1))) { + Select(Node->getOperand(0)); // chain + SelectAddress(Node->getOperand(1), AM); + } else { + SelectAddress(Node->getOperand(1), AM); + Select(Node->getOperand(0)); // chain + } + + switch (Node->getValueType(0)) { + case MVT::i8: assert(0 && "Cannot sign extend from bool!"); + default: assert(0 && "Unknown type to sign extend to."); + case MVT::i32: + switch (cast(Node)->getExtraValueType()) { + default: + case MVT::i1: assert(0 && "Cannot sign extend from bool!"); + case MVT::i8: + addFullAddress(BuildMI(BB, X86::MOVSX32rm8, 5, Result), AM); + break; + case MVT::i16: + addFullAddress(BuildMI(BB, X86::MOVSX32rm16, 5, Result), AM); + break; + } + break; + case MVT::i16: + assert(cast(Node)->getExtraValueType() == MVT::i8 && + "Cannot sign extend from bool!"); + addFullAddress(BuildMI(BB, X86::MOVSX16rm8, 5, Result), AM); + break; + } + return Result; + } + case ISD::DYNAMIC_STACKALLOC: // Generate both result values. if (Result != 1) @@ -1947,6 +2239,213 @@ unsigned ISel::SelectExpr(SDOperand N) { return 0; } +/// TryToFoldLoadOpStore - Given a store node, try to fold together a +/// load/op/store instruction. If successful return true. +bool ISel::TryToFoldLoadOpStore(SDNode *Node) { + assert(Node->getOpcode() == ISD::STORE && "Can only do this for stores!"); + SDOperand Chain = Node->getOperand(0); + SDOperand StVal = Node->getOperand(1); + SDOperand StPtr = Node->getOperand(2); + + // The chain has to be a load, the stored value must be an integer binary + // operation with one use. + if (!StVal.Val->hasOneUse() || StVal.Val->getNumOperands() != 2 || + MVT::isFloatingPoint(StVal.getValueType())) + return false; + + // Token chain must either be a factor node or the load to fold. + if (Chain.getOpcode() != ISD::LOAD && Chain.getOpcode() != ISD::TokenFactor) + return false; + + SDOperand TheLoad; + + // Check to see if there is a load from the same pointer that we're storing + // to in either operand of the binop. + if (StVal.getOperand(0).getOpcode() == ISD::LOAD && + StVal.getOperand(0).getOperand(1) == StPtr) + TheLoad = StVal.getOperand(0); + else if (StVal.getOperand(1).getOpcode() == ISD::LOAD && + StVal.getOperand(1).getOperand(1) == StPtr) + TheLoad = StVal.getOperand(1); + else + return false; // No matching load operand. + + // We can only fold the load if there are no intervening side-effecting + // operations. This means that the store uses the load as its token chain, or + // there are only token factor nodes in between the store and load. + if (Chain != TheLoad.getValue(1)) { + // Okay, the other option is that we have a store referring to (possibly + // nested) token factor nodes. For now, just try peeking through one level + // of token factors to see if this is the case. + bool ChainOk = false; + if (Chain.getOpcode() == ISD::TokenFactor) { + for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) + if (Chain.getOperand(i) == TheLoad.getValue(1)) { + ChainOk = true; + break; + } + } + + if (!ChainOk) return false; + } + + if (TheLoad.getOperand(1) != StPtr) + return false; + + // Make sure that one of the operands of the binop is the load, and that the + // load folds into the binop. 
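// [Editorial sketch -- not part of this patch.] The transformation this
// routine is after, in DAG terms and in the code it finally emits:
//
//   store (add (load P), 5), P       // a read-modify-write of P
//
//   without folding:                 with folding:
//     movl  (%eax), %ecx               addl  $5, (%eax)
//     addl  $5, %ecx
//     movl  %ecx, (%eax)
//
// The chain checks above guarantee the load and store hit the same address
// with no side-effecting operation ordered between them.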
+ if (((StVal.getOperand(0) != TheLoad || + !isFoldableLoad(TheLoad, StVal.getOperand(1))) && + (StVal.getOperand(1) != TheLoad || + !isFoldableLoad(TheLoad, StVal.getOperand(0))))) + return false; + + // Finally, check to see if this is one of the ops we can handle! + static const unsigned ADDTAB[] = { + X86::ADD8mi, X86::ADD16mi, X86::ADD32mi, + X86::ADD8mr, X86::ADD16mr, X86::ADD32mr, + }; + static const unsigned SUBTAB[] = { + X86::SUB8mi, X86::SUB16mi, X86::SUB32mi, + X86::SUB8mr, X86::SUB16mr, X86::SUB32mr, + }; + static const unsigned ANDTAB[] = { + X86::AND8mi, X86::AND16mi, X86::AND32mi, + X86::AND8mr, X86::AND16mr, X86::AND32mr, + }; + static const unsigned ORTAB[] = { + X86::OR8mi, X86::OR16mi, X86::OR32mi, + X86::OR8mr, X86::OR16mr, X86::OR32mr, + }; + static const unsigned XORTAB[] = { + X86::XOR8mi, X86::XOR16mi, X86::XOR32mi, + X86::XOR8mr, X86::XOR16mr, X86::XOR32mr, + }; + static const unsigned SHLTAB[] = { + X86::SHL8mi, X86::SHL16mi, X86::SHL32mi, + /*Have to put the reg in CL*/0, 0, 0, + }; + static const unsigned SARTAB[] = { + X86::SAR8mi, X86::SAR16mi, X86::SAR32mi, + /*Have to put the reg in CL*/0, 0, 0, + }; + static const unsigned SHRTAB[] = { + X86::SHR8mi, X86::SHR16mi, X86::SHR32mi, + /*Have to put the reg in CL*/0, 0, 0, + }; + + const unsigned *TabPtr = 0; + switch (StVal.getOpcode()) { + default: + std::cerr << "CANNOT [mem] op= val: "; + StVal.Val->dump(); std::cerr << "\n"; + case ISD::MUL: + case ISD::SDIV: + case ISD::UDIV: + case ISD::SREM: + case ISD::UREM: return false; + + case ISD::ADD: TabPtr = ADDTAB; break; + case ISD::SUB: TabPtr = SUBTAB; break; + case ISD::AND: TabPtr = ANDTAB; break; + case ISD:: OR: TabPtr = ORTAB; break; + case ISD::XOR: TabPtr = XORTAB; break; + case ISD::SHL: TabPtr = SHLTAB; break; + case ISD::SRA: TabPtr = SARTAB; break; + case ISD::SRL: TabPtr = SHRTAB; break; + } + + // Handle: [mem] op= CST + SDOperand Op0 = StVal.getOperand(0); + SDOperand Op1 = StVal.getOperand(1); + unsigned Opc; + if (ConstantSDNode *CN = dyn_cast(Op1)) { + switch (Op0.getValueType()) { // Use Op0's type because of shifts. 
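// [Editorial note] "Use Op0's type" matters for the shift rows of the
// tables above: X86 shift amounts are always i8 (setShiftAmountType(MVT::i8)
// in the constructor), so for a node like
//
//   (shl i32 (load P), (i8 5))
//
// keying the table on Op1's type would always pick the 8-bit opcode; the
// width of the memory operand has to come from the value being shifted.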
+ default: break; + case MVT::i1: + case MVT::i8: Opc = TabPtr[0]; break; + case MVT::i16: Opc = TabPtr[1]; break; + case MVT::i32: Opc = TabPtr[2]; break; + } + + if (Opc) { + LoweredTokens.insert(TheLoad.getValue(1)); + Select(Chain); + + X86AddressMode AM; + if (getRegPressure(TheLoad.getOperand(0)) > + getRegPressure(TheLoad.getOperand(1))) { + Select(TheLoad.getOperand(0)); + SelectAddress(TheLoad.getOperand(1), AM); + } else { + SelectAddress(TheLoad.getOperand(1), AM); + Select(TheLoad.getOperand(0)); + } + + if (StVal.getOpcode() == ISD::ADD) { + if (CN->getValue() == 1) { + switch (Op0.getValueType()) { + default: break; + case MVT::i8: + addFullAddress(BuildMI(BB, X86::INC8m, 4), AM); + return true; + case MVT::i16: Opc = TabPtr[1]; + addFullAddress(BuildMI(BB, X86::INC16m, 4), AM); + return true; + case MVT::i32: Opc = TabPtr[2]; + addFullAddress(BuildMI(BB, X86::INC32m, 4), AM); + return true; + } + } else if (CN->getValue()+1 == 0) { // [X] += -1 -> DEC [X] + switch (Op0.getValueType()) { + default: break; + case MVT::i8: + addFullAddress(BuildMI(BB, X86::DEC8m, 4), AM); + return true; + case MVT::i16: Opc = TabPtr[1]; + addFullAddress(BuildMI(BB, X86::DEC16m, 4), AM); + return true; + case MVT::i32: Opc = TabPtr[2]; + addFullAddress(BuildMI(BB, X86::DEC32m, 4), AM); + return true; + } + } + } + + addFullAddress(BuildMI(BB, Opc, 4+1),AM).addImm(CN->getValue()); + return true; + } + } + + // If we have [mem] = V op [mem], try to turn it into: + // [mem] = [mem] op V. + if (Op1 == TheLoad && StVal.getOpcode() != ISD::SUB && + StVal.getOpcode() != ISD::SHL && StVal.getOpcode() != ISD::SRA && + StVal.getOpcode() != ISD::SRL) + std::swap(Op0, Op1); + + if (Op0 != TheLoad) return false; + + switch (Op0.getValueType()) { + default: return false; + case MVT::i1: + case MVT::i8: Opc = TabPtr[3]; break; + case MVT::i16: Opc = TabPtr[4]; break; + case MVT::i32: Opc = TabPtr[5]; break; + } + + LoweredTokens.insert(TheLoad.getValue(1)); + Select(Chain); + + Select(TheLoad.getOperand(0)); + X86AddressMode AM; + SelectAddress(TheLoad.getOperand(1), AM); + unsigned Reg = SelectExpr(Op1); + addFullAddress(BuildMI(BB, Opc, 4+1),AM).addReg(Reg); + return true; +} + + void ISel::Select(SDOperand N) { unsigned Tmp1, Tmp2, Opc; @@ -1961,10 +2460,31 @@ void ISel::Select(SDOperand N) { Node->dump(); std::cerr << "\n"; assert(0 && "Node not handled yet!"); case ISD::EntryToken: return; // Noop + case ISD::TokenFactor: + if (Node->getNumOperands() == 2) { + bool OneFirst = + getRegPressure(Node->getOperand(1))>getRegPressure(Node->getOperand(0)); + Select(Node->getOperand(OneFirst)); + Select(Node->getOperand(!OneFirst)); + } else { + std::vector > OpsP; + for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) + OpsP.push_back(std::make_pair(getRegPressure(Node->getOperand(i)), i)); + std::sort(OpsP.begin(), OpsP.end()); + std::reverse(OpsP.begin(), OpsP.end()); + for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) + Select(Node->getOperand(OpsP[i].second)); + } + return; case ISD::CopyToReg: - Select(N.getOperand(0)); - Tmp1 = SelectExpr(N.getOperand(1)); - Tmp2 = cast(N)->getReg(); + if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) { + Select(N.getOperand(0)); + Tmp1 = SelectExpr(N.getOperand(1)); + } else { + Tmp1 = SelectExpr(N.getOperand(1)); + Select(N.getOperand(0)); + } + Tmp2 = cast(N)->getReg(); if (Tmp1 != Tmp2) { switch (N.getOperand(1).getValueType()) { @@ -2057,11 +2577,99 @@ void ISel::Select(SDOperand N) { return; } + case ISD::LOAD: + // If this 
load could be folded into the only using instruction, and if it + // is safe to emit the instruction here, try to do so now. + if (Node->hasNUsesOfValue(1, 0)) { + SDOperand TheVal = N.getValue(0); + SDNode *User = 0; + for (SDNode::use_iterator UI = Node->use_begin(); ; ++UI) { + assert(UI != Node->use_end() && "Didn't find use!"); + SDNode *UN = *UI; + for (unsigned i = 0, e = UN->getNumOperands(); i != e; ++i) + if (UN->getOperand(i) == TheVal) { + User = UN; + goto FoundIt; + } + } + FoundIt: + // Only handle unary operators right now. + if (User->getNumOperands() == 1) { + LoweredTokens.erase(N); + SelectExpr(SDOperand(User, 0)); + return; + } + } + SelectExpr(N); + return; + + case ISD::EXTLOAD: + case ISD::SEXTLOAD: + case ISD::ZEXTLOAD: case ISD::CALL: case ISD::DYNAMIC_STACKALLOC: SelectExpr(N); return; + + case ISD::TRUNCSTORE: { // truncstore chain, val, ptr :storety + // On X86, we can represent all types except for Bool and Float natively. + X86AddressMode AM; + MVT::ValueType StoredTy = cast(Node)->getExtraValueType(); + assert((StoredTy == MVT::i1 || StoredTy == MVT::f32 || + StoredTy == MVT::i16 /*FIXME: THIS IS JUST FOR TESTING!*/) + && "Unsupported TRUNCSTORE for this target!"); + + if (StoredTy == MVT::i16) { + // FIXME: This is here just to allow testing. X86 doesn't really have a + // TRUNCSTORE i16 operation, but this is required for targets that do not + // have 16-bit integer registers. We occasionally disable 16-bit integer + // registers to test the promotion code. + Select(N.getOperand(0)); + Tmp1 = SelectExpr(N.getOperand(1)); + SelectAddress(N.getOperand(2), AM); + + BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1); + addFullAddress(BuildMI(BB, X86::MOV16mr, 5), AM).addReg(X86::AX); + return; + } + + // Store of constant bool? + if (ConstantSDNode *CN = dyn_cast(N.getOperand(1))) { + if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(2))) { + Select(N.getOperand(0)); + SelectAddress(N.getOperand(2), AM); + } else { + SelectAddress(N.getOperand(2), AM); + Select(N.getOperand(0)); + } + addFullAddress(BuildMI(BB, X86::MOV8mi, 5), AM).addImm(CN->getValue()); + return; + } + + switch (StoredTy) { + default: assert(0 && "Cannot truncstore this type!"); + case MVT::i1: Opc = X86::MOV8mr; break; + case MVT::f32: Opc = X86::FST32m; break; + } + + std::vector > RP; + RP.push_back(std::make_pair(getRegPressure(N.getOperand(0)), 0)); + RP.push_back(std::make_pair(getRegPressure(N.getOperand(1)), 1)); + RP.push_back(std::make_pair(getRegPressure(N.getOperand(2)), 2)); + std::sort(RP.begin(), RP.end()); + + for (unsigned i = 0; i != 3; ++i) + switch (RP[2-i].second) { + default: assert(0 && "Unknown operand number!"); + case 0: Select(N.getOperand(0)); break; + case 1: Tmp1 = SelectExpr(N.getOperand(1)); break; + case 2: SelectAddress(N.getOperand(2), AM); break; + } + + addFullAddress(BuildMI(BB, Opc, 4+1), AM).addReg(Tmp1); + return; + } case ISD::STORE: { X86AddressMode AM; @@ -2090,86 +2698,8 @@ void ISel::Select(SDOperand N) { } // Check to see if this is a load/op/store combination. - if (N.getOperand(1).Val->hasOneUse() && - isFoldableLoad(N.getOperand(0).getValue(0))) { - SDOperand TheLoad = N.getOperand(0).getValue(0); - // Check to see if we are loading the same pointer that we're storing to. - if (TheLoad.getOperand(1) == N.getOperand(2)) { - // See if the stored value is a simple binary operator that uses the - // load as one of its operands. 
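// [Editorial note] The '-' block below is the first-cut load/op/store logic
// that this patch replaces with TryToFoldLoadOpStore() above; the new
// routine adds the token-factor chain checks, the full opcode tables, and
// the INC/DEC special cases, which win because the memory forms encode
// shorter than an add/sub of an immediate:
//
//   [mem] += 1    -->   incl (%eax)     // FF /0: 2 bytes
//   [mem] += -1   -->   decl (%eax)     // vs. addl $1, (%eax): 3 bytes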
- SDOperand Op = N.getOperand(1); - if (Op.Val->getNumOperands() == 2 && - (Op.getOperand(0) == TheLoad || Op.getOperand(1) == TheLoad)) { - // Finally, check to see if this is one of the ops we can handle! - static const unsigned ADDTAB[] = { - X86::ADD8mi, X86::ADD16mi, X86::ADD32mi, - X86::ADD8mr, X86::ADD16mr, X86::ADD32mr, 0, 0, - }; - - const unsigned *TabPtr = 0; - switch (Op.getOpcode()) { - default: break; - case ISD::ADD: TabPtr = ADDTAB; break; - } - - if (TabPtr) { - // Handle: [mem] op= CST - SDOperand Op0 = Op.getOperand(0); - SDOperand Op1 = Op.getOperand(1); - if (ConstantSDNode *CN = dyn_cast(Op1)) { - switch (CN->getValueType(0)) { - default: break; - case MVT::i1: - case MVT::i8: Opc = TabPtr[0]; break; - case MVT::i16: Opc = TabPtr[1]; break; - case MVT::i32: Opc = TabPtr[2]; break; - } - - if (Opc) { - if (getRegPressure(TheLoad.getOperand(0)) > - getRegPressure(TheLoad.getOperand(1))) { - Select(TheLoad.getOperand(0)); - SelectAddress(TheLoad.getOperand(1), AM); - } else { - SelectAddress(TheLoad.getOperand(1), AM); - Select(TheLoad.getOperand(0)); - } - - addFullAddress(BuildMI(BB, Opc, 4+1),AM).addImm(CN->getValue()); - return; - } - } - - // If we have [mem] = V op [mem], try to turn it into: - // [mem] = [mem] op V. - if (Op1 == TheLoad && 1 /*iscommutative*/) - std::swap(Op0, Op1); - - if (Op0 == TheLoad) { - switch (Op0.getValueType()) { - default: break; - case MVT::i1: - case MVT::i8: Opc = TabPtr[3]; break; - case MVT::i16: Opc = TabPtr[4]; break; - case MVT::i32: Opc = TabPtr[5]; break; - case MVT::f32: Opc = TabPtr[6]; break; - case MVT::f64: Opc = TabPtr[7]; break; - } - - if (Opc) { - Select(TheLoad.getOperand(0)); - SelectAddress(TheLoad.getOperand(1), AM); - unsigned Reg = SelectExpr(Op1); - addFullAddress(BuildMI(BB, Opc, 4+1),AM).addReg(Reg); - return; - } - } - //Opc = TabPtr[Opc]; - } - } - } - } - + if (TryToFoldLoadOpStore(Node)) + return; switch (N.getOperand(1).getValueType()) { default: assert(0 && "Cannot store this type!");
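// [Editorial sketch -- closing note, not part of this patch.] The EmitCMP()
// changes earlier in the patch apply the same load-folding idea to
// compares: a one-use load feeding a compare becomes a memory-operand CMP,
//
//   if (*p == 42):   without folding:        with folding:
//                      movl (%eax), %ecx       cmpl $42, (%eax)
//                      cmpl $42, %ecx
//
// and a compare of an x87 value against +/-0.0 is emitted as
// FTST / FNSTSW / SAHF, moving the FPU condition bits into EFLAGS without
// materializing a zero constant.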