diff --git a/lib/Target/X86/X86ISelPattern.cpp b/lib/Target/X86/X86ISelPattern.cpp
index a6932faea0d..a75854e5592 100644
--- a/lib/Target/X86/X86ISelPattern.cpp
+++ b/lib/Target/X86/X86ISelPattern.cpp
@@ -39,6 +39,12 @@ namespace {
   public:
     X86TargetLowering(TargetMachine &TM) : TargetLowering(TM) {
       // Set up the TargetLowering object.
+
+      // X86 is weird: it always uses i8 for shift amounts and setcc results.
+      setShiftAmountType(MVT::i8);
+      setSetCCResultType(MVT::i8);
+
+      // Set up the register classes.
       addRegisterClass(MVT::i8, X86::R8RegisterClass);
       addRegisterClass(MVT::i16, X86::R16RegisterClass);
       addRegisterClass(MVT::i32, X86::R32RegisterClass);
@@ -46,16 +52,23 @@ namespace {
 
       // FIXME: Eliminate these two classes when legalize can handle promotions
       // well.
-      addRegisterClass(MVT::i1, X86::R8RegisterClass);
-      addRegisterClass(MVT::f32, X86::RFPRegisterClass);
+/**/  addRegisterClass(MVT::i1, X86::R8RegisterClass);
+/**/  //addRegisterClass(MVT::f32, X86::RFPRegisterClass);
+
+      setOperationAction(ISD::MEMMOVE          , MVT::Other, Expand);
+      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Expand);
+      setOperationAction(ISD::ZERO_EXTEND_INREG, MVT::i16  , Expand);
+      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);
+      setOperationAction(ISD::ZERO_EXTEND_INREG, MVT::i1   , Expand);
+      setOperationAction(ISD::FP_ROUND_INREG   , MVT::f32  , Expand);
+      setOperationAction(ISD::SEXTLOAD         , MVT::i1   , Expand);
+      setOperationAction(ISD::SREM             , MVT::f64  , Expand);
+
+      // These should be promoted to a larger select, which is supported.
+/**/  setOperationAction(ISD::SELECT           , MVT::i1   , Promote);
+      setOperationAction(ISD::SELECT           , MVT::i8   , Promote);
 
       computeRegisterProperties();
-
-      setOperationUnsupported(ISD::MEMMOVE, MVT::Other);
-
-      //setOperationUnsupported(ISD::SEXTLOAD, MVT::i1);
-      setOperationUnsupported(ISD::SELECT, MVT::i1);
-      setOperationUnsupported(ISD::SELECT, MVT::i8);
 
       addLegalFPImmediate(+0.0); // FLD0
       addLegalFPImmediate(+1.0); // FLD1
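As an aside on the Promote requests above: a minimal sketch (editorial, not part of the patch) of what promoting an i8 SELECT amounts to once the legalizer has rewritten it. x86 has no 8-bit conditional move, so the operands are widened, selected at i16, and the result truncated:

    #include <cstdint>

    // Illustrative only; select_i8_promoted is not an LLVM function.
    static uint8_t select_i8_promoted(bool c, uint8_t a, uint8_t b) {
      uint16_t wa = a, wb = b;          // promote both operands to i16
      uint16_t wr = c ? wa : wb;        // i16 select is legal (CMOVcc r16)
      return static_cast<uint8_t>(wr);  // truncate back to i8
    }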
@@ -330,11 +343,11 @@ namespace {
     /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
     virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);
 
-    bool isFoldableLoad(SDOperand Op);
+    bool isFoldableLoad(SDOperand Op, SDOperand OtherOp);
     void EmitFoldedLoad(SDOperand Op, X86AddressMode &AM);
+    bool TryToFoldLoadOpStore(SDNode *Node);
-
-    void EmitCMP(SDOperand LHS, SDOperand RHS);
+    void EmitCMP(SDOperand LHS, SDOperand RHS, bool HasOneUse);
     bool EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain,
                       SDOperand Cond);
     void EmitSelectCC(SDOperand Cond, MVT::ValueType SVT,
                       unsigned RTrue, unsigned RFalse, unsigned RDest);
@@ -440,8 +453,11 @@ unsigned ISel::ComputeRegPressure(SDOperand O) {
         ++NumExtraMaxRegUsers;
       }
     }
-
-    Result = MaxRegUse+NumExtraMaxRegUsers;
+
+    if (O.getOpcode() != ISD::TokenFactor)
+      Result = MaxRegUse+NumExtraMaxRegUsers;
+    else
+      Result = std::max(MaxRegUse-1, 1);
   }
 
   //std::cerr << "  WEIGHT: " << Result << " "; N->dump(); std::cerr << "\n";
@@ -728,7 +744,7 @@ bool ISel::EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain,
     case ISD::SETUGE: Opc = X86::JAE; break;
     }
     Select(Chain);
-    EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1));
+    EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1), SetCC->hasOneUse());
     BuildMI(BB, Opc, 1).addMBB(Dest);
     return false;
   }
@@ -785,7 +801,7 @@ bool ISel::EmitBranchCC(MachineBasicBlock *Dest, SDOperand Chain,
   }
 
   Select(Chain);
-  EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1));
+  EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1), SetCC->hasOneUse());
   BuildMI(BB, Opc, 1).addMBB(Dest);
   if (Opc2)
     BuildMI(BB, Opc2, 1).addMBB(Dest);
@@ -897,17 +913,17 @@ void ISel::EmitSelectCC(SDOperand Cond, MVT::ValueType SVT,
     }
   } else {
     // FIXME: CMP R, 0 -> TEST R, R
-    EmitCMP(Cond.getOperand(0), Cond.getOperand(1));
+    EmitCMP(Cond.getOperand(0), Cond.getOperand(1), Cond.Val->hasOneUse());
    std::swap(RTrue, RFalse);
   }
   BuildMI(BB, Opc, 2, RDest).addReg(RTrue).addReg(RFalse);
 }
 
-void ISel::EmitCMP(SDOperand LHS, SDOperand RHS) {
+void ISel::EmitCMP(SDOperand LHS, SDOperand RHS, bool HasOneUse) {
   unsigned Opc;
   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(RHS)) {
     Opc = 0;
-    if (isFoldableLoad(LHS)) {
+    if (HasOneUse && isFoldableLoad(LHS, RHS)) {
       switch (RHS.getValueType()) {
       default: break;
       case MVT::i1:
@@ -946,7 +962,7 @@ void ISel::EmitCMP(SDOperand LHS, SDOperand RHS) {
   }
 
   Opc = 0;
-  if (isFoldableLoad(LHS)) {
+  if (HasOneUse && isFoldableLoad(LHS, RHS)) {
     switch (RHS.getValueType()) {
     default: break;
     case MVT::i1:
@@ -983,9 +999,30 @@ void ISel::EmitCMP(SDOperand LHS, SDOperand RHS) {
   BuildMI(BB, Opc, 2).addReg(Tmp1).addReg(Tmp2);
 }
 
+/// NodeTransitivelyUsesValue - Return true if N transitively uses Op through
+/// its operand edges. The DAG cannot have cycles in it, by definition, so the
+/// visited set is not needed to prevent infinite loops. The DAG CAN, however,
+/// have unbounded sharing, so the set prevents exponential re-traversal.
+///
+static bool NodeTransitivelyUsesValue(SDOperand N, SDOperand Op,
+                                      std::set<SDNode*> &Visited) {
+  if (N == Op) return true;                        // Found it.
+  SDNode *Node = N.Val;
+  if (Node->getNumOperands() == 0) return false;   // Leaf?
+  if (!Visited.insert(Node).second) return false;  // Already visited?
+
+  // Recurse on every operand but the first.
+  for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
+    if (NodeTransitivelyUsesValue(Node->getOperand(i), Op, Visited))
+      return true;
+
+  // Tail recurse on the first operand.
+  return NodeTransitivelyUsesValue(Node->getOperand(0), Op, Visited);
+}
+
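The traversal above is worth a closer look: because a DAG is acyclic by construction, Visited is not needed for termination, only to stop heavily shared subtrees from being re-walked exponentially often. A self-contained sketch of the same shape (editorial; Node and reaches are stand-ins, not LLVM types):

    #include <set>
    #include <vector>

    struct Node { std::vector<Node*> Ops; };  // operand edges only

    static bool reaches(Node *N, Node *Target, std::set<Node*> &Visited) {
      if (N == Target) return true;                 // found it
      if (N->Ops.empty()) return false;             // leaf node
      if (!Visited.insert(N).second) return false;  // already explored
      // Recurse on all operands but the first...
      for (unsigned i = 1, e = N->Ops.size(); i != e; ++i)
        if (reaches(N->Ops[i], Target, Visited))
          return true;
      // ...and tail-recurse on operand zero, mirroring the code above.
      return reaches(N->Ops[0], Target, Visited);
    }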
 /// isFoldableLoad - Return true if this is a load instruction that can safely
 /// be folded into an operation that uses it.
-bool ISel::isFoldableLoad(SDOperand Op) {
+bool ISel::isFoldableLoad(SDOperand Op, SDOperand OtherOp) {
   if (Op.getOpcode() != ISD::LOAD ||
       // FIXME: currently can't fold constant pool indexes.
       isa<ConstantPoolSDNode>(Op.getOperand(1)))
     return false;
@@ -998,10 +1035,22 @@ bool ISel::isFoldableLoad(SDOperand Op) {
   assert(!LoweredTokens.count(Op.getValue(1)) &&
          "Token lowered but value not in map?");
 
-  // Finally, there can only be one use of its value.
-  return Op.Val->hasNUsesOfValue(1, 0);
+  // If there is not just one use of its value, we cannot fold.
+  if (!Op.Val->hasNUsesOfValue(1, 0)) return false;
+
+  // Finally, we cannot fold the load into the operation if doing so would
+  // induce a cycle in the resultant DAG. To check for this, see if OtherOp
+  // (the other operand of the operation we are folding the load into) could
+  // possibly use the chain value defined by the load.
+  if (OtherOp.Val && !Op.Val->hasNUsesOfValue(0, 1)) { // Has uses of chain?
+    std::set<SDNode*> Visited;
+    if (NodeTransitivelyUsesValue(OtherOp, Op.getValue(1), Visited))
+      return false;
+  }
+  return true;
 }
+
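The cycle check deserves a concrete picture. If the other operand of the binop is ordered after the load via the load's output chain, fusing load and op into one machine instruction would make that instruction depend on a node that in turn depends on it. A runnable toy of the rejected situation (editorial; Node is a stand-in, not an LLVM type):

    #include <set>
    #include <vector>

    struct Node { std::vector<Node*> Ops; };

    static bool reaches(Node *N, Node *T, std::set<Node*> &V) {
      if (N == T) return true;
      if (!V.insert(N).second) return false;
      for (unsigned i = 0, e = N->Ops.size(); i != e; ++i)
        if (reaches(N->Ops[i], T, V))
          return true;
      return false;
    }

    int main() {
      // t1 = load [P]      (defines a value and an output chain c1)
      // t2 = load [Q]      (chained on c1, so it is ordered after t1)
      // t3 = add t1, t2    (candidate for folding t1 into it)
      Node Chain1;                   // c1, t1's chain result
      Node Load2;                    // t2
      Load2.Ops.push_back(&Chain1);  // t2 uses t1's chain
      std::set<Node*> V;
      // OtherOp (t2) transitively uses t1's chain, so folding t1 into the
      // add would create a cycle; isFoldableLoad must return false here.
      return reaches(&Load2, &Chain1, V) ? 0 : 1;
    }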
 /// EmitFoldedLoad - Ensure that the arguments of the load are code generated,
 /// and compute the address being loaded into AM.
 void ISel::EmitFoldedLoad(SDOperand Op, X86AddressMode &AM) {
@@ -1123,7 +1172,7 @@ unsigned ISel::SelectExpr(SDOperand N) {
       return Result;
     }
 
-    if (isFoldableLoad(N.getOperand(0))) {
+    if (isFoldableLoad(N.getOperand(0), SDOperand())) {
       static const unsigned Opc[3] = {
         X86::MOVZX32rm8, X86::MOVZX32rm16, X86::MOVZX16rm8
       };
@@ -1150,7 +1199,7 @@ unsigned ISel::SelectExpr(SDOperand N) {
     assert(N.getOperand(0).getValueType() != MVT::i1 &&
            "Sign extend from bool not implemented!");
 
-    if (isFoldableLoad(N.getOperand(0))) {
+    if (isFoldableLoad(N.getOperand(0), SDOperand())) {
       static const unsigned Opc[3] = {
         X86::MOVSX32rm8, X86::MOVSX32rm16, X86::MOVSX16rm8
       };
@@ -1170,7 +1219,7 @@ unsigned ISel::SelectExpr(SDOperand N) {
     }
   case ISD::TRUNCATE:
     // Fold TRUNCATE (LOAD P) into a smaller load from P.
-    if (isFoldableLoad(N.getOperand(0))) {
+    if (isFoldableLoad(N.getOperand(0), SDOperand())) {
      switch (N.getValueType()) {
      default: assert(0 && "Unknown truncate!");
      case MVT::i1:
@@ -1429,10 +1478,13 @@ unsigned ISel::SelectExpr(SDOperand N) {
     Op0 = N.getOperand(0);
     Op1 = N.getOperand(1);
 
-    if (isFoldableLoad(Op0))
+    if (isFoldableLoad(Op0, Op1)) {
       std::swap(Op0, Op1);
+      goto FoldAdd;
+    }
 
-    if (isFoldableLoad(Op1)) {
+    if (isFoldableLoad(Op1, Op0)) {
+    FoldAdd:
       switch (N.getValueType()) {
       default: assert(0 && "Cannot add this type!");
       case MVT::i1:
@@ -1572,16 +1624,37 @@ unsigned ISel::SelectExpr(SDOperand N) {
 
     if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
       if (CN->isAllOnesValue() && Node->getOpcode() == ISD::XOR) {
+        Opc = 0;
         switch (N.getValueType()) {
         default: assert(0 && "Cannot invert this type!");
-        case MVT::i1:
+        case MVT::i1: break;  // Not supported, don't invert upper bits!
         case MVT::i8: Opc = X86::NOT8r; break;
         case MVT::i16: Opc = X86::NOT16r; break;
         case MVT::i32: Opc = X86::NOT32r; break;
         }
-        Tmp1 = SelectExpr(Op0);
-        BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
-        return Result;
+        if (Opc) {
+          Tmp1 = SelectExpr(Op0);
+          BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
+          return Result;
+        }
       }
+
+      // Fold common multiplies into LEA instructions.
+      if (Node->getOpcode() == ISD::MUL && N.getValueType() == MVT::i32) {
+        switch ((int)CN->getValue()) {
+        default: break;
+        case 3:
+        case 5:
+        case 9:
+          X86AddressMode AM;
+          // Remove N from ExprMap so SelectAddress doesn't get confused.
+          ExprMap.erase(N);
+          SelectAddress(N, AM);
+          // Restore it to the map.
+          ExprMap[N] = Result;
+          addFullAddress(BuildMI(BB, X86::LEA32r, 4, Result), AM);
+          return Result;
+        }
+      }
     }
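A note on why only 3, 5, and 9 appear in the multiply fold above: LEA computes base + index*scale with scale limited to 1, 2, 4, or 8, so using the same register as both base and index yields exactly these multipliers in a single, flag-preserving instruction. A sketch of the identities being exploited (editorial, not from the patch):

    #include <cstdint>

    static uint32_t mul3(uint32_t x) { return x + x * 2; } // lea r,[x+2*x]
    static uint32_t mul5(uint32_t x) { return x + x * 4; } // lea r,[x+4*x]
    static uint32_t mul9(uint32_t x) { return x + x * 8; } // lea r,[x+8*x]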
 
     switch (N.getValueType()) {
@@ -1606,9 +1679,10 @@ unsigned ISel::SelectExpr(SDOperand N) {
     }
   }
 
-  if (isFoldableLoad(Op0))
+  if (isFoldableLoad(Op0, Op1))
     if (Node->getOpcode() != ISD::SUB) {
       std::swap(Op0, Op1);
+      goto FoldOps;
     } else {
       // Emit 'reverse' subtract, with a memory operand.
       switch (N.getValueType()) {
@@ -1625,7 +1699,8 @@ unsigned ISel::SelectExpr(SDOperand N) {
     }
   }
 
-  if (isFoldableLoad(Op1)) {
+  if (isFoldableLoad(Op1, Op0)) {
+  FoldOps:
     switch (N.getValueType()) {
     default: assert(0 && "Cannot operate on this type!");
     case MVT::i1:
@@ -1698,42 +1773,23 @@ unsigned ISel::SelectExpr(SDOperand N) {
     return Result;
   }
   case ISD::SELECT:
-    if (N.getValueType() != MVT::i1 && N.getValueType() != MVT::i8) {
-      if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
-        Tmp2 = SelectExpr(N.getOperand(1));
-        Tmp3 = SelectExpr(N.getOperand(2));
-      } else {
-        Tmp3 = SelectExpr(N.getOperand(2));
-        Tmp2 = SelectExpr(N.getOperand(1));
-      }
-      EmitSelectCC(N.getOperand(0), N.getValueType(), Tmp2, Tmp3, Result);
-      return Result;
+    if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
+      Tmp2 = SelectExpr(N.getOperand(1));
+      Tmp3 = SelectExpr(N.getOperand(2));
     } else {
-      // FIXME: This should not be implemented here, it should be in the
-      // generic code!
-      if (getRegPressure(N.getOperand(1)) > getRegPressure(N.getOperand(2))) {
-        Tmp2 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16,
-                                          N.getOperand(1)));
-        Tmp3 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16,
-                                          N.getOperand(2)));
-      } else {
-        Tmp3 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16,
-                                          N.getOperand(2)));
-        Tmp2 = SelectExpr(CurDAG->getNode(ISD::ZERO_EXTEND, MVT::i16,
-                                          N.getOperand(1)));
-      }
-      unsigned TmpReg = MakeReg(MVT::i16);
-      EmitSelectCC(N.getOperand(0), MVT::i16, Tmp2, Tmp3, TmpReg);
-      // FIXME: need subregs to do better than this!
-      BuildMI(BB, X86::MOV16rr, 1, X86::AX).addReg(TmpReg);
-      BuildMI(BB, X86::MOV8rr, 1, Result).addReg(X86::AL);
-      return Result;
+      Tmp3 = SelectExpr(N.getOperand(2));
+      Tmp2 = SelectExpr(N.getOperand(1));
     }
+    EmitSelectCC(N.getOperand(0), N.getValueType(), Tmp2, Tmp3, Result);
+    return Result;
 
   case ISD::SDIV:
   case ISD::UDIV:
   case ISD::SREM:
   case ISD::UREM: {
+    assert((N.getOpcode() != ISD::SREM || MVT::isInteger(N.getValueType())) &&
+           "We don't support this operator!");
+
     if (N.getOpcode() == ISD::SDIV)
       if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
         // FIXME: These special cases should be handled by the lowering impl!
@@ -1823,10 +1879,7 @@ unsigned ISel::SelectExpr(SDOperand N) {
   case MVT::i64: assert(0 && "FIXME: implement i64 DIV/REM libcalls!");
   case MVT::f32:
   case MVT::f64:
-    if (N.getOpcode() == ISD::SDIV)
-      BuildMI(BB, X86::FpDIV, 2, Result).addReg(Tmp1).addReg(Tmp2);
-    else
-      assert(0 && "FIXME: Emit frem libcall to fmod!");
+    BuildMI(BB, X86::FpDIV, 2, Result).addReg(Tmp1).addReg(Tmp2);
     return Result;
   }
@@ -1953,7 +2006,7 @@ unsigned ISel::SelectExpr(SDOperand N) {
     return Result;
 
   case ISD::SETCC:
-    EmitCMP(N.getOperand(0), N.getOperand(1));
+    EmitCMP(N.getOperand(0), N.getOperand(1), Node->hasOneUse());
     EmitSetCC(BB, Result, cast<SetCCSDNode>(N)->getCondition(),
               MVT::isFloatingPoint(N.getOperand(1).getValueType()));
     return Result;
@@ -2002,6 +2055,15 @@ unsigned ISel::SelectExpr(SDOperand N) {
     else
       Result = ExprMap[N.getValue(0)] = MakeReg(N.getValue(0).getValueType());
 
+    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N.getOperand(1)))
+      if (Node->getValueType(0) == MVT::f64) {
+        assert(cast<MVTSDNode>(Node)->getExtraValueType() == MVT::f32 &&
+               "Bad EXTLOAD!");
+        addConstantPoolReference(BuildMI(BB, X86::FLD32m, 4, Result),
+                                 CP->getIndex());
+        return Result;
+      }
+
     X86AddressMode AM;
     if (getRegPressure(Node->getOperand(0)) >
         getRegPressure(Node->getOperand(1))) {
@@ -2177,6 +2239,213 @@ unsigned ISel::SelectExpr(SDOperand N) {
   return 0;
 }
 
+/// TryToFoldLoadOpStore - Given a store node, try to fold together a
+/// load/op/store into a single instruction. If successful, return true.
+bool ISel::TryToFoldLoadOpStore(SDNode *Node) {
+  assert(Node->getOpcode() == ISD::STORE && "Can only do this for stores!");
+  SDOperand Chain = Node->getOperand(0);
+  SDOperand StVal = Node->getOperand(1);
+  SDOperand StPtr = Node->getOperand(2);
+
+  // The stored value must be an integer binary operation with one use.
+  if (!StVal.Val->hasOneUse() || StVal.Val->getNumOperands() != 2 ||
+      MVT::isFloatingPoint(StVal.getValueType()))
+    return false;
+
+  // The token chain must either be a factor node or the load to fold.
+  if (Chain.getOpcode() != ISD::LOAD && Chain.getOpcode() != ISD::TokenFactor)
+    return false;
+
+  SDOperand TheLoad;
+
+  // Check to see if there is a load from the same pointer that we're storing
+  // to in either operand of the binop.
+  if (StVal.getOperand(0).getOpcode() == ISD::LOAD &&
+      StVal.getOperand(0).getOperand(1) == StPtr)
+    TheLoad = StVal.getOperand(0);
+  else if (StVal.getOperand(1).getOpcode() == ISD::LOAD &&
+           StVal.getOperand(1).getOperand(1) == StPtr)
+    TheLoad = StVal.getOperand(1);
+  else
+    return false;  // No matching load operand.
+
+  // We can only fold the load if there are no intervening side-effecting
+  // operations. This means that the store uses the load as its token chain,
+  // or there are only token factor nodes in between the store and load.
+  if (Chain != TheLoad.getValue(1)) {
+    // Okay, the other option is that we have a store referring to (possibly
+    // nested) token factor nodes. For now, just try peeking through one level
+    // of token factors to see if this is the case.
+    bool ChainOk = false;
+    if (Chain.getOpcode() == ISD::TokenFactor) {
+      for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
+        if (Chain.getOperand(i) == TheLoad.getValue(1)) {
+          ChainOk = true;
+          break;
+        }
+    }
+
+    if (!ChainOk) return false;
+  }
+
+  if (TheLoad.getOperand(1) != StPtr)
+    return false;
+
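Before the operand checks continue below, it helps to see what the chain condition buys. The fold is only legal when no side-effecting operation is ordered between the load and the store, and the token chain encodes exactly that ordering. An illustrative pair of C++ functions (editorial, not from the patch):

    #include <cstdint>

    // Foldable: load, op, and store to the same address, nothing in between.
    void rmw_ok(uint32_t *p) {
      *p = *p + 4;                 // can become: addl $4, (%reg)
    }

    // Not foldable into one instruction: the store to *q is chained between
    // the load of *p and the store to *p, and q may alias p.
    void rmw_blocked(uint32_t *p, uint32_t *q) {
      uint32_t v = *p;
      *q = 0;
      *p = v + 4;
    }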
+  // Make sure that one of the operands of the binop is the load, and that the
+  // load folds into the binop.
+  if ((StVal.getOperand(0) != TheLoad ||
+       !isFoldableLoad(TheLoad, StVal.getOperand(1))) &&
+      (StVal.getOperand(1) != TheLoad ||
+       !isFoldableLoad(TheLoad, StVal.getOperand(0))))
+    return false;
+
+  // Finally, check to see if this is one of the ops we can handle!
+  static const unsigned ADDTAB[] = {
+    X86::ADD8mi, X86::ADD16mi, X86::ADD32mi,
+    X86::ADD8mr, X86::ADD16mr, X86::ADD32mr,
+  };
+  static const unsigned SUBTAB[] = {
+    X86::SUB8mi, X86::SUB16mi, X86::SUB32mi,
+    X86::SUB8mr, X86::SUB16mr, X86::SUB32mr,
+  };
+  static const unsigned ANDTAB[] = {
+    X86::AND8mi, X86::AND16mi, X86::AND32mi,
+    X86::AND8mr, X86::AND16mr, X86::AND32mr,
+  };
+  static const unsigned ORTAB[] = {
+    X86::OR8mi, X86::OR16mi, X86::OR32mi,
+    X86::OR8mr, X86::OR16mr, X86::OR32mr,
+  };
+  static const unsigned XORTAB[] = {
+    X86::XOR8mi, X86::XOR16mi, X86::XOR32mi,
+    X86::XOR8mr, X86::XOR16mr, X86::XOR32mr,
+  };
+  static const unsigned SHLTAB[] = {
+    X86::SHL8mi, X86::SHL16mi, X86::SHL32mi,
+    /*Have to put the reg in CL*/0, 0, 0,
+  };
+  static const unsigned SARTAB[] = {
+    X86::SAR8mi, X86::SAR16mi, X86::SAR32mi,
+    /*Have to put the reg in CL*/0, 0, 0,
+  };
+  static const unsigned SHRTAB[] = {
+    X86::SHR8mi, X86::SHR16mi, X86::SHR32mi,
+    /*Have to put the reg in CL*/0, 0, 0,
+  };
+
+  const unsigned *TabPtr = 0;
+  switch (StVal.getOpcode()) {
+  default:
+    std::cerr << "CANNOT [mem] op= val: ";
+    StVal.Val->dump(); std::cerr << "\n";
+  case ISD::MUL:
+  case ISD::SDIV:
+  case ISD::UDIV:
+  case ISD::SREM:
+  case ISD::UREM: return false;
+
+  case ISD::ADD: TabPtr = ADDTAB; break;
+  case ISD::SUB: TabPtr = SUBTAB; break;
+  case ISD::AND: TabPtr = ANDTAB; break;
+  case ISD:: OR: TabPtr = ORTAB; break;
+  case ISD::XOR: TabPtr = XORTAB; break;
+  case ISD::SHL: TabPtr = SHLTAB; break;
+  case ISD::SRA: TabPtr = SARTAB; break;
+  case ISD::SRL: TabPtr = SHRTAB; break;
+  }
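The table layout above is uniform: entries 0-2 are the 8/16/32-bit immediate ([mem] op= imm) forms and entries 3-5 the register ([mem] op= reg) forms, with 0 marking a form x86 lacks (shifts by a register amount must go through CL). A sketch of the indexing convention (pickOpcode is illustrative, not in the patch):

    // SizeIdx: 0 = i8, 1 = i16, 2 = i32; IsImm selects the mi vs. mr forms.
    static unsigned pickOpcode(const unsigned *Tab, unsigned SizeIdx,
                               bool IsImm) {
      return Tab[SizeIdx + (IsImm ? 0 : 3)];
    }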
+
+  // Handle: [mem] op= CST
+  SDOperand Op0 = StVal.getOperand(0);
+  SDOperand Op1 = StVal.getOperand(1);
+  unsigned Opc = 0;
+  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
+    switch (Op0.getValueType()) {  // Use Op0's type because of shifts.
+    default: break;
+    case MVT::i1:
+    case MVT::i8:  Opc = TabPtr[0]; break;
+    case MVT::i16: Opc = TabPtr[1]; break;
+    case MVT::i32: Opc = TabPtr[2]; break;
+    }
+
+    if (Opc) {
+      LoweredTokens.insert(TheLoad.getValue(1));
+      Select(Chain);
+
+      X86AddressMode AM;
+      if (getRegPressure(TheLoad.getOperand(0)) >
+          getRegPressure(TheLoad.getOperand(1))) {
+        Select(TheLoad.getOperand(0));
+        SelectAddress(TheLoad.getOperand(1), AM);
+      } else {
+        SelectAddress(TheLoad.getOperand(1), AM);
+        Select(TheLoad.getOperand(0));
+      }
+
+      if (StVal.getOpcode() == ISD::ADD) {
+        if (CN->getValue() == 1) {           // [X] += 1 -> INC [X]
+          switch (Op0.getValueType()) {
+          default: break;
+          case MVT::i8:
+            addFullAddress(BuildMI(BB, X86::INC8m, 4), AM);
+            return true;
+          case MVT::i16:
+            addFullAddress(BuildMI(BB, X86::INC16m, 4), AM);
+            return true;
+          case MVT::i32:
+            addFullAddress(BuildMI(BB, X86::INC32m, 4), AM);
+            return true;
+          }
+        } else if (CN->getValue()+1 == 0) {  // [X] += -1 -> DEC [X]
+          switch (Op0.getValueType()) {
+          default: break;
+          case MVT::i8:
+            addFullAddress(BuildMI(BB, X86::DEC8m, 4), AM);
+            return true;
+          case MVT::i16:
+            addFullAddress(BuildMI(BB, X86::DEC16m, 4), AM);
+            return true;
+          case MVT::i32:
+            addFullAddress(BuildMI(BB, X86::DEC32m, 4), AM);
+            return true;
+          }
+        }
+      }
+
+      addFullAddress(BuildMI(BB, Opc, 4+1), AM).addImm(CN->getValue());
+      return true;
+    }
+  }
+
+  // If we have [mem] = V op [mem], try to turn it into:
+  //            [mem] = [mem] op V.
+  if (Op1 == TheLoad && StVal.getOpcode() != ISD::SUB &&
+      StVal.getOpcode() != ISD::SHL && StVal.getOpcode() != ISD::SRA &&
+      StVal.getOpcode() != ISD::SRL)
+    std::swap(Op0, Op1);
+
+  if (Op0 != TheLoad) return false;
+
+  switch (Op0.getValueType()) {
+  default: return false;
+  case MVT::i1:
+  case MVT::i8:  Opc = TabPtr[3]; break;
+  case MVT::i16: Opc = TabPtr[4]; break;
+  case MVT::i32: Opc = TabPtr[5]; break;
+  }
+
+  LoweredTokens.insert(TheLoad.getValue(1));
+  Select(Chain);
+
+  Select(TheLoad.getOperand(0));
+  X86AddressMode AM;
+  SelectAddress(TheLoad.getOperand(1), AM);
+  unsigned Reg = SelectExpr(Op1);
+  addFullAddress(BuildMI(BB, Opc, 4+1), AM).addReg(Reg);
+  return true;
+}
+
+
 void ISel::Select(SDOperand N) {
   unsigned Tmp1, Tmp2, Opc;
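One subtlety in the INC/DEC special case above: ConstantSDNode holds its value in a uint64_t, so the code tests for -1 with CN->getValue()+1 == 0, letting unsigned wraparound identify the all-ones pattern (assuming the constant is stored sign-extended). A minimal illustration (editorial):

    #include <cstdint>

    static bool isMinusOne(uint64_t Imm) {
      return Imm + 1 == 0;  // wraps to 0 only for the all-ones bit pattern
    }

    void bump(uint32_t *p) { *p += 1; }  // selectable as: incl (%reg)
    void drop(uint32_t *p) { *p -= 1; }  // selectable as: decl (%reg)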
@@ -2309,10 +2578,35 @@ void ISel::Select(SDOperand N) {
     return;
   }
 
+  case ISD::LOAD:
+    // If this load could be folded into the only using instruction, and if it
+    // is safe to emit the instruction here, try to do so now.
+    if (Node->hasNUsesOfValue(1, 0)) {
+      SDOperand TheVal = N.getValue(0);
+      SDNode *User = 0;
+      for (SDNode::use_iterator UI = Node->use_begin(); ; ++UI) {
+        assert(UI != Node->use_end() && "Didn't find use!");
+        SDNode *UN = *UI;
+        for (unsigned i = 0, e = UN->getNumOperands(); i != e; ++i)
+          if (UN->getOperand(i) == TheVal) {
+            User = UN;
+            goto FoundIt;
+          }
+      }
+    FoundIt:
+      // Only handle unary operators right now.
+      if (User->getNumOperands() == 1) {
+        LoweredTokens.erase(N);
+        SelectExpr(SDOperand(User, 0));
+        return;
+      }
+    }
+    SelectExpr(N);
+    return;
+
   case ISD::EXTLOAD:
   case ISD::SEXTLOAD:
   case ISD::ZEXTLOAD:
-  case ISD::LOAD:
   case ISD::CALL:
   case ISD::DYNAMIC_STACKALLOC:
     SelectExpr(N);
@@ -2322,8 +2616,23 @@ void ISel::Select(SDOperand N) {
     // On X86, we can represent all types except for Bool and Float natively.
     X86AddressMode AM;
     MVT::ValueType StoredTy = cast<MVTSDNode>(Node)->getExtraValueType();
-    assert((StoredTy == MVT::i1 || StoredTy == MVT::f32) &&
-           "Unsupported TRUNCSTORE for this target!");
+    assert((StoredTy == MVT::i1 || StoredTy == MVT::f32 ||
+            StoredTy == MVT::i16 /*FIXME: THIS IS JUST FOR TESTING!*/)
+           && "Unsupported TRUNCSTORE for this target!");
+
+    if (StoredTy == MVT::i16) {
+      // FIXME: This is here just to allow testing. X86 doesn't really have a
+      // TRUNCSTORE i16 operation, but this is required for targets that do not
+      // have 16-bit integer registers. We occasionally disable 16-bit integer
+      // registers to test the promotion code.
+      Select(N.getOperand(0));
+      Tmp1 = SelectExpr(N.getOperand(1));
+      SelectAddress(N.getOperand(2), AM);
+
+      BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Tmp1);
+      addFullAddress(BuildMI(BB, X86::MOV16mr, 5), AM).addReg(X86::AX);
+      return;
+    }
 
     // Store of constant bool?
     if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
@@ -2389,121 +2698,8 @@ void ISel::Select(SDOperand N) {
     }
 
     // Check to see if this is a load/op/store combination.
-    if (N.getOperand(1).Val->hasOneUse() &&
-        isFoldableLoad(N.getOperand(0).getValue(0)) &&
-        !MVT::isFloatingPoint(N.getOperand(0).getValue(0).getValueType())) {
-      SDOperand TheLoad = N.getOperand(0).getValue(0);
-      // Check to see if we are loading the same pointer that we're storing to.
-      if (TheLoad.getOperand(1) == N.getOperand(2)) {
-        // See if the stored value is a simple binary operator that uses the
-        // load as one of its operands.
-        SDOperand Op = N.getOperand(1);
-        if (Op.Val->getNumOperands() == 2 &&
-            (Op.getOperand(0) == TheLoad || Op.getOperand(1) == TheLoad)) {
-          // Finally, check to see if this is one of the ops we can handle!
-          static const unsigned ADDTAB[] = {
-            X86::ADD8mi, X86::ADD16mi, X86::ADD32mi,
-            X86::ADD8mr, X86::ADD16mr, X86::ADD32mr,
-          };
-          static const unsigned SUBTAB[] = {
-            X86::SUB8mi, X86::SUB16mi, X86::SUB32mi,
-            X86::SUB8mr, X86::SUB16mr, X86::SUB32mr,
-          };
-          static const unsigned ANDTAB[] = {
-            X86::AND8mi, X86::AND16mi, X86::AND32mi,
-            X86::AND8mr, X86::AND16mr, X86::AND32mr,
-          };
-          static const unsigned ORTAB[] = {
-            X86::OR8mi, X86::OR16mi, X86::OR32mi,
-            X86::OR8mr, X86::OR16mr, X86::OR32mr,
-          };
-          static const unsigned XORTAB[] = {
-            X86::XOR8mi, X86::XOR16mi, X86::XOR32mi,
-            X86::XOR8mr, X86::XOR16mr, X86::XOR32mr,
-          };
-          static const unsigned SHLTAB[] = {
-            X86::SHL8mi, X86::SHL16mi, X86::SHL32mi,
-            /*Have to put the reg in CL*/0, 0, 0,
-          };
-          static const unsigned SARTAB[] = {
-            X86::SAR8mi, X86::SAR16mi, X86::SAR32mi,
-            /*Have to put the reg in CL*/0, 0, 0,
-          };
-          static const unsigned SHRTAB[] = {
-            X86::SHR8mi, X86::SHR16mi, X86::SHR32mi,
-            /*Have to put the reg in CL*/0, 0, 0,
-          };
-
-          const unsigned *TabPtr = 0;
-          switch (Op.getOpcode()) {
-          default: std::cerr << "CANNOT [mem] op= val: "; Op.Val->dump(); std::cerr << "\n"; break;
-          case ISD::ADD: TabPtr = ADDTAB; break;
-          case ISD::SUB: TabPtr = SUBTAB; break;
-          case ISD::AND: TabPtr = ANDTAB; break;
-          case ISD:: OR: TabPtr = ORTAB; break;
-          case ISD::XOR: TabPtr = XORTAB; break;
-          case ISD::SHL: TabPtr = SHLTAB; break;
-          case ISD::SRA: TabPtr = SARTAB; break;
-          case ISD::SRL: TabPtr = SHRTAB; break;
-          }
-
-          if (TabPtr) {
-            // Handle: [mem] op= CST
-            SDOperand Op0 = Op.getOperand(0);
-            SDOperand Op1 = Op.getOperand(1);
-            if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1)) {
-              switch (Op0.getValueType()) {  // Use Op0's type because of shifts.
-              default: break;
-              case MVT::i1:
-              case MVT::i8:  Opc = TabPtr[0]; break;
-              case MVT::i16: Opc = TabPtr[1]; break;
-              case MVT::i32: Opc = TabPtr[2]; break;
-              }
-
-              if (Opc) {
-                if (getRegPressure(TheLoad.getOperand(0)) >
-                    getRegPressure(TheLoad.getOperand(1))) {
-                  Select(TheLoad.getOperand(0));
-                  SelectAddress(TheLoad.getOperand(1), AM);
-                } else {
-                  SelectAddress(TheLoad.getOperand(1), AM);
-                  Select(TheLoad.getOperand(0));
-                }
-
-                addFullAddress(BuildMI(BB, Opc, 4+1),AM).addImm(CN->getValue());
-                return;
-              }
-            }
-
-            // If we have [mem] = V op [mem], try to turn it into:
-            //            [mem] = [mem] op V.
-            if (Op1 == TheLoad && Op.getOpcode() != ISD::SUB &&
-                Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRA &&
-                Op.getOpcode() != ISD::SRL)
-              std::swap(Op0, Op1);
-
-            if (Op0 == TheLoad) {
-              switch (Op0.getValueType()) {
-              default: break;
-              case MVT::i1:
-              case MVT::i8:  Opc = TabPtr[3]; break;
-              case MVT::i16: Opc = TabPtr[4]; break;
-              case MVT::i32: Opc = TabPtr[5]; break;
-              }
-
-              if (Opc) {
-                Select(TheLoad.getOperand(0));
-                SelectAddress(TheLoad.getOperand(1), AM);
-                unsigned Reg = SelectExpr(Op1);
-                addFullAddress(BuildMI(BB, Opc, 4+1),AM).addReg(Reg);
-                return;
-              }
-            }
-          }
-        }
-      }
-    }
-
+    if (TryToFoldLoadOpStore(Node))
+      return;
 
     switch (N.getOperand(1).getValueType()) {
     default: assert(0 && "Cannot store this type!");
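Net effect of pulling this logic into TryToFoldLoadOpStore: read-modify-write expressions can now be selected as single memory-operand instructions even when the token chain is more tangled than the old inline matcher could see through. An illustrative before/after (editorial; the registers are arbitrary):

    void touch(int *p) {
      *p += 7;
      // before: movl (%eax), %ecx
      //         addl $7, %ecx
      //         movl %ecx, (%eax)
      // after:  addl $7, (%eax)
    }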