From: Dale Johannesen
Date: Thu, 17 May 2007 21:31:21 +0000 (+0000)
Subject: More effective breakdown of memcpy into repeated load/store. These are now
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=8dd86c14d46b2deb1e1335d495acc579d70bf60f;p=oota-llvm.git

More effective breakdown of memcpy into repeated load/store. These are now
in the order lod;lod;lod;sto;sto;sto which means the load-store optimizer
has a better chance of producing ldm/stm. Ideally you would get cooperation
from the RA as well but this is not there yet.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@37179 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index d059db1acfe..d30a288d198 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -187,7 +187,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
   // Expand mem operations genericly.
   setOperationAction(ISD::MEMSET , MVT::Other, Expand);
-  setOperationAction(ISD::MEMCPY , MVT::Other, Expand);
+  setOperationAction(ISD::MEMCPY , MVT::Other, Custom);
   setOperationAction(ISD::MEMMOVE , MVT::Other, Expand);
 
   // Use the default implementation.
@@ -255,6 +255,8 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
   setSchedulingPreference(SchedulingForRegPressure);
   computeRegisterProperties();
+
+  maxStoresPerMemcpy = 1;   //// temporary - rewrite interface to use type
 }
 
@@ -1328,6 +1330,78 @@ static SDOperand LowerSRx(SDOperand Op, SelectionDAG &DAG,
   return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Lo, Hi);
 }
 
+SDOperand ARMTargetLowering::LowerMEMCPY(SDOperand Op, SelectionDAG &DAG) {
+  SDOperand Chain = Op.getOperand(0);
+  SDOperand Dest = Op.getOperand(1);
+  SDOperand Src = Op.getOperand(2);
+  SDOperand Count = Op.getOperand(3);
+  unsigned Align =
+    (unsigned)cast<ConstantSDNode>(Op.getOperand(4))->getValue();
+  if (Align == 0) Align = 1;
+
+  ConstantSDNode *I = dyn_cast<ConstantSDNode>(Count);
+  // Just call memcpy if:
+  //   not 4-byte aligned
+  //   size is unknown
+  //   size is >= the threshold.
+  if ((Align & 3) != 0 ||
+      !I ||
+      I->getValue() >= 64 ||
+      (I->getValue() & 3) != 0) {
+    MVT::ValueType IntPtr = getPointerTy();
+    TargetLowering::ArgListTy Args;
+    TargetLowering::ArgListEntry Entry;
+    Entry.Ty = getTargetData()->getIntPtrType();
+    Entry.Node = Op.getOperand(1); Args.push_back(Entry);
+    Entry.Node = Op.getOperand(2); Args.push_back(Entry);
+    Entry.Node = Op.getOperand(3); Args.push_back(Entry);
+    std::pair<SDOperand,SDOperand> CallResult =
+      LowerCallTo(Chain, Type::VoidTy, false, false, CallingConv::C, false,
+                  DAG.getExternalSymbol("memcpy", IntPtr), Args, DAG);
+    return CallResult.second;
+  }
+
+  // Otherwise do repeated 4-byte loads and stores. To be improved.
+  assert((I->getValue() & 3) == 0);
+  assert((Align & 3) == 0);
+  unsigned NumMemOps = I->getValue() >> 2;
+  unsigned EmittedNumMemOps = 0;
+  unsigned SrcOff = 0, DstOff = 0;
+  MVT::ValueType VT = MVT::i32;
+  unsigned VTSize = 4;
+  const int MAX_LOADS_IN_LDM = 6;
+  SDOperand LoadChains[MAX_LOADS_IN_LDM];
+  SDOperand Loads[MAX_LOADS_IN_LDM];
+
+  // Emit up to 4 loads, then a TokenFactor barrier, then the same
+  // number of stores. The loads and stores will get combined into
+  // ldm/stm later on.
+  while(EmittedNumMemOps < NumMemOps) {
+    unsigned int i;
+    for (i=0; i
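
The lod;lod;lod;sto;sto;sto ordering described in the commit message can be illustrated outside of SelectionDAG. The sketch below is a plain standalone C++ model of the same batching idea, assuming a 4-byte-aligned copy of known size: up to MAX_LOADS_IN_LDM word loads are issued, then the matching word stores, so each run of adjacent same-direction accesses is a candidate for a single ldm/stm. The helper name batchedWordCopy and the constant kMaxLoadsInLdm are invented for this illustration; this is not the lowering code itself, whose loop body is cut off above.

// Standalone illustration (not LLVM code) of the batching strategy described
// in the commit message: for a 4-byte-aligned copy of known size, perform up
// to six word loads, then the matching word stores, so that a later pass
// could merge each run of adjacent accesses into a single ldm/stm.
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstring>

static const unsigned kMaxLoadsInLdm = 6;   // mirrors MAX_LOADS_IN_LDM above

// Copy Size bytes (Size must be a multiple of 4) from Src to Dst in the
// lod;lod;...;sto;sto;... order.
static void batchedWordCopy(uint8_t *Dst, const uint8_t *Src, unsigned Size) {
  assert((Size & 3) == 0 && "sketch only handles word-sized copies");
  unsigned NumMemOps = Size >> 2;      // number of 4-byte transfers
  unsigned Emitted = 0;
  unsigned SrcOff = 0, DstOff = 0;

  while (Emitted < NumMemOps) {
    uint32_t Loads[kMaxLoadsInLdm];

    // First all the loads of this batch (candidates for one ldm)...
    unsigned i;
    for (i = 0; i < kMaxLoadsInLdm && Emitted + i < NumMemOps; ++i) {
      std::memcpy(&Loads[i], Src + SrcOff, 4);   // one word load
      SrcOff += 4;
    }

    // ...then all the stores of the batch (candidates for one stm).
    for (unsigned j = 0; j < i; ++j) {
      std::memcpy(Dst + DstOff, &Loads[j], 4);   // one word store
      DstOff += 4;
    }
    Emitted += i;
  }
}

int main() {
  uint8_t Src[40], Dst[40] = {0};
  for (unsigned k = 0; k < sizeof(Src); ++k) Src[k] = uint8_t(k);
  batchedWordCopy(Dst, Src, sizeof(Src));   // 40 bytes -> batches of 6+4 words
  std::printf("copies match: %d\n", std::memcmp(Src, Dst, sizeof(Src)) == 0);
  return 0;
}

The two inner loops mirror the order the commit message describes: all loads of a batch complete before any of its stores are issued, which in the real lowering is enforced by a TokenFactor barrier between the load group and the store group.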