From: Chris Lattner
Date: Mon, 5 Dec 2005 07:10:48 +0000 (+0000)
Subject: Fix the #1 code quality problem that I have seen on X86 (and it also affects
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=c88d8e944dae71e31595b8ae264668e68db6b8ed;p=oota-llvm.git

Fix the #1 code quality problem that I have seen on X86 (and it also affects
PPC and other targets).  In particular, consider code like this:

struct Vector3 { double x, y, z; };
struct Matrix3 { Vector3 a, b, c; };
double dot(Vector3 &a, Vector3 &b) {
   return a.x * b.x + a.y * b.y + a.z * b.z;
}
Vector3 mul(Vector3 &a, Matrix3 &b) {
   Vector3 r;
   r.x = dot( a, b.a );
   r.y = dot( a, b.b );
   r.z = dot( a, b.c );
   return r;
}
void transform(Matrix3 &m, Vector3 *x, int n) {
   for (int i = 0; i < n; i++)
     x[i] = mul( x[i], m );
}

we compile transform to a loop with all of the GEP instructions for indexing
into 'm' pulled out of the loop (9 of them).  Because isel occurs a bb at a time
we are unable to fold the constant index into the loads in the loop, leading to
PPC code that looks like this:

LBB3_1: ; no_exit.preheader
        li r2, 0
        addi r6, r3, 64        ;; 9 values live across the loop body!
        addi r7, r3, 56
        addi r8, r3, 48
        addi r9, r3, 40
        addi r10, r3, 32
        addi r11, r3, 24
        addi r12, r3, 16
        addi r30, r3, 8
LBB3_2: ; no_exit
        lfd f0, 0(r30)
        lfd f1, 8(r4)
        fmul f0, f1, f0
        lfd f2, 0(r3)        ;; no constant indices folded into the loads!
        lfd f3, 0(r4)
        lfd f4, 0(r10)
        lfd f5, 0(r6)
        lfd f6, 0(r7)
        lfd f7, 0(r8)
        lfd f8, 0(r9)
        lfd f9, 0(r11)
        lfd f10, 0(r12)
        lfd f11, 16(r4)
        fmadd f0, f3, f2, f0
        fmul f2, f1, f4
        fmadd f0, f11, f10, f0
        fmadd f2, f3, f9, f2
        fmul f1, f1, f6
        stfd f0, 0(r4)
        fmadd f0, f11, f8, f2
        fmadd f1, f3, f7, f1
        stfd f0, 8(r4)
        fmadd f0, f11, f5, f1
        addi r29, r4, 24
        stfd f0, 16(r4)
        addi r2, r2, 1
        cmpw cr0, r2, r5
        or r4, r29, r29
        bne cr0, LBB3_2 ; no_exit

uh, yuck.
With this patch, we now sink the constant offsets into the loop, producing this
code:

LBB3_1: ; no_exit.preheader
        li r2, 0
LBB3_2: ; no_exit
        lfd f0, 8(r3)
        lfd f1, 8(r4)
        fmul f0, f1, f0
        lfd f2, 0(r3)
        lfd f3, 0(r4)
        lfd f4, 32(r3)       ;; much nicer.
        lfd f5, 64(r3)
        lfd f6, 56(r3)
        lfd f7, 48(r3)
        lfd f8, 40(r3)
        lfd f9, 24(r3)
        lfd f10, 16(r3)
        lfd f11, 16(r4)
        fmadd f0, f3, f2, f0
        fmul f2, f1, f4
        fmadd f0, f11, f10, f0
        fmadd f2, f3, f9, f2
        fmul f1, f1, f6
        stfd f0, 0(r4)
        fmadd f0, f11, f8, f2
        fmadd f1, f3, f7, f1
        stfd f0, 8(r4)
        fmadd f0, f11, f5, f1
        addi r6, r4, 24
        stfd f0, 16(r4)
        addi r2, r2, 1
        cmpw cr0, r2, r5
        or r4, r6, r6
        bne cr0, LBB3_2 ; no_exit

This is much nicer as it reduces register pressure in the loop a lot.  On X86,
this takes the function from having 9 spilled registers to 2.  This should help
some spec programs on X86 (gzip?)

This is currently only enabled with -enable-gep-isel-opt to allow perf testing
tonight.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@24606 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 1d92bcb4f7d..0f5743860da 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -40,6 +40,10 @@
 #include <map>
 using namespace llvm;
 
+static cl::opt<bool>
+GEPISelTest("enable-gep-isel-opt", cl::Hidden,
+            cl::desc("temporary for testing"));
+
 #ifndef NDEBUG
 static cl::opt<bool>
 ViewDAGs("view-isel-dags", cl::Hidden,
@@ -618,7 +622,7 @@ void SelectionDAGLowering::visitGetElementPtr(User &I) {
   for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
        OI != E; ++OI) {
     Value *Idx = *OI;
-    if (const StructType *StTy = dyn_cast<StructType> (Ty)) {
+    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
       unsigned Field = cast<ConstantUInt>(Idx)->getValue();
       if (Field) {
         // N = N + Offset
@@ -1224,21 +1228,173 @@ void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
   // updates dom and loop info.
 }
+
+/// InsertGEPComputeCode - Insert code into BB to compute Ptr+PtrOffset,
+/// casting to the type of GEPI.
+static Value *InsertGEPComputeCode(Value *&V, BasicBlock *BB, Instruction *GEPI,
+                                   Value *Ptr, Value *PtrOffset) {
+  if (V) return V;   // Already computed.
+
+  BasicBlock::iterator InsertPt;
+  if (BB == GEPI->getParent()) {
+    // If insert into the GEP's block, insert right after the GEP.
+    InsertPt = GEPI;
+    ++InsertPt;
+  } else {
+    // Otherwise, insert at the top of BB, after any PHI nodes
+    InsertPt = BB->begin();
+    while (isa<PHINode>(InsertPt)) ++InsertPt;
+  }
+
+  // Add the offset, cast it to the right type.
+  Ptr = BinaryOperator::createAdd(Ptr, PtrOffset, "", InsertPt);
+  Ptr = new CastInst(Ptr, GEPI->getType(), "", InsertPt);
+  return V = Ptr;
+}
+
+
+/// OptimizeGEPExpression - Since we are doing basic-block-at-a-time instruction
+/// selection, we want to be a bit careful about some things. In particular, if
+/// we have a GEP instruction that is used in a different block than it is
+/// defined, the addressing expression of the GEP cannot be folded into loads or
+/// stores that use it. In this case, decompose the GEP and move constant
+/// indices into blocks that use it.
+static void OptimizeGEPExpression(GetElementPtrInst *GEPI,
+                                  const TargetData &TD) {
+  if (!GEPISelTest) return;
+
+  // If this GEP is only used inside the block it is defined in, there is no
+  // need to rewrite it.
+  bool isUsedOutsideDefBB = false;
+  BasicBlock *DefBB = GEPI->getParent();
+  for (Value::use_iterator UI = GEPI->use_begin(), E = GEPI->use_end();
+       UI != E; ++UI) {
+    if (cast<Instruction>(*UI)->getParent() != DefBB) {
+      isUsedOutsideDefBB = true;
+      break;
+    }
+  }
+  if (!isUsedOutsideDefBB) return;
+
+  // If this GEP has no non-zero constant indices, there is nothing we can do,
+  // ignore it.
+  bool hasConstantIndex = false;
+  for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
+       E = GEPI->op_end(); OI != E; ++OI) {
+    if (ConstantInt *CI = dyn_cast<ConstantInt>(*OI))
+      if (CI->getRawValue()) {
+        hasConstantIndex = true;
+        break;
+      }
+  }
+  if (!hasConstantIndex) return;
+
+  // Otherwise, decompose the GEP instruction into multiplies and adds.  Sum the
+  // constant offset (which we now know is non-zero) and deal with it later.
+  uint64_t ConstantOffset = 0;
+  const Type *UIntPtrTy = TD.getIntPtrType();
+  Value *Ptr = new CastInst(GEPI->getOperand(0), UIntPtrTy, "", GEPI);
+  const Type *Ty = GEPI->getOperand(0)->getType();
+
+  for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
+       E = GEPI->op_end(); OI != E; ++OI) {
+    Value *Idx = *OI;
+    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
+      unsigned Field = cast<ConstantUInt>(Idx)->getValue();
+      if (Field)
+        ConstantOffset += TD.getStructLayout(StTy)->MemberOffsets[Field];
+      Ty = StTy->getElementType(Field);
+    } else {
+      Ty = cast<SequentialType>(Ty)->getElementType();
+
+      // Handle constant subscripts.
+      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
+        if (CI->getRawValue() == 0) continue;
+
+        if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
+          ConstantOffset += (int64_t)TD.getTypeSize(Ty)*CSI->getValue();
+        else
+          ConstantOffset+=TD.getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
+        continue;
+      }
+
+      // Ptr = Ptr + Idx * ElementSize;
+
+      // Cast Idx to UIntPtrTy if needed.
+      Idx = new CastInst(Idx, UIntPtrTy, "", GEPI);
+
+      uint64_t ElementSize = TD.getTypeSize(Ty);
+      // Mask off bits that should not be set.
+      ElementSize &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
+      Constant *SizeCst = ConstantUInt::get(UIntPtrTy, ElementSize);
+
+      // Multiply by the element size and add to the base.
+      Idx = BinaryOperator::createMul(Idx, SizeCst, "", GEPI);
+      Ptr = BinaryOperator::createAdd(Ptr, Idx, "", GEPI);
+    }
+  }
+
+  // Make sure that the offset fits in uintptr_t.
+  ConstantOffset &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
+  Constant *PtrOffset = ConstantUInt::get(UIntPtrTy, ConstantOffset);
+
+  // Okay, we have now emitted all of the variable index parts to the BB that
+  // the GEP is defined in.  Loop over all of the using instructions, inserting
+  // an "add Ptr, ConstantOffset" into each block that uses it and update the
+  // instruction to use the newly computed value, making GEPI dead.
+  std::map<BasicBlock*,Value*> InsertedExprs;
+  while (!GEPI->use_empty()) {
+    Instruction *User = cast<Instruction>(GEPI->use_back());
+
+    // Handle PHI's specially, as we need to insert code in the predecessor
+    // blocks for uses, not in the PHI block.
+    if (PHINode *PN = dyn_cast<PHINode>(User)) {
+      for (PHINode::op_iterator OI = PN->op_begin(), E = PN->op_end();
+           OI != E; OI += 2) {
+        if (*OI == GEPI) {
+          BasicBlock *Pred = cast<BasicBlock>(OI[1]);
+          *OI = InsertGEPComputeCode(InsertedExprs[Pred], Pred, GEPI,
+                                     Ptr, PtrOffset);
+        }
+      }
+      continue;
+    }
+
+    // Otherwise, insert the code in the User's block so it can be folded into
+    // any users in that block.
+    Value *V = InsertGEPComputeCode(InsertedExprs[User->getParent()],
+                                    User->getParent(), GEPI,
+                                    Ptr, PtrOffset);
+    User->replaceUsesOfWith(GEPI, V);
+  }
+
+  // Finally, the GEP is dead, remove it.
+  GEPI->eraseFromParent();
+}
+
 bool SelectionDAGISel::runOnFunction(Function &Fn) {
   MachineFunction &MF = MachineFunction::construct(&Fn, TLI.getTargetMachine());
   RegMap = MF.getSSARegMap();
 
   DEBUG(std::cerr << "\n\n\n=== " << Fn.getName() << "\n");
 
-  // First pass, split all critical edges for PHI nodes with incoming values
-  // that are constants, this way the load of the constant into a vreg will not
-  // be placed into MBBs that are used some other way.
+  // First, split all critical edges for PHI nodes with incoming values that are
+  // constants, this way the load of the constant into a vreg will not be placed
+  // into MBBs that are used some other way.
+  //
+  // In this pass we also look for GEP instructions that are used across basic
+  // blocks and rewrite them to improve basic-block-at-a-time selection.
+  //
   for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
     PHINode *PN;
-    for (BasicBlock::iterator BBI = BB->begin();
-         (PN = dyn_cast<PHINode>(BBI)); ++BBI)
+    BasicBlock::iterator BBI;
+    for (BBI = BB->begin(); (PN = dyn_cast<PHINode>(BBI)); ++BBI)
       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
         if (isa<Constant>(PN->getIncomingValue(i)))
           SplitCriticalEdge(PN->getIncomingBlock(i), BB);
+
+    for (BasicBlock::iterator E = BB->end(); BBI != E; )
+      if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(BBI++))
+        OptimizeGEPExpression(GEPI, TLI.getTargetData());
   }
 
   FunctionLoweringInfo FuncInfo(TLI, Fn, MF);