DominatorTree &getDominatorTree() const { return *DT; }
AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
MemoryDependenceAnalysis &getMemDep() const { return *MD; }
+
+ /// Assign a value number to an instruction newly created during GVN
+ /// optimization and add it to the leader table.
+ void addNewInstruction(Value *Val) {
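+ // Builder calls may fold to a constant or reuse an existing value, so only
+ // genuine instructions get a value number and a leader table entry.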
+ if (Instruction *I = dyn_cast<Instruction>(Val)) {
+ unsigned Num = VN.lookup_or_add(I);
+ addToLeaderTable(Num, I, I->getParent());
+ }
+ }
+
private:
/// Push a new Value to the LeaderTable onto the list for its value number.
void addToLeaderTable(uint32_t N, Value *V, const BasicBlock *BB) {
/// before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
Type *LoadTy,
- Instruction *InsertPt, const DataLayout &DL){
+ Instruction *InsertPt, const DataLayout &DL,
+ GVN &gvn){
LLVMContext &Ctx = SrcVal->getType()->getContext();
uint64_t StoreSize = (DL.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
// Compute which bits of the stored value are being used by the load. Convert
// to an integer type to start with.
- if (SrcVal->getType()->getScalarType()->isPointerTy())
+ if (SrcVal->getType()->getScalarType()->isPointerTy()) {
SrcVal = Builder.CreatePtrToInt(SrcVal,
DL.getIntPtrType(SrcVal->getType()));
- if (!SrcVal->getType()->isIntegerTy())
+ gvn.addNewInstruction(SrcVal);
+ }
+ if (!SrcVal->getType()->isIntegerTy()) {
SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8));
+ gvn.addNewInstruction(SrcVal);
+ }
// Shift the bits to the least significant depending on endianness.
unsigned ShiftAmt;
if (DL.isLittleEndian())
ShiftAmt = Offset*8;
else
ShiftAmt = (StoreSize-LoadSize-Offset)*8;
- if (ShiftAmt)
+ if (ShiftAmt) {
SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt);
+ gvn.addNewInstruction(SrcVal);
+ }
- if (LoadSize != StoreSize)
+ if (LoadSize != StoreSize) {
SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8));
+ gvn.addNewInstruction(SrcVal);
+ }
return CoerceAvailableValueToLoadType(SrcVal, LoadTy, Builder, DL);
}
PtrVal->getType()->getPointerAddressSpace());
Builder.SetCurrentDebugLocation(SrcVal->getDebugLoc());
PtrVal = Builder.CreateBitCast(PtrVal, DestPTy);
+ gvn.addNewInstruction(PtrVal);
LoadInst *NewLoad = Builder.CreateLoad(PtrVal);
NewLoad->takeName(SrcVal);
NewLoad->setAlignment(SrcVal->getAlignment());
// Replace uses of the original load with the wider load. On a big endian
// system, we need to shift down to get the relevant bits.
Value *RV = NewLoad;
- if (DL.isBigEndian())
+ if (DL.isBigEndian()) {
RV = Builder.CreateLShr(RV,
NewLoadSize*8-SrcVal->getType()->getPrimitiveSizeInBits());
+ gvn.addNewInstruction(RV);
+ }
RV = Builder.CreateTrunc(RV, SrcVal->getType());
+ gvn.addNewInstruction(RV);
SrcVal->replaceAllUsesWith(RV);
// We would like to use gvn.markInstructionForDeletion here, but we can't
SrcVal = NewLoad;
}
- return GetStoreValueForLoad(SrcVal, Offset, LoadTy, InsertPt, DL);
+ return GetStoreValueForLoad(SrcVal, Offset, LoadTy, InsertPt, DL, gvn);
}
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
Type *LoadTy, Instruction *InsertPt,
- const DataLayout &DL){
+ const DataLayout &DL, GVN &gvn){
LLVMContext &Ctx = LoadTy->getContext();
uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy)/8;
// memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
// independently of what the offset is.
Value *Val = MSI->getValue();
- if (LoadSize != 1)
+ if (LoadSize != 1) {
Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));
+ gvn.addNewInstruction(Val);
+ }
Value *OneElt = Val;
// Splat the value out to the right number of bits.
for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
// If we can double the number of bytes set, do it.
if (NumBytesSet*2 <= LoadSize) {
Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
+ gvn.addNewInstruction(ShVal);
Val = Builder.CreateOr(Val, ShVal);
+ gvn.addNewInstruction(Val);
NumBytesSet <<= 1;
continue;
}
// Otherwise insert one byte at a time.
Value *ShVal = Builder.CreateShl(Val, 1*8);
+ gvn.addNewInstruction(ShVal);
Val = Builder.CreateOr(OneElt, ShVal);
+ gvn.addNewInstruction(Val);
++NumBytesSet;
}
if (isSimpleValue()) {
Res = getSimpleValue();
if (Res->getType() != LoadTy) {
- Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(), DL);
+ Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(), DL, gvn);
DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " "
<< *getSimpleValue() << '\n'
<< *Res << '\n' << "\n\n\n");
}
} else if (isMemIntrinValue()) {
Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,
- BB->getTerminator(), DL);
+ BB->getTerminator(), DL, gvn);
DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
<< " " << *getMemIntrinValue() << '\n'
<< *Res << '\n' << "\n\n\n");
L->getType(), L->getPointerOperand(), DepSI);
if (Offset != -1)
AvailVal = GetStoreValueForLoad(DepSI->getValueOperand(), Offset,
- L->getType(), L, DL);
+ L->getType(), L, DL, *this);
}
// Check to see if we have something like this:
int Offset = AnalyzeLoadFromClobberingMemInst(
L->getType(), L->getPointerOperand(), DepMI, DL);
if (Offset != -1)
- AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, DL);
+ AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, DL, *this);
}
if (AvailVal) {
--- /dev/null
+;RUN: opt -gvn -S < %s | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n8:16:32-S64"
+target triple = "thumbv7--linux-gnueabi"
+
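+; GVN widens the i16 load of %code1 in %bb0 to an i32 load (checked below) so
+; that it also covers the adjacent i16 field read in %cond.false. The
+; instructions created while widening the load must be given value numbers,
+; which is the path addNewInstruction above exercises.
+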
+%struct.a = type { i16, i16, [1 x %union.a] }
+%union.a = type { i32 }
+
+@length = external global [0 x i32], align 4
+
+; Function Attrs: nounwind
+define fastcc void @foo(%struct.a* nocapture readonly %x) {
+;CHECK-LABEL: foo
+entry:
+ br label %bb0
+
+bb0: ; preds = %land.lhs.true, %entry
+;CHECK: bb0:
+ %x.tr = phi %struct.a* [ %x, %entry ], [ null, %land.lhs.true ]
+ %code1 = getelementptr inbounds %struct.a, %struct.a* %x.tr, i32 0, i32 0
+ %0 = load i16, i16* %code1, align 4
+; CHECK: load i32, i32*
+ %conv = zext i16 %0 to i32
+ switch i32 %conv, label %if.end.50 [
+ i32 43, label %cleanup
+ i32 52, label %if.then.5
+ ]
+
+if.then.5: ; preds = %bb0
+ br i1 undef, label %land.lhs.true, label %if.then.26
+
+land.lhs.true: ; preds = %if.then.5
+ br i1 undef, label %cleanup, label %bb0
+
+if.then.26: ; preds = %if.then.5
+ %x.tr.lcssa163 = phi %struct.a* [ %x.tr, %if.then.5 ]
+ br i1 undef, label %cond.end, label %cond.false
+
+cond.false: ; preds = %if.then.26
+; CHECK: cond.false:
+; CHECK-NOT: load
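+; The bytes of %mode are already covered by the widened load in %bb0, so this
+; load should be value-numbered away.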
+ %mode = getelementptr inbounds %struct.a, %struct.a* %x.tr.lcssa163, i32 0, i32 1
+ %bf.load = load i16, i16* %mode, align 2
+ %bf.shl = shl i16 %bf.load, 8
+ br label %cond.end
+
+cond.end: ; preds = %cond.false, %if.then.26
+ br i1 undef, label %if.then.44, label %cleanup
+
+if.then.44: ; preds = %cond.end
+ unreachable
+
+if.end.50: ; preds = %bb0
+;CHECK: if.end.50:
+ %conv.lcssa = phi i32 [ %conv, %bb0 ]
+ %arrayidx52 = getelementptr inbounds [0 x i32], [0 x i32]* @length, i32 0, i32 %conv.lcssa
+ %1 = load i32, i32* %arrayidx52, align 4
+ br i1 undef, label %for.body.57, label %cleanup
+
+for.body.57: ; preds = %if.end.50
+ %i.2157 = add nsw i32 %1, -1
+ unreachable
+
+cleanup: ; preds = %if.end.50, %cond.end, %land.lhs.true, %bb0
+ ret void
+}