return ReplaceInstUsesWith(I, NewPN);
}
-/// FindElementAtOffset - Given a type and a constant offset, determine whether
-/// or not there is a sequence of GEP indices into the type that will land us at
-/// the specified offset. If so, fill them into NewIndices and return the
-/// resultant element type, otherwise return null.
-Type *InstCombiner::FindElementAtOffset(Type *Ty, int64_t Offset,
- SmallVectorImpl<Value*> &NewIndices) {
- if (!TD) return 0;
- if (!Ty->isSized()) return 0;
+/// FindElementAtOffset - Given a pointer type and a constant offset, determine
+/// whether there is a sequence of GEP indices into the pointee type that will
+/// land us at the specified offset. If so, fill them into NewIndices and
+/// return the resultant element type; otherwise return null.
+Type *InstCombiner::FindElementAtOffset(Type *PtrTy, int64_t Offset,
+ SmallVectorImpl<Value*> &NewIndices) {
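+ // Callers must pass a pointer (or vector-of-pointers) type; the indices are
+ // found within its pointee type below.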
+ assert(PtrTy->isPtrOrPtrVectorTy());
+
+ if (!TD)
+ return 0;
+
+ Type *Ty = PtrTy->getPointerElementType();
+ if (!Ty->isSized())
+ return 0;
// Start with the index over the outer type. Note that the type size
// might be zero (even if the offset isn't zero) if the indexed type
// is something like [0 x {int, int}]
- Type *IntPtrTy = TD->getIntPtrType(Ty->getContext());
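+ // Indices are computed in the width of a pointer in PtrTy's address space.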
+ Type *IntPtrTy = TD->getIntPtrType(PtrTy);
int64_t FirstIdx = 0;
if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
FirstIdx = Offset/TySize;
if (TD && SrcElTy->isArrayTy() &&
TD->getTypeAllocSize(SrcElTy->getArrayElementType()) ==
TD->getTypeAllocSize(ResElTy)) {
- Type *IdxType = TD->getIntPtrType(GEP.getContext());
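+ // Use the GEP's own pointer type so the index width matches its address space.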
+ Type *IdxType = TD->getIntPtrType(GEP.getType());
Value *Idx[2] = { Constant::getNullValue(IdxType), GEP.getOperand(1) };
Value *NewGEP = GEP.isInBounds() ?
Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) :
// Earlier transforms ensure that the index has type IntPtrType, which
// considerably simplifies the logic by eliminating implicit casts.
- assert(Idx->getType() == TD->getIntPtrType(GEP.getContext()) &&
+ assert(Idx->getType() == TD->getIntPtrType(GEP.getType()) &&
"Index not cast to pointer width?");
bool NSW;
// Earlier transforms ensure that the index has type IntPtrType, which
// considerably simplifies the logic by eliminating implicit casts.
- assert(Idx->getType() == TD->getIntPtrType(GEP.getContext()) &&
+ assert(Idx->getType() == TD->getIntPtrType(GEP.getType()) &&
"Index not cast to pointer width?");
bool NSW;
// If the multiplication NewIdx * Scale may overflow then the new
// GEP may not be "inbounds".
Value *Off[2] = {
- Constant::getNullValue(TD->getIntPtrType(GEP.getContext())),
+ Constant::getNullValue(TD->getIntPtrType(GEP.getType())),
NewIdx
};
if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
Value *Operand = BCI->getOperand(0);
PointerType *OpType = cast<PointerType>(Operand->getType());
- unsigned OffsetBits = TD->getPointerSizeInBits();
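+ // Accumulate the offset in the pointer width of the source pointer's address space.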
+ unsigned OffsetBits = TD->getPointerTypeSizeInBits(OpType);
APInt Offset(OffsetBits, 0);
if (!isa<BitCastInst>(Operand) &&
GEP.accumulateConstantOffset(*TD, Offset) &&
// field at Offset in 'A's type. If so, we can pull the cast through the
// GEP.
SmallVector<Value*, 8> NewIndices;
- Type *InTy = OpType->getElementType();
- if (FindElementAtOffset(InTy, Offset.getSExtValue(), NewIndices)) {
+ if (FindElementAtOffset(OpType, Offset.getSExtValue(), NewIndices)) {
Value *NGEP = GEP.isInBounds() ?
Builder->CreateInBoundsGEP(Operand, NewIndices) :
Builder->CreateGEP(Operand, NewIndices);
; Tests to make sure elimination of casts is working correctly
; RUN: opt < %s -instcombine -S | FileCheck %s
-target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128-n8:16:32:64"
+target datalayout = "E-p:64:64:64-p1:32:32:32-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128-n8:16:32:64"
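+; Pointers in address space 1 are 32 bits wide (p1:32:32:32), so the _as1 tests
+; below exercise a non-default pointer size.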
@inbuf = external global [32832 x i8] ; <[32832 x i8]*> [#uses=1]
; CHECK-NEXT: ret %s
}
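+; Same fold as the test above, but in address space 1 where pointers are 32 bits:
+; the byte-addressed GEP should become a GEP directly on %s, keeping the i32 index.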
+define %s @test68_as1(%s addrspace(1)* %p, i32 %i) {
+; CHECK-LABEL: @test68_as1(
+ %o = mul i32 %i, 12
+ %q = bitcast %s addrspace(1)* %p to i8 addrspace(1)*
+ %pp = getelementptr inbounds i8 addrspace(1)* %q, i32 %o
+; CHECK-NEXT: getelementptr %s addrspace(1)*
+ %r = bitcast i8 addrspace(1)* %pp to %s addrspace(1)*
+ %l = load %s addrspace(1)* %r
+; CHECK-NEXT: load %s addrspace(1)*
+ ret %s %l
+; CHECK-NEXT: ret %s
+}
+
define double @test69(double *%p, i64 %i) {
; CHECK-LABEL: @test69(
%o = shl nsw i64 %i, 3
; CHECK-NEXT: ret double
}
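+; Address space 1 variant: the byte-addressed GEP scaled by the size of double
+; should become a GEP on the [100 x double] array, with the i16 index
+; sign-extended to the 32-bit pointer width of address space 1.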
+define double @test80_as1([100 x double] addrspace(1)* %p, i16 %i) {
+; CHECK-LABEL: @test80_as1(
+ %tmp = mul nsw i16 %i, 8
+; CHECK-NEXT: sext i16 %i to i32
+ %q = bitcast [100 x double] addrspace(1)* %p to i8 addrspace(1)*
+ %pp = getelementptr i8 addrspace(1)* %q, i16 %tmp
+; CHECK-NEXT: getelementptr [100 x double] addrspace(1)*
+ %r = bitcast i8 addrspace(1)* %pp to double addrspace(1)*
+ %l = load double addrspace(1)* %r
+; CHECK-NEXT: load double addrspace(1)*
+ ret double %l
+; CHECK-NEXT: ret double
+}
+
define double @test81(double *%p, float %f) {
%i = fptosi float %f to i64
%q = bitcast double* %p to i8*