From: Matt Arsenault Date: Tue, 20 Aug 2013 21:20:04 +0000 (+0000) Subject: Teach ConstantFolding about pointer address spaces X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=80f495aab0b1103b880196191af56f1d1c473ea1;p=oota-llvm.git Teach ConstantFolding about pointer address spaces git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@188831 91177308-0d34-0410-b5e6-96231b3b80d8 --- diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp index c063d06861c..03199629cb2 100644 --- a/lib/Analysis/ConstantFolding.cpp +++ b/lib/Analysis/ConstantFolding.cpp @@ -367,7 +367,7 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) { if (CE->getOpcode() == Instruction::IntToPtr && - CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getContext())) { + CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getType())) { return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr, BytesLeft, TD); } @@ -379,26 +379,29 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, static Constant *FoldReinterpretLoadFromConstPtr(Constant *C, const DataLayout &TD) { - Type *LoadTy = cast<PointerType>(C->getType())->getElementType(); + PointerType *PTy = cast<PointerType>(C->getType()); + Type *LoadTy = PTy->getElementType(); IntegerType *IntType = dyn_cast<IntegerType>(LoadTy); // If this isn't an integer load we can't fold it directly. if (!IntType) { + unsigned AS = PTy->getAddressSpace(); + // If this is a float/double load, we can try folding it as an int32/64 load // and then bitcast the result. This can be useful for union cases. Note // that address spaces don't matter here since we're not going to result in // an actual new load. Type *MapTy; if (LoadTy->isHalfTy()) - MapTy = Type::getInt16PtrTy(C->getContext()); + MapTy = Type::getInt16PtrTy(C->getContext(), AS); else if (LoadTy->isFloatTy()) - MapTy = Type::getInt32PtrTy(C->getContext()); + MapTy = Type::getInt32PtrTy(C->getContext(), AS); else if (LoadTy->isDoubleTy()) - MapTy = Type::getInt64PtrTy(C->getContext()); + MapTy = Type::getInt64PtrTy(C->getContext(), AS); else if (LoadTy->isVectorTy()) { - MapTy = IntegerType::get(C->getContext(), - TD.getTypeAllocSizeInBits(LoadTy)); - MapTy = PointerType::getUnqual(MapTy); + MapTy = PointerType::getIntNPtrTy(C->getContext(), + TD.getTypeAllocSizeInBits(LoadTy), + AS); } else return 0; @@ -413,7 +416,7 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C, return 0; GlobalValue *GVal; - APInt Offset(TD.getPointerSizeInBits(), 0); + APInt Offset(TD.getPointerTypeSizeInBits(PTy), 0); if (!IsConstantOffsetFromGlobal(C, GVal, Offset, TD)) return 0; @@ -606,8 +609,10 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, static Constant *CastGEPIndices(ArrayRef<Constant *> Ops, Type *ResultTy, const DataLayout *TD, const TargetLibraryInfo *TLI) { - if (!TD) return 0; - Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext()); + if (!TD) + return 0; + + Type *IntPtrTy = TD->getIntPtrType(ResultTy); bool Any = false; SmallVector<Constant *, 32> NewIdxs; @@ -665,7 +670,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops, !Ptr->getType()->isPointerTy()) return 0; - Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext()); + Type *IntPtrTy = TD->getIntPtrType(Ptr->getType()); Type *ResultElementTy = ResultTy->getPointerElementType(); // If this is a constant expr gep that is effectively computing an @@ -741,7 +746,8 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops, // Also, this helps GlobalOpt do SROA on GlobalVariables.
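// For illustration (hypothetical global @g of type [10 x i32]): the loop below
// rebuilds the index list from the accumulated byte offset, so an over-indexed
// getelementptr [10 x i32]* @g, i64 0, i64 12 -- byte offset 48 -- would be
// recomputed as the in-bounds indices i64 1, i64 2.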
Type *Ty = Ptr->getType(); assert(Ty->isPointerTy() && "Forming regular GEP of non-pointer type"); - SmallVector<Constant *, 32> NewIdxs; + SmallVector<Constant *, 32> NewIdxs; + do { if (SequentialType *ATy = dyn_cast<SequentialType>(Ty)) { if (ATy->isPointerTy()) { @@ -756,7 +762,6 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops, // Determine which element of the array the offset points into. APInt ElemSize(BitWidth, TD->getTypeAllocSize(ATy->getElementType())); - IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext()); if (ElemSize == 0) // The element size is 0. This may be [0 x Ty]*, so just use a zero // index for this level and proceed to the next level to see if it can @@ -968,7 +973,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy, if (TD && CE->getOpcode() == Instruction::IntToPtr) { Constant *Input = CE->getOperand(0); unsigned InWidth = Input->getType()->getScalarSizeInBits(); - if (TD->getPointerSizeInBits() < InWidth) { + if (TD->getPointerTypeSizeInBits(CE->getType()) < InWidth) { Constant *Mask = ConstantInt::get(CE->getContext(), APInt::getLowBitsSet(InWidth, TD->getPointerSizeInBits())); @@ -983,11 +988,19 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy, // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if // the int size is >= the ptr size. This requires knowing the width of a // pointer, so it can't be done in ConstantExpr::getCast. - if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) - if (TD && - TD->getPointerSizeInBits() <= CE->getType()->getScalarSizeInBits() && - CE->getOpcode() == Instruction::PtrToInt) - return FoldBitCast(CE->getOperand(0), DestTy, *TD); + if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) { + if (TD && CE->getOpcode() == Instruction::PtrToInt) { + Constant *SrcPtr = CE->getOperand(0); + unsigned SrcPtrSize = TD->getPointerTypeSizeInBits(SrcPtr->getType()); + unsigned MidIntSize = CE->getType()->getScalarSizeInBits(); + + if (MidIntSize >= SrcPtrSize) { + unsigned DestPtrSize = TD->getPointerTypeSizeInBits(DestTy); + if (SrcPtrSize == DestPtrSize) + return FoldBitCast(CE->getOperand(0), DestTy, *TD); + } + } + } return ConstantExpr::getCast(Opcode, Ops[0], DestTy); case Instruction::Trunc: @@ -1039,8 +1052,8 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate, // around to know if bit truncation is happening. if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops0)) { if (TD && Ops1->isNullValue()) { - Type *IntPtrTy = TD->getIntPtrType(CE0->getContext()); if (CE0->getOpcode() == Instruction::IntToPtr) { + Type *IntPtrTy = TD->getIntPtrType(CE0->getType()); // Convert the integer value to the right size to ensure we get the // proper extension or truncation. Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0), @@ -1051,19 +1064,21 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate, // Only do this transformation if the int is intptrty in size, otherwise // there is a truncation or extension that we aren't modeling.
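// Sketch of the intent, assuming an address space with 16-bit pointers:
// icmp eq (ptrtoint p to i16), 0 uses the full pointer width and can become
// icmp eq p, null, while ptrtoint p to i8 drops bits we are not modeling and
// is left untouched.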
- if (CE0->getOpcode() == Instruction::PtrToInt && - CE0->getType() == IntPtrTy) { - Constant *C = CE0->getOperand(0); - Constant *Null = Constant::getNullValue(C->getType()); - return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI); + if (CE0->getOpcode() == Instruction::PtrToInt) { + Type *IntPtrTy = TD->getIntPtrType(CE0->getOperand(0)->getType()); + if (CE0->getType() == IntPtrTy) { + Constant *C = CE0->getOperand(0); + Constant *Null = Constant::getNullValue(C->getType()); + return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI); + } } } if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops1)) { if (TD && CE0->getOpcode() == CE1->getOpcode()) { - Type *IntPtrTy = TD->getIntPtrType(CE0->getContext()); - if (CE0->getOpcode() == Instruction::IntToPtr) { + Type *IntPtrTy = TD->getIntPtrType(CE0->getType()); + // Convert the integer value to the right size to ensure we get the // proper extension or truncation. Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0), @@ -1075,11 +1090,17 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate, // Only do this transformation if the int is intptrty in size, otherwise // there is a truncation or extension that we aren't modeling. - if ((CE0->getOpcode() == Instruction::PtrToInt && - CE0->getType() == IntPtrTy && - CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType())) - return ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0), - CE1->getOperand(0), TD, TLI); + if (CE0->getOpcode() == Instruction::PtrToInt) { + Type *IntPtrTy = TD->getIntPtrType(CE0->getOperand(0)->getType()); + if (CE0->getType() == IntPtrTy && + CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) { + return ConstantFoldCompareInstOperands(Predicate, + CE0->getOperand(0), + CE1->getOperand(0), + TD, + TLI); + } + } } } diff --git a/test/Other/constant-fold-gep-address-spaces.ll b/test/Other/constant-fold-gep-address-spaces.ll new file mode 100644 index 00000000000..f6abe7468bb --- /dev/null +++ b/test/Other/constant-fold-gep-address-spaces.ll @@ -0,0 +1,235 @@ +; "PLAIN" - No optimizations. This tests the target-independent +; constant folder. +; RUN: opt -S -o - %s | FileCheck --check-prefix=PLAIN %s + +target datalayout = "e-p:128:128:128-p1:32:32:32-p2:8:8:8-p3:16:16:16-p4:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32" + +; The automatic constant folder in opt does not have targetdata access, so +; it can't fold gep arithmetic, in general. However, the constant folder run +; from instcombine and global opt can use targetdata.
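+; With the datalayout above, pointers are 128 bits wide by default and 32, 8,
+; 16 and 64 bits wide in address spaces 1, 2, 3 and 4, so a target-aware run
+; of the folder would cast GEP indices in those address spaces to i32, i8,
+; i16 and i64 respectively.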
+; PLAIN: @G8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1) +@G8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1) +; PLAIN: @G1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -1) +@G1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -1) +; PLAIN: @F8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2) +@F8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2) +; PLAIN: @F1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -2) +@F1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -2) +; PLAIN: @H8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* null, i32 -1) +@H8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 0 to i8 addrspace(1)*), i32 -1) +; PLAIN: @H1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* null, i8 -1) +@H1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 0 to i1 addrspace(2)*), i8 -1) + + +; The target-independent folder should be able to do some clever +; simplifications on sizeof, alignof, and offsetof expressions. The +; target-dependent folder should fold these down to constants. +; PLAIN-X: @a = constant i64 mul (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2310) +@a = constant i64 mul (i64 3, i64 mul (i64 ptrtoint ({[7 x double], [7 x double]} addrspace(4)* getelementptr ({[7 x double], [7 x double]} addrspace(4)* null, i64 11) to i64), i64 5)) + +; PLAIN-X: @b = constant i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) +@b = constant i64 ptrtoint ([13 x double] addrspace(4)* getelementptr ({i1, [13 x double]} addrspace(4)* null, i64 0, i32 1) to i64) + +; PLAIN-X: @c = constant i64 mul nuw (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2) +@c = constant i64 ptrtoint (double addrspace(4)* getelementptr ({double, double, double, double} addrspace(4)* null, i64 0, i32 2) to i64) + +; PLAIN-X: @d = constant i64 mul nuw (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 11) +@d = constant i64 ptrtoint (double addrspace(4)* getelementptr ([13 x double] addrspace(4)* null, i64 0, i32 11) to i64) + +; PLAIN-X: @e = constant i64 ptrtoint (double addrspace(4)* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64) +@e = constant i64 ptrtoint (double addrspace(4)* getelementptr ({double, float, double, double} addrspace(4)* null, i64 0, i32 2) to i64) + +; PLAIN-X: @f = constant i64 1 +@f = constant i64 ptrtoint (<{ i16, i128 }> addrspace(4)* getelementptr ({i1, <{ i16, i128 }>} addrspace(4)* null, i64 0, i32 1) to i64) + +; PLAIN-X: @g = constant i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) +@g = constant i64 ptrtoint ({double, double} addrspace(4)* getelementptr ({i1, {double, double}} addrspace(4)* null, i64 0, i32 1) to i64) + +; PLAIN-X: @h = constant i64 ptrtoint (i1 addrspace(2)* getelementptr (i1 addrspace(2)* null, i32 1) to i64) +@h = constant i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i64 1) to i64) + +; 
PLAIN-X: @i = constant i64 ptrtoint (i1 addrspace(2)* getelementptr ({ i1, i1 addrspace(2)* }* null, i64 0, i32 1) to i64) +@i = constant i64 ptrtoint (double addrspace(4)* getelementptr ({i1, double} addrspace(4)* null, i64 0, i32 1) to i64) + +; The target-dependent folder should cast GEP indices to integer-sized pointers. + +; PLAIN: @M = constant i64 addrspace(4)* getelementptr (i64 addrspace(4)* null, i32 1) +; PLAIN: @N = constant i64 addrspace(4)* getelementptr ({ i64, i64 } addrspace(4)* null, i32 0, i32 1) +; PLAIN: @O = constant i64 addrspace(4)* getelementptr ([2 x i64] addrspace(4)* null, i32 0, i32 1) + +@M = constant i64 addrspace(4)* getelementptr (i64 addrspace(4)* null, i32 1) +@N = constant i64 addrspace(4)* getelementptr ({ i64, i64 } addrspace(4)* null, i32 0, i32 1) +@O = constant i64 addrspace(4)* getelementptr ([2 x i64] addrspace(4)* null, i32 0, i32 1) + +; Fold GEP of a GEP. Very simple cases are folded. + +; PLAIN-X: @Y = global [3 x { i32, i32 }]addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 2) +@ext = external addrspace(3) global [3 x { i32, i32 }] +@Y = global [3 x { i32, i32 }]addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 1), i64 1) + +; PLAIN-X: @Z = global i32addrspace(3)* getelementptr inbounds (i32addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 0, i64 1, i32 0), i64 1) +@Z = global i32addrspace(3)* getelementptr inbounds (i32addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 0, i64 1, i32 0), i64 1) + + +; Duplicate all of the above as function return values rather than +; global initializers. + +; PLAIN: define i8 addrspace(1)* @goo8() #0 { +; PLAIN: %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1) to i8 addrspace(1)* +; PLAIN: ret i8 addrspace(1)* %t +; PLAIN: } +; PLAIN: define i1 addrspace(2)* @goo1() #0 { +; PLAIN: %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -1) to i1 addrspace(2)* +; PLAIN: ret i1 addrspace(2)* %t +; PLAIN: } +; PLAIN: define i8 addrspace(1)* @foo8() #0 { +; PLAIN: %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2) to i8 addrspace(1)* +; PLAIN: ret i8 addrspace(1)* %t +; PLAIN: } +; PLAIN: define i1 addrspace(2)* @foo1() #0 { +; PLAIN: %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -2) to i1 addrspace(2)* +; PLAIN: ret i1 addrspace(2)* %t +; PLAIN: } +; PLAIN: define i8 addrspace(1)* @hoo8() #0 { +; PLAIN: %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* null, i32 -1) to i8 addrspace(1)* +; PLAIN: ret i8 addrspace(1)* %t +; PLAIN: } +; PLAIN: define i1 addrspace(2)* @hoo1() #0 { +; PLAIN: %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* null, i32 -1) to i1 addrspace(2)* +; PLAIN: ret i1 addrspace(2)* %t +; PLAIN: } +define i8 addrspace(1)* @goo8() #0 { + %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1) to i8 addrspace(1)* + ret i8 addrspace(1)* %t +} +define i1 addrspace(2)* @goo1() #0 { + %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -1) to i1 addrspace(2)* + ret i1 addrspace(2)* %t +} +define i8 addrspace(1)* @foo8() #0 { + %t = bitcast i8 addrspace(1)* getelementptr (i8 
addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2) to i8 addrspace(1)* + ret i8 addrspace(1)* %t +} +define i1 addrspace(2)* @foo1() #0 { + %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -2) to i1 addrspace(2)* + ret i1 addrspace(2)* %t +} +define i8 addrspace(1)* @hoo8() #0 { + %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 0 to i8 addrspace(1)*), i32 -1) to i8 addrspace(1)* + ret i8 addrspace(1)* %t +} +define i1 addrspace(2)* @hoo1() #0 { + %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 0 to i1 addrspace(2)*), i32 -1) to i1 addrspace(2)* + ret i1 addrspace(2)* %t +} + +; PLAIN-X: define i64 @fa() #0 { +; PLAIN-X: %t = bitcast i64 mul (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2310) to i64 +; PLAIN-X: ret i64 %t +; PLAIN-X: } +; PLAIN-X: define i64 @fb() #0 { +; PLAIN-X: %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64 +; PLAIN-X: ret i64 %t +; PLAIN-X: } +; PLAIN-X: define i64 @fc() #0 { +; PLAIN-X: %t = bitcast i64 mul nuw (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2) to i64 +; PLAIN-X: ret i64 %t +; PLAIN-X: } +; PLAIN-X: define i64 @fd() #0 { +; PLAIN-X: %t = bitcast i64 mul nuw (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 11) to i64 +; PLAIN-X: ret i64 %t +; PLAIN-X: } +; PLAIN-X: define i64 @fe() #0 { +; PLAIN-X: %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64) to i64 +; PLAIN-X: ret i64 %t +; PLAIN-X: } +; PLAIN-X: define i64 @ff() #0 { +; PLAIN-X: %t = bitcast i64 1 to i64 +; PLAIN-X: ret i64 %t +; PLAIN-X: } +; PLAIN-X: define i64 @fg() #0 { +; PLAIN-X: %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64 +; PLAIN-X: ret i64 %t +; PLAIN-X: } +; PLAIN-X: define i64 @fh() #0 { +; PLAIN-X: %t = bitcast i64 ptrtoint (i1 addrspace(2)* getelementptr (i1 addrspace(2)* null, i32 1) to i64) to i64 +; PLAIN-X: ret i64 %t +; PLAIN-X: } +; PLAIN-X: define i64 @fi() #0 { +; PLAIN-X: %t = bitcast i64 ptrtoint (i1 addrspace(2)* getelementptr ({ i1, i1 addrspace(2)* }* null, i64 0, i32 1) to i64) to i64 +; PLAIN-X: ret i64 %t +; PLAIN-X: } +define i64 @fa() #0 { + %t = bitcast i64 mul (i64 3, i64 mul (i64 ptrtoint ({[7 x double], [7 x double]}* getelementptr ({[7 x double], [7 x double]}* null, i64 11) to i64), i64 5)) to i64 + ret i64 %t +} +define i64 @fb() #0 { + %t = bitcast i64 ptrtoint ([13 x double] addrspace(4)* getelementptr ({i1, [13 x double]} addrspace(4)* null, i64 0, i32 1) to i64) to i64 + ret i64 %t +} +define i64 @fc() #0 { + %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({double, double, double, double} addrspace(4)* null, i64 0, i32 2) to i64) to i64 + ret i64 %t +} +define i64 @fd() #0 { + %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ([13 x double] addrspace(4)* null, i64 0, i32 11) to i64) to i64 + ret i64 %t +} +define i64 @fe() #0 { + %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({double, float, double, double} addrspace(4)* null, i64 0, i32 2) to i64) to i64 + ret i64 %t +} +define i64 @ff() #0 { + %t = bitcast i64 ptrtoint (<{ i16, i128 }> addrspace(4)* getelementptr ({i1, <{ i16, i128 }>} addrspace(4)* null, i64 0, i32 1) to i64) to i64 + 
ret i64 %t +} +define i64 @fg() #0 { + %t = bitcast i64 ptrtoint ({double, double} addrspace(4)* getelementptr ({i1, {double, double}} addrspace(4)* null, i64 0, i32 1) to i64) to i64 + ret i64 %t +} +define i64 @fh() #0 { + %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64) to i64 + ret i64 %t +} +define i64 @fi() #0 { + %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({i1, double}addrspace(4)* null, i64 0, i32 1) to i64) to i64 + ret i64 %t +} + +; PLAIN: define i64* @fM() #0 { +; PLAIN: %t = bitcast i64* getelementptr (i64* null, i32 1) to i64* +; PLAIN: ret i64* %t +; PLAIN: } +; PLAIN: define i64* @fN() #0 { +; PLAIN: %t = bitcast i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1) to i64* +; PLAIN: ret i64* %t +; PLAIN: } +; PLAIN: define i64* @fO() #0 { +; PLAIN: %t = bitcast i64* getelementptr ([2 x i64]* null, i32 0, i32 1) to i64* +; PLAIN: ret i64* %t +; PLAIN: } + +define i64* @fM() #0 { + %t = bitcast i64* getelementptr (i64* null, i32 1) to i64* + ret i64* %t +} +define i64* @fN() #0 { + %t = bitcast i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1) to i64* + ret i64* %t +} +define i64* @fO() #0 { + %t = bitcast i64* getelementptr ([2 x i64]* null, i32 0, i32 1) to i64* + ret i64* %t +} + +; PLAIN: define i32 addrspace(1)* @fZ() #0 { +; PLAIN: %t = bitcast i32 addrspace(1)* getelementptr inbounds (i32 addrspace(1)* getelementptr inbounds ([3 x { i32, i32 }] addrspace(1)* @ext2, i64 0, i64 1, i32 0), i64 1) to i32 addrspace(1)* +; PLAIN: ret i32 addrspace(1)* %t +; PLAIN: } +@ext2 = external addrspace(1) global [3 x { i32, i32 }] +define i32 addrspace(1)* @fZ() #0 { + %t = bitcast i32 addrspace(1)* getelementptr inbounds (i32 addrspace(1)* getelementptr inbounds ([3 x { i32, i32 }] addrspace(1)* @ext2, i64 0, i64 1, i32 0), i64 1) to i32 addrspace(1)* + ret i32 addrspace(1)* %t +} + +attributes #0 = { nounwind } diff --git a/test/Transforms/ConstProp/loads.ll b/test/Transforms/ConstProp/loads.ll index 795dc07111f..d05db47dcaa 100644 --- a/test/Transforms/ConstProp/loads.ll +++ b/test/Transforms/ConstProp/loads.ll @@ -1,5 +1,5 @@ -; RUN: opt < %s -default-data-layout="e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64" -instcombine -S | FileCheck %s --check-prefix=LE -; RUN: opt < %s -default-data-layout="E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64" -instcombine -S | FileCheck %s --check-prefix=BE +; RUN: opt < %s -default-data-layout="e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64" -instcombine -S | FileCheck %s --check-prefix=LE +; RUN: opt < %s -default-data-layout="E-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64" -instcombine -S | FileCheck %s --check-prefix=BE ; {{ 0xDEADBEEF, 0xBA }, 0xCAFEBABE} @g1 = constant {{i32,i8},i32} {{i32,i8} { i32 -559038737, i8 186 }, i32 -889275714 } @@ -155,7 +155,7 @@ entry: @test12g = private constant [6 x i8] c"a\00b\00\00\00" define i16 @test12() { - %a = load i16* getelementptr inbounds ([3 x i16]* bitcast ([6 x i8]* @test12g to [3 x i16]*), i32 0, i64 1) + %a = load i16* getelementptr inbounds ([3 x i16]* bitcast ([6 x i8]* @test12g to [3 x i16]*), i32 0, i64 1) ret i16 %a ; 0x0062 @@ -194,6 +194,20 @@ entry: ; BE: ret i64 1 } +; Check with address space pointers 
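+; The datalayout strings in the RUN lines give address space 1 pointers a
+; width of 16 bits, so the i16 load below covers exactly the first element of
+; the array, inttoptr (i16 1 to i8 addrspace(1)*), and should fold to 1 for
+; both byte orders.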
+@g6_as1 = constant [2 x i8 addrspace(1)*] [i8 addrspace(1)* inttoptr (i16 1 to i8 addrspace(1)*), i8 addrspace(1)* inttoptr (i16 2 to i8 addrspace(1)*)] +define i16 @test14_as1() nounwind { +entry: + %tmp = load i16* bitcast ([2 x i8 addrspace(1)*]* @g6_as1 to i16*) + ret i16 %tmp + +; LE: @test14_as1 +; LE: ret i16 1 + +; BE: @test14_as1 +; BE: ret i16 1 +} + define i64 @test15() nounwind { entry: %tmp = load i64* bitcast (i8** getelementptr inbounds ([2 x i8*]* @g6, i32 0, i64 1) to i64*) diff --git a/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll b/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll new file mode 100644 index 00000000000..9b17847cbd2 --- /dev/null +++ b/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll @@ -0,0 +1,224 @@ +; RUN: opt -S -instcombine %s -o - | FileCheck %s +target datalayout = "e-p:32:32:32-p1:64:64:64-p2:8:8:8-p3:16:16:16-p4:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32" + +@g = addrspace(3) global i32 89 + +@const_zero_i8_as1 = addrspace(1) constant i8 0 +@const_zero_i32_as1 = addrspace(1) constant i32 0 + +@const_zero_i8_as2 = addrspace(2) constant i8 0 +@const_zero_i32_as2 = addrspace(2) constant i32 0 + +@const_zero_i8_as3 = addrspace(3) constant i8 0 +@const_zero_i32_as3 = addrspace(3) constant i32 0 + +; Test constant folding of inttoptr (ptrtoint constantexpr) +; The intermediate integer size is the same as the pointer size +define i32 addrspace(3)* @test_constant_fold_inttoptr_as_pointer_same_size() { +; CHECK-LABEL: @test_constant_fold_inttoptr_as_pointer_same_size( +; CHECK-NEXT: ret i32 addrspace(3)* @const_zero_i32_as3 + %x = ptrtoint i32 addrspace(3)* @const_zero_i32_as3 to i32 + %y = inttoptr i32 %x to i32 addrspace(3)* + ret i32 addrspace(3)* %y +} + +; The intermediate integer size is larger than the pointer size +define i32 addrspace(2)* @test_constant_fold_inttoptr_as_pointer_smaller() { +; CHECK-LABEL: @test_constant_fold_inttoptr_as_pointer_smaller( +; CHECK-NEXT: ret i32 addrspace(2)* @const_zero_i32_as2 + %x = ptrtoint i32 addrspace(2)* @const_zero_i32_as2 to i16 + %y = inttoptr i16 %x to i32 addrspace(2)* + ret i32 addrspace(2)* %y +} + +; Different address spaces that are the same size, but they are +; different so there should be a bitcast. 
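+; In this file's datalayout both address space 3 and address space 4 use
+; 16-bit pointers, so the i16 round trip below loses no bits and the
+; ptrtoint/inttoptr pair can fold to a bitcast between the two pointer types.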
+define i32 addrspace(4)* @test_constant_fold_inttoptr_as_pointer_smaller_different_as() { +; CHECK-LABEL: @test_constant_fold_inttoptr_as_pointer_smaller_different_as( +; CHECK-NEXT: ret i32 addrspace(4)* bitcast (i32 addrspace(3)* @const_zero_i32_as3 to i32 addrspace(4)*) + %x = ptrtoint i32 addrspace(3)* @const_zero_i32_as3 to i16 + %y = inttoptr i16 %x to i32 addrspace(4)* + ret i32 addrspace(4)* %y +} + +; Make sure we don't introduce a bitcast between different sized +; address spaces when folding this +define i32 addrspace(2)* @test_constant_fold_inttoptr_as_pointer_smaller_different_size_as() { +; CHECK-LABEL: @test_constant_fold_inttoptr_as_pointer_smaller_different_size_as( +; CHECK-NEXT: ret i32 addrspace(2)* inttoptr (i32 ptrtoint (i32 addrspace(3)* @const_zero_i32_as3 to i32) to i32 addrspace(2)*) + %x = ptrtoint i32 addrspace(3)* @const_zero_i32_as3 to i32 + %y = inttoptr i32 %x to i32 addrspace(2)* + ret i32 addrspace(2)* %y +} + +; The intermediate integer size is too small, nothing should happen +define i32 addrspace(3)* @test_constant_fold_inttoptr_as_pointer_larger() { +; CHECK-LABEL: @test_constant_fold_inttoptr_as_pointer_larger( +; CHECK-NEXT: ret i32 addrspace(3)* inttoptr (i8 ptrtoint (i32 addrspace(3)* @const_zero_i32_as3 to i8) to i32 addrspace(3)*) + %x = ptrtoint i32 addrspace(3)* @const_zero_i32_as3 to i8 + %y = inttoptr i8 %x to i32 addrspace(3)* + ret i32 addrspace(3)* %y +} + +define i8 @const_fold_ptrtoint() { +; CHECK-LABEL: @const_fold_ptrtoint( +; CHECK-NEXT: ret i8 4 + ret i8 ptrtoint (i32 addrspace(2)* inttoptr (i4 4 to i32 addrspace(2)*) to i8) +} + +; Test that mask happens when the destination pointer is smaller than +; the original +define i8 @const_fold_ptrtoint_mask() { +; CHECK-LABEL: @const_fold_ptrtoint_mask( +; CHECK-NEXT: ret i8 1 + ret i8 ptrtoint (i32 addrspace(3)* inttoptr (i32 257 to i32 addrspace(3)*) to i8) +} + +define i32 addrspace(3)* @const_inttoptr() { +; CHECK-LABEL: @const_inttoptr( +; CHECK-NEXT: ret i32 addrspace(3)* inttoptr (i16 4 to i32 addrspace(3)*) + %p = inttoptr i16 4 to i32 addrspace(3)* + ret i32 addrspace(3)* %p +} + +define i16 @const_ptrtoint() { +; CHECK-LABEL: @const_ptrtoint( +; CHECK-NEXT: ret i16 ptrtoint (i32 addrspace(3)* @g to i16) + %i = ptrtoint i32 addrspace(3)* @g to i16 + ret i16 %i +} + +define i16 @const_inttoptr_ptrtoint() { +; CHECK-LABEL: @const_inttoptr_ptrtoint( +; CHECK-NEXT: ret i16 9 + ret i16 ptrtoint (i32 addrspace(3)* inttoptr (i16 9 to i32 addrspace(3)*) to i16) +} + +define i1 @constant_fold_cmp_constantexpr_inttoptr() { +; CHECK-LABEL: @constant_fold_cmp_constantexpr_inttoptr( +; CHECK-NEXT: ret i1 true + %x = icmp eq i32 addrspace(3)* inttoptr (i16 0 to i32 addrspace(3)*), null + ret i1 %x +} + +define i1 @constant_fold_inttoptr_null(i16 %i) { +; CHECK-LABEL: @constant_fold_inttoptr_null( +; CHECK-NEXT: ret i1 false + %x = icmp eq i32 addrspace(3)* inttoptr (i16 99 to i32 addrspace(3)*), inttoptr (i16 0 to i32 addrspace(3)*) + ret i1 %x +} + +define i1 @constant_fold_ptrtoint_null() { +; CHECK-LABEL: @constant_fold_ptrtoint_null( +; CHECK-NEXT: ret i1 false + %x = icmp eq i16 ptrtoint (i32 addrspace(3)* @g to i16), ptrtoint (i32 addrspace(3)* null to i16) + ret i1 %x +} + +define i1 @constant_fold_ptrtoint_null_2() { +; CHECK-LABEL: @constant_fold_ptrtoint_null_2( +; CHECK-NEXT: ret i1 false + %x = icmp eq i16 ptrtoint (i32 addrspace(3)* null to i16), ptrtoint (i32 addrspace(3)* @g to i16) + ret i1 %x +} + +define i1 @constant_fold_ptrtoint() { +; CHECK-LABEL: @constant_fold_ptrtoint( 
+; CHECK-NEXT: ret i1 true + %x = icmp eq i16 ptrtoint (i32 addrspace(3)* @g to i16), ptrtoint (i32 addrspace(3)* @g to i16) + ret i1 %x +} + +define i1 @constant_fold_inttoptr() { +; CHECK-LABEL: @constant_fold_inttoptr( +; CHECK-NEXT: ret i1 false + %x = icmp eq i32 addrspace(3)* inttoptr (i16 99 to i32 addrspace(3)*), inttoptr (i16 27 to i32 addrspace(3)*) + ret i1 %x +} + +@g_float_as3 = addrspace(3) global float zeroinitializer +@g_v4f_as3 = addrspace(3) global <4 x float> zeroinitializer + +define float @constant_fold_bitcast_ftoi_load() { +; CHECK-LABEL: @constant_fold_bitcast_ftoi_load( +; CHECK: load float addrspace(3)* bitcast (i32 addrspace(3)* @g to float addrspace(3)*), align 4 + %a = load float addrspace(3)* bitcast (i32 addrspace(3)* @g to float addrspace(3)*), align 4 + ret float %a +} + +define i32 @constant_fold_bitcast_itof_load() { +; CHECK-LABEL: @constant_fold_bitcast_itof_load( +; CHECK: load i32 addrspace(3)* bitcast (float addrspace(3)* @g_float_as3 to i32 addrspace(3)*), align 4 + %a = load i32 addrspace(3)* bitcast (float addrspace(3)* @g_float_as3 to i32 addrspace(3)*), align 4 + ret i32 %a +} + +define <4 x i32> @constant_fold_bitcast_vector_as() { +; CHECK-LABEL: @constant_fold_bitcast_vector_as( +; CHECK: load <4 x float> addrspace(3)* @g_v4f_as3, align 16 +; CHECK: bitcast <4 x float> %1 to <4 x i32> + %a = load <4 x i32> addrspace(3)* bitcast (<4 x float> addrspace(3)* @g_v4f_as3 to <4 x i32> addrspace(3)*), align 4 + ret <4 x i32> %a +} + +@i32_array_as3 = addrspace(3) global [10 x i32] zeroinitializer + +define i32 @test_cast_gep_small_indices_as() { +; CHECK-LABEL: @test_cast_gep_small_indices_as( +; CHECK: load i32 addrspace(3)* getelementptr inbounds ([10 x i32] addrspace(3)* @i32_array_as3, i16 0, i16 0), align 16 + %p = getelementptr [10 x i32] addrspace(3)* @i32_array_as3, i7 0, i7 0 + %x = load i32 addrspace(3)* %p, align 4 + ret i32 %x +} + +%struct.foo = type { float, float, [4 x i32], i32 addrspace(3)* } + +@constant_fold_global_ptr = addrspace(3) global %struct.foo { + float 0.0, + float 0.0, + [4 x i32] zeroinitializer, + i32 addrspace(3)* getelementptr ([10 x i32] addrspace(3)* @i32_array_as3, i64 0, i64 0) +} + +define i32 @test_cast_gep_large_indices_as() { +; CHECK-LABEL: @test_cast_gep_large_indices_as( +; CHECK: load i32 addrspace(3)* getelementptr inbounds ([10 x i32] addrspace(3)* @i32_array_as3, i16 0, i16 0), align 16 + %p = getelementptr [10 x i32] addrspace(3)* @i32_array_as3, i64 0, i64 0 + %x = load i32 addrspace(3)* %p, align 4 + ret i32 %x +} + +define i32 @test_constant_cast_gep_struct_indices_as() { +; CHECK-LABEL: @test_constant_cast_gep_struct_indices_as( +; CHECK: load i32 addrspace(3)* getelementptr inbounds (%struct.foo addrspace(3)* @constant_fold_global_ptr, i16 0, i32 2, i16 2), align 8 + %x = getelementptr %struct.foo addrspace(3)* @constant_fold_global_ptr, i18 0, i32 2, i12 2 + %y = load i32 addrspace(3)* %x, align 4 + ret i32 %y +} + +@constant_data_as3 = addrspace(3) constant [5 x i32] [i32 1, i32 2, i32 3, i32 4, i32 5] + +define i32 @test_read_data_from_global_as3() { +; CHECK-LABEL: @test_read_data_from_global_as3( +; CHECK-NEXT: ret i32 2 + %x = getelementptr [5 x i32] addrspace(3)* @constant_data_as3, i32 0, i32 1 + %y = load i32 addrspace(3)* %x, align 4 + ret i32 %y +} + +@a = addrspace(1) constant i32 9 +@b = addrspace(1) constant i32 23 +@c = addrspace(1) constant i32 34 +@d = addrspace(1) constant i32 99 + +@ptr_array = addrspace(2) constant [4 x i32 addrspace(1)*] [ i32 addrspace(1)* @a, i32 
addrspace(1)* @b, i32 addrspace(1)* @c, i32 addrspace(1)* @d] +@indirect = addrspace(0) constant i32 addrspace(1)* addrspace(2)* getelementptr inbounds ([4 x i32 addrspace(1)*] addrspace(2)* @ptr_array, i1 0, i32 2) + +define i32 @constant_through_array_as_ptrs() { +; CHECK-LABEL: @constant_through_array_as_ptrs( +; CHECK-NEXT: ret i32 34 + %p = load i32 addrspace(1)* addrspace(2)* addrspace(0)* @indirect, align 4 + %a = load i32 addrspace(1)* addrspace(2)* %p, align 4 + %b = load i32 addrspace(1)* %a, align 4 + ret i32 %b +} diff --git a/test/Transforms/InstCombine/getelementptr.ll b/test/Transforms/InstCombine/getelementptr.ll index 92be87c58f8..cd7a92550cb 100644 --- a/test/Transforms/InstCombine/getelementptr.ll +++ b/test/Transforms/InstCombine/getelementptr.ll @@ -1,6 +1,7 @@ ; RUN: opt < %s -instcombine -S | FileCheck %s -target datalayout = "e-p:64:64" +target datalayout = "e-p:64:64-p1:16:16-p2:32:32:32" + %intstruct = type { i32 } %pair = type { i32, i32 } %struct.B = type { double } @@ -8,6 +9,7 @@ target datalayout = "e-p:64:64" @Global = constant [10 x i8] c"helloworld" +@Global_as1 = addrspace(1) constant [10 x i8] c"helloworld" ; Test noop elimination define i32* @test1(i32* %I) { @@ -17,6 +19,13 @@ define i32* @test1(i32* %I) { ; CHECK: ret i32* %I } +define i32 addrspace(1)* @test1_as1(i32 addrspace(1)* %I) { + %A = getelementptr i32 addrspace(1)* %I, i64 0 + ret i32 addrspace(1)* %A +; CHECK-LABEL: @test1_as1( +; CHECK: ret i32 addrspace(1)* %I +} + ; Test noop elimination define i32* @test2(i32* %I) { %A = getelementptr i32* %I @@ -52,6 +61,42 @@ define void @test5(i8 %B) { ; CHECK: store i8 %B, i8* getelementptr inbounds ([10 x i8]* @Global, i64 0, i64 4) } +define void @test5_as1(i8 %B) { + ; This should be turned into a constexpr instead of being an instruction + %A = getelementptr [10 x i8] addrspace(1)* @Global_as1, i16 0, i16 4 + store i8 %B, i8 addrspace(1)* %A + ret void +; CHECK-LABEL: @test5_as1( +; CHECK: store i8 %B, i8 addrspace(1)* getelementptr inbounds ([10 x i8] addrspace(1)* @Global_as1, i16 0, i16 4) +} + +%as1_ptr_struct = type { i32 addrspace(1)* } +%as2_ptr_struct = type { i32 addrspace(2)* } + +@global_as2 = addrspace(2) global i32 zeroinitializer +@global_as1_as2_ptr = addrspace(1) global %as2_ptr_struct { i32 addrspace(2)* @global_as2 } + +; This should be turned into a constexpr instead of being an instruction +define void @test_evaluate_gep_nested_as_ptrs(i32 addrspace(2)* %B) { +; CHECK-LABEL: @test_evaluate_gep_nested_as_ptrs( +; CHECK-NEXT: store i32 addrspace(2)* %B, i32 addrspace(2)* addrspace(1)* getelementptr inbounds (%as2_ptr_struct addrspace(1)* @global_as1_as2_ptr, i16 0, i32 0), align 8 +; CHECK-NEXT: ret void + %A = getelementptr %as2_ptr_struct addrspace(1)* @global_as1_as2_ptr, i16 0, i32 0 + store i32 addrspace(2)* %B, i32 addrspace(2)* addrspace(1)* %A + ret void +} + +@arst = addrspace(1) global [4 x i8 addrspace(2)*] zeroinitializer + +define void @test_evaluate_gep_as_ptrs_array(i8 addrspace(2)* %B) { +; CHECK-LABEL: @test_evaluate_gep_as_ptrs_array( +; CHECK-NEXT: store i8 addrspace(2)* %B, i8 addrspace(2)* addrspace(1)* getelementptr inbounds ([4 x i8 addrspace(2)*] addrspace(1)* @arst, i16 0, i16 2), align 4 + +; CHECK-NEXT: ret void + %A = getelementptr [4 x i8 addrspace(2)*] addrspace(1)* @arst, i16 0, i16 2 + store i8 addrspace(2)* %B, i8 addrspace(2)* addrspace(1)* %A + ret void +} define i32* @test7(i32* %I, i64 %C, i64 %D) { %A = getelementptr i32* %I, i64 %C @@ -259,6 +304,15 @@ define i32 @test20(i32* %P, i32 %A, 
i32 %B) { ; CHECK: icmp eq i32 %A, 0 } +define i32 @test20_as1(i32 addrspace(1)* %P, i32 %A, i32 %B) { + %tmp.4 = getelementptr inbounds i32 addrspace(1)* %P, i32 %A + %tmp.6 = icmp eq i32 addrspace(1)* %tmp.4, %P + %tmp.7 = zext i1 %tmp.6 to i32 + ret i32 %tmp.7 +; CHECK-LABEL: @test20_as1( +; CHECK: icmp eq i16 %1, 0 +} + define i32 @test21() { %pbob1 = alloca %intstruct