namespace AttributeFuncs {
/// \brief Which attributes cannot be applied to a type.
-AttrBuilder typeIncompatible(const Type *Ty);
+AttrBuilder typeIncompatible(Type *Ty);
} // end AttributeFuncs namespace
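For orientation, the main consumer of this helper is the IR Verifier; a minimal usage sketch (Attrs, Idx, Ty, and V are assumed names, not part of this hunk), using AttrBuilder::overlaps as it exists in the tree at this point:

  // Reject any attribute on slot Idx that cannot apply to the type Ty.
  Assert(!AttrBuilder(Attrs, Idx)
              .overlaps(AttributeFuncs::typeIncompatible(Ty)),
         "Wrong types for attribute", V);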
/// isElementTypeCompatible - Return true if a ConstantDataSequential can be
/// formed with a vector or array of the specified element type.
/// ConstantDataArray only works with normal float and int types that are
/// stored densely in memory, not with things like i42 or x86_f80.
- static bool isElementTypeCompatible(const Type *Ty);
+ static bool isElementTypeCompatible(Type *Ty);
/// getElementAsInteger - If this is a sequential container of integers (of
/// any size), return the specified element in the low bits of a uint64_t.
bool isOpaque() const { return (getSubclassData() & SCDB_HasBody) == 0; }
/// isSized - Return true if this is a sized type.
- bool isSized(SmallPtrSetImpl<const Type*> *Visited = nullptr) const;
+ bool isSized(SmallPtrSetImpl<Type*> *Visited = nullptr) const;
/// hasName - Return true if this is a named struct that has a non-empty name.
bool hasName() const { return SymbolTableEntry != nullptr; }
/// isSized - Return true if it makes sense to take the size of this type. To
/// get the actual size for a particular target, it is reasonable to use the
/// DataLayout subsystem to do this.
///
- bool isSized(SmallPtrSetImpl<const Type*> *Visited = nullptr) const {
+ bool isSized(SmallPtrSetImpl<Type*> *Visited = nullptr) const {
// If it's a primitive, it is always sized.
if (getTypeID() == IntegerTyID || isFloatingPointTy() ||
getTypeID() == PointerTyID ||
/// isSizedDerivedType - Derived types like structures and arrays are sized
/// iff all of the members of the type are sized as well. Since asking for
/// their size is relatively uncommon, move this operation out of line.
- bool isSizedDerivedType(SmallPtrSetImpl<const Type*> *Visited = nullptr) const;
+ bool isSizedDerivedType(SmallPtrSetImpl<Type*> *Visited = nullptr) const;
};
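The Visited set exists so isSized can stop on structs that reach themselves by value (e.g. while a type is still being built up) instead of recursing forever. A minimal sketch of the caller side, assuming an LLVMContext Ctx:

  StructType *S = StructType::create(Ctx, "self");
  S->setBody({S});                      // pathological: contains itself by value
  SmallPtrSet<Type *, 4> Visited;       // note: now Type*, not const Type*
  bool Sized = S->isSized(&Visited);    // false: S is re-visited, the walk stops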
// Printing of types.
/// \brief Check whether the access through \p Ptr has a constant stride.
int llvm::isStridedPtr(ScalarEvolution *SE, Value *Ptr, const Loop *Lp,
const ValueToValueMap &StridesMap) {
- const Type *Ty = Ptr->getType();
+ Type *Ty = Ptr->getType();
assert(Ty->isPointerTy() && "Unexpected non-ptr");
// Make sure that the pointer does not point to aggregate types.
- const PointerType *PtrTy = cast<PointerType>(Ty);
+ auto *PtrTy = cast<PointerType>(Ty);
if (PtrTy->getElementType()->isAggregateType()) {
DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type"
<< *Ptr << "\n");
/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
llvm::Value *llvm::getStrideFromPointer(llvm::Value *Ptr, ScalarEvolution *SE,
Loop *Lp) {
- const PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
+ auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
if (!PtrTy || PtrTy->isAggregateType())
return nullptr;
}
}
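For orientation, the access pattern these two helpers analyze, written out in C terms (hypothetical function, not from the patch): isStridedPtr proves a constant stride, and getStrideFromPointer extracts the symbolic stride value so the vectorizer can version the loop on stride == 1.

  void scale(float *a, long stride, long n) {
    for (long i = 0; i < n; ++i)
      a[i * stride] *= 2.0f;            // "a[i*stride]": symbolic stride
  }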
- SmallPtrSet<const Type*, 4> Visited;
+ SmallPtrSet<Type*, 4> Visited;
if (!Indices.empty() && !Ty->isSized(&Visited))
return Error(ID.Loc, "base element of getelementptr must be sized");
Indices.push_back(Val);
}
- SmallPtrSet<const Type*, 4> Visited;
+ SmallPtrSet<Type*, 4> Visited;
if (!Indices.empty() && !Ty->isSized(&Visited))
return Error(Loc, "base element of getelementptr must be sized");
break;
case Type::VectorTyID:
// if the whole vector is 'undef' just reserve memory for the value.
- const VectorType* VTy = dyn_cast<VectorType>(C->getType());
- const Type *ElemTy = VTy->getElementType();
+ auto *VTy = dyn_cast<VectorType>(C->getType());
+ Type *ElemTy = VTy->getElementType();
unsigned int elemNum = VTy->getNumElements();
Result.AggregateVal.resize(elemNum);
if (ElemTy->isIntegerTy())
break;
}
case Type::VectorTyID: {
- const VectorType *VT = cast<VectorType>(Ty);
- const Type *ElemT = VT->getElementType();
+ auto *VT = cast<VectorType>(Ty);
+ Type *ElemT = VT->getElementType();
const unsigned numElems = VT->getNumElements();
if (ElemT->isFloatTy()) {
Result.AggregateVal.resize(numElems);
}
static GenericValue executeFCMP_BOOL(GenericValue Src1, GenericValue Src2,
- const Type *Ty, const bool val) {
+ Type *Ty, const bool val) {
GenericValue Dest;
if(Ty->isVectorTy()) {
assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
}
static GenericValue executeSelectInst(GenericValue Src1, GenericValue Src2,
- GenericValue Src3, const Type *Ty) {
+ GenericValue Src3, Type *Ty) {
GenericValue Dest;
if(Ty->isVectorTy()) {
assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
void Interpreter::visitSelectInst(SelectInst &I) {
ExecutionContext &SF = ECStack.back();
- const Type * Ty = I.getOperand(0)->getType();
+ Type *Ty = I.getOperand(0)->getType();
GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
GenericValue Dest;
- const Type *Ty = I.getType();
+ Type *Ty = I.getType();
if (Ty->isVectorTy()) {
uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
GenericValue Dest;
- const Type *Ty = I.getType();
+ Type *Ty = I.getType();
if (Ty->isVectorTy()) {
uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
GenericValue Dest;
- const Type *Ty = I.getType();
+ Type *Ty = I.getType();
if (Ty->isVectorTy()) {
size_t src1Size = Src1.AggregateVal.size();
GenericValue Interpreter::executeSExtInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
- const Type *SrcTy = SrcVal->getType();
+ Type *SrcTy = SrcVal->getType();
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
if (SrcTy->isVectorTy()) {
- const Type *DstVecTy = DstTy->getScalarType();
+ Type *DstVecTy = DstTy->getScalarType();
unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
unsigned size = Src.AggregateVal.size();
// the sizes of src and dst vectors must be equal.
for (unsigned i = 0; i < size; i++)
Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.sext(DBitWidth);
} else {
- const IntegerType *DITy = cast<IntegerType>(DstTy);
+ auto *DITy = cast<IntegerType>(DstTy);
unsigned DBitWidth = DITy->getBitWidth();
Dest.IntVal = Src.IntVal.sext(DBitWidth);
}
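What the scalar branch computes, isolated as a small APInt sketch (values chosen only for illustration):

  APInt Src(8, 0x80);                   // i8 -128
  APInt Dst = Src.sext(32);             // i32 0xFFFFFF80: sign bit replicated
  assert(Dst.getSExtValue() == -128);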
GenericValue Interpreter::executeZExtInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
- const Type *SrcTy = SrcVal->getType();
+ Type *SrcTy = SrcVal->getType();
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
if (SrcTy->isVectorTy()) {
- const Type *DstVecTy = DstTy->getScalarType();
+ Type *DstVecTy = DstTy->getScalarType();
unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
unsigned size = Src.AggregateVal.size();
for (unsigned i = 0; i < size; i++)
Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.zext(DBitWidth);
} else {
- const IntegerType *DITy = cast<IntegerType>(DstTy);
+ auto *DITy = cast<IntegerType>(DstTy);
unsigned DBitWidth = DITy->getBitWidth();
Dest.IntVal = Src.IntVal.zext(DBitWidth);
}
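And the zext counterpart, same caveats:

  APInt Src(8, 0x80);                   // i8 0x80
  APInt Dst = Src.zext(32);             // i32 0x00000080: high bits cleared
  assert(Dst.getZExtValue() == 0x80);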
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
if (SrcTy->getTypeID() == Type::VectorTyID) {
- const Type *DstVecTy = DstTy->getScalarType();
- const Type *SrcVecTy = SrcTy->getScalarType();
+ Type *DstVecTy = DstTy->getScalarType();
+ Type *SrcVecTy = SrcTy->getScalarType();
uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
unsigned size = Src.AggregateVal.size();
// the sizes of src and dst vectors must be equal.
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
if (SrcTy->getTypeID() == Type::VectorTyID) {
- const Type *DstVecTy = DstTy->getScalarType();
- const Type *SrcVecTy = SrcTy->getScalarType();
+ Type *DstVecTy = DstTy->getScalarType();
+ Type *SrcVecTy = SrcTy->getScalarType();
uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
unsigned size = Src.AggregateVal.size();
// the sizes of src and dst vectors must be equal
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
- const Type *DstVecTy = DstTy->getScalarType();
+ Type *DstVecTy = DstTy->getScalarType();
unsigned size = Src.AggregateVal.size();
// the sizes of src and dst vectors must be equal
Dest.AggregateVal.resize(size);
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
- const Type *DstVecTy = DstTy->getScalarType();
+ Type *DstVecTy = DstTy->getScalarType();
unsigned size = Src.AggregateVal.size();
// the sizes of src and dst vectors must be equal
Dest.AggregateVal.resize(size);
// scalar src bitcast to vector dst
bool isLittleEndian = getDataLayout().isLittleEndian();
GenericValue TempDst, TempSrc, SrcVec;
- const Type *SrcElemTy;
- const Type *DstElemTy;
+ Type *SrcElemTy;
+ Type *DstElemTy;
unsigned SrcBitSize;
unsigned DstBitSize;
unsigned SrcNum;
//===----------------------------------------------------------------------===//
/// \brief Which attributes cannot be applied to a type.
-AttrBuilder AttributeFuncs::typeIncompatible(const Type *Ty) {
+AttrBuilder AttributeFuncs::typeIncompatible(Type *Ty) {
AttrBuilder Incompatible;
if (!Ty->isIntegerTy())
}
/// \brief Test whether a given ConstantInt is in-range for a SequentialType.
-static bool isIndexInRangeOfSequentialType(const SequentialType *STy,
+static bool isIndexInRangeOfSequentialType(SequentialType *STy,
const ConstantInt *CI) {
- if (const PointerType *PTy = dyn_cast<PointerType>(STy))
+ if (auto *PTy = dyn_cast<PointerType>(STy))
// Only handle pointers to sized types, not pointers to functions.
return PTy->getElementType()->isSized();
uint64_t NumElements = 0;
// Determine the number of elements in our sequential type.
- if (const ArrayType *ATy = dyn_cast<ArrayType>(STy))
+ if (auto *ATy = dyn_cast<ArrayType>(STy))
NumElements = ATy->getNumElements();
- else if (const VectorType *VTy = dyn_cast<VectorType>(STy))
+ else if (auto *VTy = dyn_cast<VectorType>(STy))
NumElements = VTy->getNumElements();
assert((isa<ArrayType>(STy) || NumElements > 0) &&
// dimension.
NewIdxs.resize(Idxs.size());
uint64_t NumElements = 0;
- if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty))
+ if (auto *ATy = dyn_cast<ArrayType>(Ty))
NumElements = ATy->getNumElements();
else
NumElements = cast<VectorType>(Ty)->getNumElements();
}
unsigned ConstantAggregateZero::getNumElements() const {
- const Type *Ty = getType();
- if (const auto *AT = dyn_cast<ArrayType>(Ty))
+ Type *Ty = getType();
+ if (auto *AT = dyn_cast<ArrayType>(Ty))
return AT->getNumElements();
- if (const auto *VT = dyn_cast<VectorType>(Ty))
+ if (auto *VT = dyn_cast<VectorType>(Ty))
return VT->getNumElements();
return Ty->getStructNumElements();
}
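A quick sketch of the three cases, assuming an LLVMContext Ctx:

  auto *AT = ArrayType::get(Type::getInt8Ty(Ctx), 16);
  auto *VT = VectorType::get(Type::getFloatTy(Ctx), 4);
  auto *ST = StructType::get(Ctx, {Type::getInt8Ty(Ctx), Type::getInt64Ty(Ctx)});
  ConstantAggregateZero::get(AT)->getNumElements(); // 16 (array)
  ConstantAggregateZero::get(VT)->getNumElements(); //  4 (vector)
  ConstantAggregateZero::get(ST)->getNumElements(); //  2 (struct fallback)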
}
unsigned UndefValue::getNumElements() const {
- const Type *Ty = getType();
- if (const auto *AT = dyn_cast<ArrayType>(Ty))
+ Type *Ty = getType();
+ if (auto *AT = dyn_cast<ArrayType>(Ty))
return AT->getNumElements();
- if (const auto *VT = dyn_cast<VectorType>(Ty))
+ if (auto *VT = dyn_cast<VectorType>(Ty))
return VT->getNumElements();
return Ty->getStructNumElements();
}
/// isElementTypeCompatible - Return true if a ConstantDataSequential can be
/// formed with a vector or array of the specified element type.
/// ConstantDataArray only works with normal float and int types that are
/// stored densely in memory, not with things like i42 or x86_f80.
-bool ConstantDataSequential::isElementTypeCompatible(const Type *Ty) {
+bool ConstantDataSequential::isElementTypeCompatible(Type *Ty) {
if (Ty->isFloatTy() || Ty->isDoubleTy()) return true;
- if (const IntegerType *IT = dyn_cast<IntegerType>(Ty)) {
+ if (auto *IT = dyn_cast<IntegerType>(Ty)) {
switch (IT->getBitWidth()) {
case 8:
case 16:
// Vector -> Vector conversions are always lossless if the two vector types
// have the same size, otherwise not. Also, 64-bit vector types can be
// converted to x86mmx.
- if (const VectorType *thisPTy = dyn_cast<VectorType>(this)) {
- if (const VectorType *thatPTy = dyn_cast<VectorType>(Ty))
+ if (auto *thisPTy = dyn_cast<VectorType>(this)) {
+ if (auto *thatPTy = dyn_cast<VectorType>(Ty))
return thisPTy->getBitWidth() == thatPTy->getBitWidth();
if (Ty->getTypeID() == Type::X86_MMXTyID &&
thisPTy->getBitWidth() == 64)
}
if (this->getTypeID() == Type::X86_MMXTyID)
- if (const VectorType *thatPTy = dyn_cast<VectorType>(Ty))
+ if (auto *thatPTy = dyn_cast<VectorType>(Ty))
if (thatPTy->getBitWidth() == 64)
return true;
// At this point we have only various mismatches of the first class types
// remaining and ptr->ptr. Just select the lossless conversions. Everything
// else is not lossless. Conservatively assume we can't losslessly convert
// between pointers with different address spaces.
- if (const PointerType *PTy = dyn_cast<PointerType>(this)) {
- if (const PointerType *OtherPTy = dyn_cast<PointerType>(Ty))
+ if (auto *PTy = dyn_cast<PointerType>(this)) {
+ if (auto *OtherPTy = dyn_cast<PointerType>(Ty))
return PTy->getAddressSpace() == OtherPTy->getAddressSpace();
return false;
}
}
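The pointer rule in action, as a small sketch (assuming an LLVMContext Ctx):

  Type *P0 = Type::getInt8PtrTy(Ctx, /*AddressSpace=*/0);
  Type *P1 = Type::getInt8PtrTy(Ctx, /*AddressSpace=*/1);
  P0->canLosslesslyBitCastTo(P0);       // true: same address space
  P0->canLosslesslyBitCastTo(P1);       // false: conservatively not lossless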
bool Type::isEmptyTy() const {
- const ArrayType *ATy = dyn_cast<ArrayType>(this);
- if (ATy) {
+ if (auto *ATy = dyn_cast<ArrayType>(this)) {
unsigned NumElements = ATy->getNumElements();
return NumElements == 0 || ATy->getElementType()->isEmptyTy();
}
- const StructType *STy = dyn_cast<StructType>(this);
- if (STy) {
+ if (auto *STy = dyn_cast<StructType>(this)) {
unsigned NumElements = STy->getNumElements();
for (unsigned i = 0; i < NumElements; ++i)
if (!STy->getElementType(i)->isEmptyTy())
/// getFPMantissaWidth - Return the width of the mantissa of this type. This
/// is only valid on floating point types. If the FP type does not
/// have a stable mantissa (e.g. ppc long double), this method returns -1.
int Type::getFPMantissaWidth() const {
- if (const VectorType *VTy = dyn_cast<VectorType>(this))
+ if (auto *VTy = dyn_cast<VectorType>(this))
return VTy->getElementType()->getFPMantissaWidth();
assert(isFloatingPointTy() && "Not a floating point type!");
if (getTypeID() == HalfTyID) return 11;
/// isSizedDerivedType - Derived types like structures and arrays are sized
/// iff all of the members of the type are sized as well. Since asking for
/// their size is relatively uncommon, move this operation out of line.
-bool Type::isSizedDerivedType(SmallPtrSetImpl<const Type*> *Visited) const {
- if (const ArrayType *ATy = dyn_cast<ArrayType>(this))
+bool Type::isSizedDerivedType(SmallPtrSetImpl<Type*> *Visited) const {
+ if (auto *ATy = dyn_cast<ArrayType>(this))
return ATy->getElementType()->isSized(Visited);
- if (const VectorType *VTy = dyn_cast<VectorType>(this))
+ if (auto *VTy = dyn_cast<VectorType>(this))
return VTy->getElementType()->isSized(Visited);
return cast<StructType>(this)->isSized(Visited);
return Ret;
}
-bool StructType::isSized(SmallPtrSetImpl<const Type*> *Visited) const {
+bool StructType::isSized(SmallPtrSetImpl<Type*> *Visited) const {
if ((getSubclassData() & SCDB_IsSized) != 0)
return true;
if (isOpaque())
return false;
- if (Visited && !Visited->insert(this).second)
+ if (Visited && !Visited->insert(const_cast<StructType*>(this)).second)
return false;
// Okay, our struct is sized if all of the elements are, but if one of the
return cast<SequentialType>(this)->getElementType();
}
bool CompositeType::indexValid(const Value *V) const {
- if (const StructType *STy = dyn_cast<StructType>(this)) {
+ if (auto *STy = dyn_cast<StructType>(this)) {
// Structure indexes require (vectors of) 32-bit integer constants. In the
// vector case all of the indices must be equal.
if (!V->getType()->getScalarType()->isIntegerTy(32))
}
bool CompositeType::indexValid(unsigned Idx) const {
- if (const StructType *STy = dyn_cast<StructType>(this))
+ if (auto *STy = dyn_cast<StructType>(this))
return Idx < STy->getNumElements();
// Sequential types can be indexed by any integer.
return true;
V);
if (PointerType *PTy = dyn_cast<PointerType>(Ty)) {
- SmallPtrSet<const Type*, 4> Visited;
+ SmallPtrSet<Type*, 4> Visited;
if (!PTy->getElementType()->isSized(&Visited)) {
Assert(!Attrs.hasAttribute(Idx, Attribute::ByVal) &&
!Attrs.hasAttribute(Idx, Attribute::InAlloca),
&CI);
const Value *Target = CS.getArgument(2);
- const PointerType *PT = dyn_cast<PointerType>(Target->getType());
+ auto *PT = dyn_cast<PointerType>(Target->getType());
Assert(PT && PT->getElementType()->isFunctionTy(),
"gc.statepoint callee must be of function pointer type", &CI, Target);
FunctionType *TargetFuncType = cast<FunctionType>(PT->getElementType());
}
void Verifier::visitAllocaInst(AllocaInst &AI) {
- SmallPtrSet<const Type*, 4> Visited;
+ SmallPtrSet<Type*, 4> Visited;
PointerType *PTy = AI.getType();
Assert(PTy->getAddressSpace() == 0,
"Allocation instruction pointer not in the generic address space!",
// Assert that result type matches wrapped callee.
const Value *Target = StatepointCS.getArgument(2);
- const PointerType *PT = cast<PointerType>(Target->getType());
- const FunctionType *TargetFuncType =
- cast<FunctionType>(PT->getElementType());
+ auto *PT = cast<PointerType>(Target->getType());
+ auto *TargetFuncType = cast<FunctionType>(PT->getElementType());
Assert(CS.getType() == TargetFuncType->getReturnType(),
"gc.result result type does not match wrapped callee", CS);
break;
U = C;
}
- if (const PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
+ if (auto *Ty = dyn_cast<PointerType>(Obj->getType()))
if (Ty->getAddressSpace() > 255)
// Fast instruction selection doesn't support the special
// address spaces.
bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
- const FunctionType *FTy = F.getFunctionType();
+ FunctionType *FTy = F.getFunctionType();
LocalMemAvailable = ST.getLocalMemorySize();
// If the function has any arguments in the local address space, then it's
// possible these arguments require the entire local memory space, so
// we cannot use local memory in the pass.
for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
- const Type *ParamTy = FTy->getParamType(i);
+ Type *ParamTy = FTy->getParamType(i);
if (ParamTy->isPointerTy() &&
ParamTy->getPointerAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
LocalMemAvailable = 0;
return false;
}
-static VectorType *arrayTypeToVecType(const Type *ArrayTy) {
+static VectorType *arrayTypeToVecType(Type *ArrayTy) {
return VectorType::get(ArrayTy->getArrayElementType(),
ArrayTy->getArrayNumElements());
}
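The intended mapping, for example (sketch; arrayTypeToVecType is file-local to this pass, and Ctx is an assumed LLVMContext):

  Type *Arr = ArrayType::get(Type::getFloatTy(Ctx), 4);  // [4 x float]
  VectorType *Vec = arrayTypeToVecType(Arr);             // <4 x float>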
Offset, Ins[i].Flags.isSExt());
Chains.push_back(Arg.getValue(1));
- const PointerType *ParamTy =
+ auto *ParamTy =
dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base,
uint64_t &Members) {
- if (const StructType *ST = dyn_cast<StructType>(Ty)) {
+ if (auto *ST = dyn_cast<StructType>(Ty)) {
for (unsigned i = 0; i < ST->getNumElements(); ++i) {
uint64_t SubMembers = 0;
if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers))
return false;
Members += SubMembers;
}
- } else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
+ } else if (auto *AT = dyn_cast<ArrayType>(Ty)) {
uint64_t SubMembers = 0;
if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers))
return false;
return false;
Members = 1;
Base = HA_DOUBLE;
- } else if (const VectorType *VT = dyn_cast<VectorType>(Ty)) {
+ } else if (auto *VT = dyn_cast<VectorType>(Ty)) {
Members = 1;
switch (Base) {
case HA_FLOAT:
return whichFPReturnVariant(RetType) != NoFPRet;
}
-static bool needsFPReturnHelper(const FunctionType &FT) {
+static bool needsFPReturnHelper(FunctionType &FT) {
Type* RetType = FT.getReturnType();
return whichFPReturnVariant(RetType) != NoFPRet;
}
CallInst::Create(F, Params, "", &Inst );
} else if (const CallInst *CI = dyn_cast<CallInst>(I)) {
const Value* V = CI->getCalledValue();
- const Type* T = nullptr;
+ Type* T = nullptr;
if (V) T = V->getType();
- const PointerType *PFT=nullptr;
+ PointerType *PFT = nullptr;
if (T) PFT = dyn_cast<PointerType>(T);
- const FunctionType *FT=nullptr;
+ FunctionType *FT = nullptr;
if (PFT) FT = dyn_cast<FunctionType>(PFT->getElementType());
Function *F_ = CI->getCalledFunction();
if (FT && needsFPReturnHelper(*FT) &&
/// This function returns true if Ty is fp128, {f128} or i128 which was
/// originally an fp128.
-static bool originalTypeIsF128(const Type *Ty, const SDNode *CallNode) {
+static bool originalTypeIsF128(Type *Ty, const SDNode *CallNode) {
if (Ty->isFP128Ty())
return true;
if (isABI) {
if (Ty->isFloatingPointTy() || Ty->isIntegerTy()) {
unsigned size = 0;
- if (const IntegerType *ITy = dyn_cast<IntegerType>(Ty)) {
+ if (auto *ITy = dyn_cast<IntegerType>(Ty)) {
size = ITy->getBitWidth();
if (size < 32)
size = 32;
static bool canDemoteGlobalVar(const GlobalVariable *gv, Function const *&f) {
if (!gv->hasInternalLinkage())
return false;
- const PointerType *Pty = gv->getType();
+ PointerType *Pty = gv->getType();
if (Pty->getAddressSpace() != llvm::ADDRESS_SPACE_SHARED)
return false;
const DataLayout &DL = getDataLayout();
// GlobalVariables are always constant pointers themselves.
- const PointerType *PTy = GVar->getType();
+ PointerType *PTy = GVar->getType();
Type *ETy = PTy->getElementType();
if (GVar->hasExternalLinkage()) {
}
std::string
-NVPTXAsmPrinter::getPTXFundamentalTypeStr(const Type *Ty, bool useB4PTR) const {
+NVPTXAsmPrinter::getPTXFundamentalTypeStr(Type *Ty, bool useB4PTR) const {
switch (Ty->getTypeID()) {
default:
llvm_unreachable("unexpected type");
const DataLayout &DL = getDataLayout();
// GlobalVariables are always constant pointers themselves.
- const PointerType *PTy = GVar->getType();
+ PointerType *PTy = GVar->getType();
Type *ETy = PTy->getElementType();
O << ".";
if (Ty->isSingleValueType())
return DL.getPrefTypeAlignment(Ty);
- const ArrayType *ATy = dyn_cast<ArrayType>(Ty);
+ auto *ATy = dyn_cast<ArrayType>(Ty);
if (ATy)
return getOpenCLAlignment(DL, ATy->getElementType());
- const StructType *STy = dyn_cast<StructType>(Ty);
+ auto *STy = dyn_cast<StructType>(Ty);
if (STy) {
unsigned int alignStruct = 1;
// Go through each element of the struct and find the
return alignStruct;
}
- const FunctionType *FTy = dyn_cast<FunctionType>(Ty);
+ auto *FTy = dyn_cast<FunctionType>(Ty);
if (FTy)
return DL.getPointerPrefAlignment();
return DL.getPrefTypeAlignment(Ty);
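Summarizing the fall-through logic as a sketch of expected results (getOpenCLAlignment is file-local; DL and Ctx are assumed to be the module's DataLayout and context):

  Type *F32 = Type::getFloatTy(Ctx);
  Type *Arr = ArrayType::get(F32, 10);
  // Scalars use the preferred alignment; arrays defer to the element type:
  assert(getOpenCLAlignment(DL, Arr) == DL.getPrefTypeAlignment(F32));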
continue;
}
// Just a scalar
- const PointerType *PTy = dyn_cast<PointerType>(Ty);
+ auto *PTy = dyn_cast<PointerType>(Ty);
if (isKernelFunc) {
if (PTy) {
// Special handling for pointer arguments to kernel
}
// The param has the byval attribute, so it should be a pointer.
- const PointerType *PTy = dyn_cast<PointerType>(Ty);
+ auto *PTy = dyn_cast<PointerType>(Ty);
assert(PTy && "Param with byval attribute should be a pointer type");
Type *ETy = PTy->getElementType();
switch (CPV->getType()->getTypeID()) {
case Type::IntegerTyID: {
- const Type *ETy = CPV->getType();
+ Type *ETy = CPV->getType();
if (ETy == Type::getInt8Ty(CPV->getContext())) {
unsigned char c = (unsigned char)cast<ConstantInt>(CPV)->getZExtValue();
ConvertIntToBytes<>(ptr, c);
case Type::FloatTyID:
case Type::DoubleTyID: {
const ConstantFP *CFP = dyn_cast<ConstantFP>(CPV);
- const Type *Ty = CFP->getType();
+ Type *Ty = CFP->getType();
if (Ty == Type::getFloatTy(CPV->getContext())) {
float float32 = (float) CFP->getValueAPF().convertToFloat();
ConvertFloatToBytes(ptr, float32);
// buildTypeNameMap - Run through symbol table looking for type names.
//
-bool NVPTXAsmPrinter::isImageType(const Type *Ty) {
+bool NVPTXAsmPrinter::isImageType(Type *Ty) {
- std::map<const Type *, std::string>::iterator PI = TypeNameMap.find(Ty);
+ auto PI = TypeNameMap.find(Ty);
return PI != TypeNameMap.end() && (!PI->second.compare("struct._image1d_t") ||
!PI->second.compare("struct._image2d_t") ||
void emitFunctionParamList(const MachineFunction &MF, raw_ostream &O);
void setAndEmitFunctionVirtualRegisters(const MachineFunction &MF);
void emitFunctionTempData(const MachineFunction &MF, unsigned &FrameSize);
- bool isImageType(const Type *Ty);
+ bool isImageType(Type *Ty);
void printReturnValStr(const Function *, raw_ostream &O);
void printReturnValStr(const MachineFunction &MF, raw_ostream &O);
bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
// Build the map between type name and ID based on module's type
// symbol table.
- std::map<const Type *, std::string> TypeNameMap;
+ std::map<Type *, std::string> TypeNameMap;
// List of variables demoted to a function scope.
std::map<const Function *, std::vector<const GlobalVariable *> > localDecls;
void emitPTXGlobalVariable(const GlobalVariable *GVar, raw_ostream &O);
void emitPTXAddressSpace(unsigned int AddressSpace, raw_ostream &O) const;
- std::string getPTXFundamentalTypeStr(const Type *Ty, bool = true) const;
+ std::string getPTXFundamentalTypeStr(Type *Ty, bool = true) const;
void printScalarConstant(const Constant *CPV, raw_ostream &O);
void printFPConstant(const ConstantFP *Fp, raw_ostream &O);
void bufferLEByte(const Constant *CPV, int Bytes, AggBuffer *aggBuffer);
if (!Src)
return NVPTX::PTXLdStInstCode::GENERIC;
- if (const PointerType *PT = dyn_cast<PointerType>(Src->getType())) {
+ if (auto *PT = dyn_cast<PointerType>(Src->getType())) {
switch (PT->getAddressSpace()) {
case llvm::ADDRESS_SPACE_LOCAL: return NVPTX::PTXLdStInstCode::LOCAL;
case llvm::ADDRESS_SPACE_GLOBAL: return NVPTX::PTXLdStInstCode::GLOBAL;
}
if (!Src)
return false;
- if (const PointerType *PT = dyn_cast<PointerType>(Src->getType()))
+ if (auto *PT = dyn_cast<PointerType>(Src->getType()))
return (PT->getAddressSpace() == spN);
return false;
}
O << "(";
if (retTy->isFloatingPointTy() || retTy->isIntegerTy()) {
unsigned size = 0;
- if (const IntegerType *ITy = dyn_cast<IntegerType>(retTy)) {
+ if (auto *ITy = dyn_cast<IntegerType>(retTy)) {
size = ITy->getBitWidth();
if (size < 32)
size = 32;
O << "_";
continue;
}
- const PointerType *PTy = dyn_cast<PointerType>(Ty);
+ auto *PTy = dyn_cast<PointerType>(Ty);
assert(PTy && "Param with byval attribute should be a pointer type");
Type *ETy = PTy->getElementType();
// struct or vector
SmallVector<EVT, 16> vtparts;
SmallVector<uint64_t, 16> Offsets;
- const PointerType *PTy = dyn_cast<PointerType>(Args[i].Ty);
+ auto *PTy = dyn_cast<PointerType>(Args[i].Ty);
assert(PTy && "Type of a byval parameter should be pointer");
ComputePTXValueVTs(*this, DAG.getDataLayout(), PTy->getElementType(),
vtparts, &Offsets, 0);
"struct._image3d_t",
"struct._sampler_t" };
- const Type *Ty = arg->getType();
- const PointerType *PTy = dyn_cast<PointerType>(Ty);
+ Type *Ty = arg->getType();
+ auto *PTy = dyn_cast<PointerType>(Ty);
if (!PTy)
return false;
if (!context)
return false;
- const StructType *STy = dyn_cast<StructType>(PTy->getElementType());
+ auto *STy = dyn_cast<StructType>(PTy->getElementType());
const std::string TypeName = STy && !STy->isLiteral() ? STy->getName() : "";
for (int i = 0, e = array_lengthof(specialTypes); i != e; ++i)
/// Returns true if the operand type is exactly twice the native width, and
/// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
/// Used to know whether to use cmpxchg8/16b when expanding atomic operations
/// (otherwise we leave them alone to become __sync_fetch_and_... calls).
-bool X86TargetLowering::needsCmpXchgNb(const Type *MemType) const {
+bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
unsigned OpWidth = MemType->getPrimitiveSizeInBits();
if (OpWidth == 64)
TargetLoweringBase::AtomicRMWExpansionKind
X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
- const Type *MemType = AI->getType();
+ Type *MemType = AI->getType();
// If the operand is too big, we must see if cmpxchg8/16b is available
// and default to library calls otherwise.
LoadInst *
X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
- const Type *MemType = AI->getType();
+ Type *MemType = AI->getType();
// Accesses larger than the native width are turned into cmpxchg/libcalls, so
// there is no benefit in turning such RMWs into loads, and it is actually
// harmful as it introduces a mfence.
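Concretely, "idempotent" here means RMW operations whose stored value equals the value just loaded, so they can become a fence plus an ordinary atomic load; for instance:

  uint32_t x = 42;
  assert((x | 0u)  == x);               // atomicrmw or  %p, 0
  assert((x + 0u)  == x);               // atomicrmw add %p, 0
  assert((x & ~0u) == x);               // atomicrmw and %p, -1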
LoadInst *
lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const override;
- bool needsCmpXchgNb(const Type *MemType) const;
+ bool needsCmpXchgNb(Type *MemType) const;
/// Utility function to emit atomic-load-arith operations (and, or, xor,
/// nand, max, min, umax, umin). It takes the corresponding instruction to
unsigned TyLWidth = 0;
unsigned TyRWidth = 0;
- if (const VectorType *VecTyL = dyn_cast<VectorType>(TyL))
+ if (auto *VecTyL = dyn_cast<VectorType>(TyL))
TyLWidth = VecTyL->getBitWidth();
- if (const VectorType *VecTyR = dyn_cast<VectorType>(TyR))
+ if (auto *VecTyR = dyn_cast<VectorType>(TyR))
TyRWidth = VecTyR->getBitWidth();
if (TyLWidth != TyRWidth)
const std::string ExpStr = Exp ? "exp_" : "";
const std::string SuffixStr = CompileKernel ? "N" : "_n";
const std::string EndingStr = CompileKernel ? "_noabort" : "";
- const Type *ExpType = Exp ? Type::getInt32Ty(*C) : nullptr;
+ Type *ExpType = Exp ? Type::getInt32Ty(*C) : nullptr;
// TODO(glider): for KASan builds add _noabort to error reporting
// functions and make them actually noabort (remove the UnreachableInst).
AsanErrorCallbackSized[AccessIsWrite][Exp] =
// TODO: Once we can get to the GCStrategy, this becomes
// Optional<bool> isGCManagedPointer(const Value *V) const override {
-static bool isGCPointerType(const Type *T) {
- if (const PointerType *PT = dyn_cast<PointerType>(T))
+static bool isGCPointerType(Type *T) {
+ if (auto *PT = dyn_cast<PointerType>(T))
// For the sake of this example GC, we arbitrarily pick addrspace(1) as our
// GC managed heap. We know that a pointer into this heap needs to be
// updated and that no other pointer does.
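Under this example strategy only addrspace(1) pointers are treated as GC references; a sketch, assuming an LLVMContext Ctx:

  PointerType *Heap = PointerType::get(Type::getInt8Ty(Ctx), /*AS=*/1);
  PointerType *Raw  = PointerType::get(Type::getInt8Ty(Ctx), /*AS=*/0);
  isGCPointerType(Heap);                // true: addrspace(1) is the managed heap
  isGCPointerType(Raw);                 // false: ordinary pointers are ignored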
/// Return true if a table with TableSize elements of
/// type ElementType would fit in a target-legal register.
static bool WouldFitInRegister(const DataLayout &DL, uint64_t TableSize,
- const Type *ElementType);
+ Type *ElementType);
private:
// Depending on the contents of the table, it can be represented in
bool SwitchLookupTable::WouldFitInRegister(const DataLayout &DL,
uint64_t TableSize,
- const Type *ElementType) {
- const IntegerType *IT = dyn_cast<IntegerType>(ElementType);
+ Type *ElementType) {
+ auto *IT = dyn_cast<IntegerType>(ElementType);
if (!IT)
return false;
// FIXME: If the type is wider than it needs to be, e.g. i8 but all values