X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FTargetData.cpp;h=5bcd6583635bed8003917f59b382932603738d17;hb=762ccea600158bb317dcccdff3303e942426cb71;hp=d4c9e58c47d06b1abb408ccd29674b6d675b30e9;hpb=3e15bf33e024b9df9e89351a165acfdb1dde51ed;p=oota-llvm.git

diff --git a/lib/Target/TargetData.cpp b/lib/Target/TargetData.cpp
index d4c9e58c47d..5bcd6583635 100644
--- a/lib/Target/TargetData.cpp
+++ b/lib/Target/TargetData.cpp
@@ -2,8 +2,8 @@
 //
 //                     The LLVM Compiler Infrastructure
 //
-// This file was developed by the LLVM research group and is distributed under
-// the University of Illinois Open Source License. See LICENSE.TXT for details.
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
 //
@@ -23,19 +23,20 @@
 #include "llvm/Support/GetElementPtrTypeIterator.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/System/Mutex.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/StringExtras.h"
 #include <algorithm>
 #include <cstdlib>
-#include <sstream>
 using namespace llvm;
 
 // Handle the Pass registration stuff necessary to use TargetData's.
-namespace {
-  // Register the default SparcV9 implementation...
-  RegisterPass<TargetData> X("targetdata", "Target Data Layout");
-}
-const char TargetData::ID = 0;
+
+// Register the default SparcV9 implementation...
+static RegisterPass<TargetData> X("targetdata", "Target Data Layout", false,
+                                  true);
+char TargetData::ID = 0;
 
 //===----------------------------------------------------------------------===//
 // Support for StructLayout
@@ -46,23 +47,20 @@ StructLayout::StructLayout(const StructType *ST, const TargetData &TD) {
   StructSize = 0;
   NumElements = ST->getNumElements();
 
-  // Loop over each of the elements, placing them in memory...
+  // Loop over each of the elements, placing them in memory.
   for (unsigned i = 0, e = NumElements; i != e; ++i) {
     const Type *Ty = ST->getElementType(i);
-    unsigned TyAlign;
-    uint64_t TySize;
-    TyAlign = (ST->isPacked() ? 1 : TD.getABITypeAlignment(Ty));
-    TySize = TD.getTypeSize(Ty);
+    unsigned TyAlign = ST->isPacked() ? 1 : TD.getABITypeAlignment(Ty);
 
-    // Add padding if necessary to make the data element aligned properly...
-    if (StructSize % TyAlign != 0)
-      StructSize = (StructSize/TyAlign + 1) * TyAlign;   // Add padding...
+    // Add padding if necessary to align the data element properly.
+    if ((StructSize & (TyAlign-1)) != 0)
+      StructSize = TargetData::RoundUpAlignment(StructSize, TyAlign);
 
-    // Keep track of maximum alignment constraint
+    // Keep track of maximum alignment constraint.
     StructAlignment = std::max(TyAlign, StructAlignment);
 
     MemberOffsets[i] = StructSize;
-    StructSize += TySize;                  // Consume space for this data item
+    StructSize += TD.getTypeAllocSize(Ty); // Consume space for this data item
   }
 
   // Empty structures have alignment of 1 byte.
@@ -70,8 +68,8 @@ StructLayout::StructLayout(const StructType *ST, const TargetData &TD) {
 
   // Add padding to the end of the struct so that it could be put in an array
   // and all array elements would be aligned correctly.
-  if (StructSize % StructAlignment != 0)
-    StructSize = (StructSize/StructAlignment + 1) * StructAlignment;
+  if ((StructSize & (StructAlignment-1)) != 0)
+    StructSize = TargetData::RoundUpAlignment(StructSize, StructAlignment);
 }
 
@@ -83,9 +81,15 @@ unsigned StructLayout::getElementContainingOffset(uint64_t Offset) const {
   assert(SI != &MemberOffsets[0] && "Offset not in structure type!");
   --SI;
   assert(*SI <= Offset && "upper_bound didn't work");
-  assert((SI == &MemberOffsets[0] || *(SI-1) < Offset) &&
+  assert((SI == &MemberOffsets[0] || *(SI-1) <= Offset) &&
          (SI+1 == &MemberOffsets[NumElements] || *(SI+1) > Offset) &&
          "Upper bound didn't work!");
+
+  // Multiple fields can have the same offset if any of them are zero sized.
+  // For example, in { i32, [0 x i32], i32 }, searching for offset 4 will stop
+  // at the i32 element, because it is the last element at that offset. This is
+  // the right one to return, because anything after it will have a higher
+  // offset, implying that this element is non-empty.
   return SI-&MemberOffsets[0];
 }
 
@@ -96,6 +100,7 @@ unsigned StructLayout::getElementContainingOffset(uint64_t Offset) const {
 TargetAlignElem
 TargetAlignElem::get(AlignTypeEnum align_type, unsigned char abi_align,
                      unsigned char pref_align, uint32_t bit_width) {
+  assert(abi_align <= pref_align && "Preferred alignment worse than ABI!");
   TargetAlignElem retval;
   retval.AlignType = align_type;
   retval.ABIAlign = abi_align;
@@ -146,17 +151,18 @@ const TargetAlignElem TargetData::InvalidAlignmentElem =
 [E|e]: Endianness. "E" specifies a big-endian target data model, "e"
 specifies a little-endian target data model.

- p:<size>:<abi_align>:<pref_align>: Pointer size, ABI and preferred
- alignment.
+ p:@verbatim<size>:<abi_align>:<pref_align>@endverbatim: Pointer size,
+ ABI and preferred alignment.

- <type><size>:<abi_align>:<pref_align>: Numeric type alignment. Type is
- one of i|f|v|a, corresponding to integer, floating point, vector (aka
- packed) or aggregate. Size indicates the size, e.g., 32 or 64 bits.
+ @verbatim<type><size>:<abi_align>:<pref_align>@endverbatim: Numeric type
+ alignment. Type is
+ one of i|f|v|a, corresponding to integer, floating point, vector, or
+ aggregate. Size indicates the size, e.g., 32 or 64 bits.
 \p
- The default string, fully specified is:
+ The default string, fully specified, is:

- "E-p:64:64:64-a0:0:0-f32:32:32-f64:0:64"
- "-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:0:64"
+ "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64"
+ "-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64"
  "-v64:64:64-v128:128:128"

 Note that in the case of aggregates, 0 is the default ABI and preferred
@@ -166,23 +172,24 @@ const TargetAlignElem TargetData::InvalidAlignmentElem =
 void TargetData::init(const std::string &TargetDescription) {
   std::string temp = TargetDescription;
 
+  LayoutMap = 0;
   LittleEndian = false;
   PointerMemSize = 8;
   PointerABIAlign = 8;
   PointerPrefAlign = PointerABIAlign;
 
   // Default alignments
-  setAlignment(INTEGER_ALIGN,   1,  1, 1);   // Bool
-  setAlignment(INTEGER_ALIGN,   1,  1, 8);   // Byte
-  setAlignment(INTEGER_ALIGN,   2,  2, 16);  // short
-  setAlignment(INTEGER_ALIGN,   4,  4, 32);  // int
-  setAlignment(INTEGER_ALIGN,   4,  8, 64);  // long
+  setAlignment(INTEGER_ALIGN,   1,  1, 1);   // i1
+  setAlignment(INTEGER_ALIGN,   1,  1, 8);   // i8
+  setAlignment(INTEGER_ALIGN,   2,  2, 16);  // i16
+  setAlignment(INTEGER_ALIGN,   4,  4, 32);  // i32
+  setAlignment(INTEGER_ALIGN,   4,  8, 64);  // i64
   setAlignment(FLOAT_ALIGN,     4,  4, 32);  // float
   setAlignment(FLOAT_ALIGN,     8,  8, 64);  // double
-  setAlignment(VECTOR_ALIGN,    8,  8, 64);  // v2i32
+  setAlignment(VECTOR_ALIGN,    8,  8, 64);  // v2i32, v1i64, ...
   setAlignment(VECTOR_ALIGN,   16, 16, 128); // v16i8, v8i16, v4i32, ...
-  setAlignment(AGGREGATE_ALIGN, 0,  8,  0);  // struct, union, class, ...
-
+  setAlignment(AGGREGATE_ALIGN, 0,  8,  0);  // struct
+
   while (!temp.empty()) {
     std::string token = getToken(temp, "-");
     std::string arg0 = getToken(token, ":");
@@ -204,10 +211,16 @@ void TargetData::init(const std::string &TargetDescription) {
     case 'i':
     case 'v':
     case 'f':
-    case 'a': {
-      AlignTypeEnum align_type =
-        (*p == 'i' ? INTEGER_ALIGN : (*p == 'f' ? FLOAT_ALIGN :
-         (*p == 'v' ? VECTOR_ALIGN : AGGREGATE_ALIGN)));
+    case 'a':
+    case 's': {
+      AlignTypeEnum align_type = STACK_ALIGN; // Dummy init, silence warning
+      switch(*p) {
+      case 'i': align_type = INTEGER_ALIGN; break;
+      case 'v': align_type = VECTOR_ALIGN; break;
+      case 'f': align_type = FLOAT_ALIGN; break;
+      case 'a': align_type = AGGREGATE_ALIGN; break;
+      case 's': align_type = STACK_ALIGN; break;
+      }
       uint32_t size = (uint32_t) atoi(++p);
       unsigned char abi_align = atoi(getToken(token, ":").c_str()) / 8;
       unsigned char pref_align = atoi(getToken(token, ":").c_str()) / 8;
@@ -223,13 +236,14 @@ void TargetData::init(const std::string &TargetDescription) {
 }
 
 TargetData::TargetData(const Module *M)
-  : ImmutablePass((intptr_t)&ID) {
+  : ImmutablePass(&ID) {
   init(M->getDataLayout());
 }
 
 void
 TargetData::setAlignment(AlignTypeEnum align_type, unsigned char abi_align,
                          unsigned char pref_align, uint32_t bit_width) {
+  assert(abi_align <= pref_align && "Preferred alignment worse than ABI!");
   for (unsigned i = 0, e = Alignments.size(); i != e; ++i) {
     if (Alignments[i].AlignType == align_type &&
         Alignments[i].TypeBitWidth == bit_width) {
@@ -247,7 +261,8 @@ TargetData::setAlignment(AlignTypeEnum align_type, unsigned char abi_align,
 /// getAlignmentInfo - Return the alignment (either ABI if ABIInfo = true or
 /// preferred if ABIInfo = false) the target wants for the specified datatype.
 unsigned TargetData::getAlignmentInfo(AlignTypeEnum AlignType,
-                                      uint32_t BitWidth, bool ABIInfo) const {
+                                      uint32_t BitWidth, bool ABIInfo,
+                                      const Type *Ty) const {
   // Check to see if we have an exact match and remember the best match we see.
   int BestMatchIdx = -1;
   int LargestInt = -1;
@@ -257,15 +272,14 @@ unsigned TargetData::getAlignmentInfo(AlignTypeEnum AlignType,
       return ABIInfo ? Alignments[i].ABIAlign : Alignments[i].PrefAlign;
 
     // The best match so far depends on what we're looking for.
-    if (AlignType == VECTOR_ALIGN) {
+    if (AlignType == VECTOR_ALIGN && Alignments[i].AlignType == VECTOR_ALIGN) {
       // If this is a specification for a smaller vector type, we will fall back
       // to it. This happens because <128 x double> can be implemented in terms
       // of 64 <2 x double>.
-      if (Alignments[i].AlignType == VECTOR_ALIGN &&
-          Alignments[i].TypeBitWidth < BitWidth) {
+      if (Alignments[i].TypeBitWidth < BitWidth) {
         // Verify that we pick the biggest of the fallbacks.
         if (BestMatchIdx == -1 ||
-            Alignments[BestMatchIdx].TypeBitWidth < BitWidth)
+            Alignments[BestMatchIdx].TypeBitWidth < Alignments[i].TypeBitWidth)
           BestMatchIdx = i;
       }
     } else if (AlignType == INTEGER_ALIGN &&
@@ -283,63 +297,51 @@ unsigned TargetData::getAlignmentInfo(AlignTypeEnum AlignType,
     }
   }
 
-  // For integers, if we didn't find a best match, use the largest one found.
-  if (BestMatchIdx == -1)
-    BestMatchIdx = LargestInt;
-
   // Okay, we didn't find an exact solution. Fall back here depending on what
   // is being looked for.
-  assert(BestMatchIdx != -1 && "Didn't find alignment info for this datatype!");
+  if (BestMatchIdx == -1) {
+    // If we didn't find an integer alignment, fall back on most conservative.
+    if (AlignType == INTEGER_ALIGN) {
+      BestMatchIdx = LargestInt;
+    } else {
+      assert(AlignType == VECTOR_ALIGN && "Unknown alignment type!");
+
+      // If we didn't find a vector size that is smaller or equal to this type,
+      // then we will end up scalarizing this to its element type. Just return
+      // the alignment of the element.
+      return getAlignment(cast<VectorType>(Ty)->getElementType(), ABIInfo);
+    }
+  }
 
   // Since we got a "best match" index, just return it.
   return ABIInfo ? Alignments[BestMatchIdx].ABIAlign
                  : Alignments[BestMatchIdx].PrefAlign;
 }
 
-/// LayoutInfo - The lazy cache of structure layout information maintained by
-/// TargetData. Note that the struct types must have been free'd before
-/// llvm_shutdown is called (and thus this is deallocated) because all the
-/// targets with cached elements should have been destroyed.
-///
-typedef std::pair<const TargetData*, const StructType*> LayoutKey;
-
-struct DenseMapLayoutKeyInfo {
-  static inline LayoutKey getEmptyKey() { return LayoutKey(0, 0); }
-  static inline LayoutKey getTombstoneKey() {
-    return LayoutKey((TargetData*)(intptr_t)-1, 0);
-  }
-  static unsigned getHashValue(const LayoutKey &Val) {
-    return DenseMapKeyInfo<void*>::getHashValue(Val.first) ^
-           DenseMapKeyInfo<void*>::getHashValue(Val.second);
-  }
-  static bool isPod() { return true; }
-};
-
-typedef DenseMap<LayoutKey, StructLayout*, DenseMapLayoutKeyInfo> LayoutInfoTy;
-static ManagedStatic<LayoutInfoTy> LayoutInfo;
-
+typedef DenseMap<const StructType*, StructLayout*> LayoutInfoTy;
 
 TargetData::~TargetData() {
-  if (LayoutInfo.isConstructed()) {
-    // Remove any layouts for this TD.
-    LayoutInfoTy &TheMap = *LayoutInfo;
-    for (LayoutInfoTy::iterator I = TheMap.begin(), E = TheMap.end();
-         I != E; ) {
-      if (I->first.first == this) {
-        I->second->~StructLayout();
-        free(I->second);
-        TheMap.erase(I++);
-      } else {
-        ++I;
-      }
-    }
+  if (!LayoutMap)
+    return;
+
+  // Remove any layouts for this TD.
+  LayoutInfoTy &TheMap = *static_cast<LayoutInfoTy*>(LayoutMap);
+  for (LayoutInfoTy::iterator I = TheMap.begin(), E = TheMap.end(); I != E; ) {
+    I->second->~StructLayout();
+    free(I->second);
+    TheMap.erase(I++);
   }
+
+  delete static_cast<LayoutInfoTy*>(LayoutMap);
 }
 
 const StructLayout *TargetData::getStructLayout(const StructType *Ty) const {
-  LayoutInfoTy &TheMap = *LayoutInfo;
+  if (!LayoutMap)
+    LayoutMap = static_cast<void*>(new LayoutInfoTy());
 
-  StructLayout *&SL = TheMap[LayoutKey(this, Ty)];
+  LayoutInfoTy &TheMap = *static_cast<LayoutInfoTy*>(LayoutMap);
+
+  StructLayout *&SL = TheMap[Ty];
   if (SL) return SL;
 
   // Otherwise, create the struct layout. Because it is variable length, we
@@ -361,14 +363,15 @@ const StructLayout *TargetData::getStructLayout(const StructType *Ty) const {
 /// removed, this method must be called whenever a StructType is removed to
 /// avoid a dangling pointer in this cache.
 void TargetData::InvalidateStructLayoutInfo(const StructType *Ty) const {
-  if (!LayoutInfo.isConstructed()) return;  // No cache.
-
-  LayoutInfoTy::iterator I = LayoutInfo->find(LayoutKey(this, Ty));
-  if (I != LayoutInfo->end()) {
-    I->second->~StructLayout();
-    free(I->second);
-    LayoutInfo->erase(I);
-  }
+  if (!LayoutMap) return;  // No cache.
+
+  LayoutInfoTy* LayoutInfo = static_cast<LayoutInfoTy*>(LayoutMap);
+  LayoutInfoTy::iterator I = LayoutInfo->find(Ty);
+  if (I == LayoutInfo->end()) return;
+
+  I->second->~StructLayout();
+  free(I->second);
+  LayoutInfo->erase(I);
 }
 
 
@@ -390,71 +393,43 @@ std::string TargetData::getStringRepresentation() const {
 }
 
 
-uint64_t TargetData::getTypeSize(const Type *Ty) const {
+uint64_t TargetData::getTypeSizeInBits(const Type *Ty) const {
   assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
   switch (Ty->getTypeID()) {
   case Type::LabelTyID:
   case Type::PointerTyID:
-    return getPointerSize();
+    return getPointerSizeInBits();
   case Type::ArrayTyID: {
    const ArrayType *ATy = cast<ArrayType>(Ty);
-    uint64_t Size;
-    unsigned char Alignment;
-    Size = getTypeSize(ATy->getElementType());
-    Alignment = getABITypeAlignment(ATy->getElementType());
-    uint64_t AlignedSize = (Size + Alignment - 1)/Alignment*Alignment;
-    return AlignedSize*ATy->getNumElements();
+    return getTypeAllocSizeInBits(ATy->getElementType())*ATy->getNumElements();
  }
-  case Type::StructTyID: {
+  case Type::StructTyID:
    // Get the layout annotation... which is lazily created on demand.
-    const StructLayout *Layout = getStructLayout(cast<StructType>(Ty));
-    return Layout->getSizeInBytes();
-  }
-  case Type::IntegerTyID: {
-    unsigned BitWidth = cast<IntegerType>(Ty)->getBitWidth();
-    if (BitWidth <= 8) {
-      return 1;
-    } else if (BitWidth <= 16) {
-      return 2;
-    } else if (BitWidth <= 32) {
-      return 4;
-    } else if (BitWidth <= 64) {
-      return 8;
-    } else {
-      // The size of this > 64 bit type is chosen as a multiple of the
-      // preferred alignment of the largest "native" size the target supports.
-      // We first obtain the the alignment info for this type and then compute
-      // the next largest multiple of that size.
-      uint64_t size = getAlignmentInfo(INTEGER_ALIGN, BitWidth, false) * 8;
-      return (((BitWidth / (size)) + (BitWidth % size != 0)) * size) / 8;
-    }
-    break;
-  }
+    return getStructLayout(cast<StructType>(Ty))->getSizeInBits();
+  case Type::IntegerTyID:
+    return cast<IntegerType>(Ty)->getBitWidth();
   case Type::VoidTyID:
-    return 1;
+    return 8;
   case Type::FloatTyID:
-    return 4;
+    return 32;
   case Type::DoubleTyID:
-    return 8;
-  case Type::VectorTyID: {
-    const VectorType *PTy = cast<VectorType>(Ty);
-    return PTy->getBitWidth() / 8;
-  }
+    return 64;
+  case Type::PPC_FP128TyID:
+  case Type::FP128TyID:
+    return 128;
+  // In memory objects this is always aligned to a higher boundary, but
+  // only 80 bits contain information.
+  case Type::X86_FP80TyID:
+    return 80;
+  case Type::VectorTyID:
+    return cast<VectorType>(Ty)->getBitWidth();
   default:
-    assert(0 && "TargetData::getTypeSize(): Unsupported type");
+    llvm_unreachable("TargetData::getTypeSizeInBits(): Unsupported type");
    break;
   }
   return 0;
 }
 
-uint64_t TargetData::getTypeSizeInBits(const Type *Ty) const {
-  if (Ty->isInteger())
-    return cast<IntegerType>(Ty)->getBitWidth();
-  else
-    return getTypeSize(Ty) * 8;
-}
-
-
 /*!
   \param abi_or_pref Flag that determines which alignment is returned. true
   returns the ABI alignment, false returns the preferred alignment.
@@ -468,7 +443,7 @@ unsigned char TargetData::getAlignment(const Type *Ty, bool abi_or_pref) const {
   assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
 
   switch (Ty->getTypeID()) {
-  /* Early escape for the non-numeric types */
+  // Early escape for the non-numeric types.
   case Type::LabelTyID:
   case Type::PointerTyID:
     return (abi_or_pref
@@ -476,15 +451,15 @@ unsigned char TargetData::getAlignment(const Type *Ty, bool abi_or_pref) const {
             : getPointerPrefAlignment());
   case Type::ArrayTyID:
     return getAlignment(cast<ArrayType>(Ty)->getElementType(), abi_or_pref);
-
+
   case Type::StructTyID: {
     // Packed structure types always have an ABI alignment of one.
     if (cast<StructType>(Ty)->isPacked() && abi_or_pref)
       return 1;
-
+
     // Get the layout annotation... which is lazily created on demand.
     const StructLayout *Layout = getStructLayout(cast<StructType>(Ty));
-    unsigned Align = getAlignmentInfo(AGGREGATE_ALIGN, 0, abi_or_pref);
+    unsigned Align = getAlignmentInfo(AGGREGATE_ALIGN, 0, abi_or_pref, Ty);
     return std::max(Align, (unsigned)Layout->getAlignment());
   }
   case Type::IntegerTyID:
@@ -493,30 +468,37 @@ unsigned char TargetData::getAlignment(const Type *Ty, bool abi_or_pref) const {
   case Type::VoidTyID:
     AlignType = INTEGER_ALIGN;
     break;
   case Type::FloatTyID:
   case Type::DoubleTyID:
+  // PPC_FP128TyID and FP128TyID have different data contents, but the
+  // same size and alignment, so they look the same here.
+  case Type::PPC_FP128TyID:
+  case Type::FP128TyID:
+  case Type::X86_FP80TyID:
     AlignType = FLOAT_ALIGN;
     break;
-  case Type::VectorTyID: {
-    const VectorType *VTy = cast<VectorType>(Ty);
-    // Degenerate vectors are assumed to be scalar-ized
-    if (VTy->getNumElements() == 1)
-      return getAlignment(VTy->getElementType(), abi_or_pref);
-    else
-      AlignType = VECTOR_ALIGN;
+  case Type::VectorTyID:
+    AlignType = VECTOR_ALIGN;
     break;
-  }
   default:
-    assert(0 && "Bad type for getAlignment!!!");
+    llvm_unreachable("Bad type for getAlignment!!!");
     break;
   }
 
-  return getAlignmentInfo((AlignTypeEnum)AlignType, getTypeSize(Ty) * 8,
-                          abi_or_pref);
+  return getAlignmentInfo((AlignTypeEnum)AlignType, getTypeSizeInBits(Ty),
+                          abi_or_pref, Ty);
 }
 
 unsigned char TargetData::getABITypeAlignment(const Type *Ty) const {
   return getAlignment(Ty, true);
 }
 
+unsigned char TargetData::getCallFrameTypeAlignment(const Type *Ty) const {
+  for (unsigned i = 0, e = Alignments.size(); i != e; ++i)
+    if (Alignments[i].AlignType == STACK_ALIGN)
+      return Alignments[i].ABIAlign;
+
+  return getABITypeAlignment(Ty);
+}
+
 unsigned char TargetData::getPrefTypeAlignment(const Type *Ty) const {
   return getAlignment(Ty, false);
 }
 
@@ -529,13 +511,8 @@ unsigned char TargetData::getPreferredTypeAlignmentShift(const Type *Ty) const {
 
 /// getIntPtrType - Return an unsigned integer type that is the same size or
 /// greater to the host pointer size.
-const Type *TargetData::getIntPtrType() const {
-  switch (getPointerSize()) {
-  default: assert(0 && "Unknown pointer size!");
-  case 2: return Type::Int16Ty;
-  case 4: return Type::Int32Ty;
-  case 8: return Type::Int64Ty;
-  }
+const IntegerType *TargetData::getIntPtrType(LLVMContext &C) const {
+  return IntegerType::get(C, getPointerSizeInBits());
 }
 
 
@@ -549,7 +526,8 @@ uint64_t TargetData::getIndexedOffset(const Type *ptrTy, Value* const* Indices,
          TI = gep_type_begin(ptrTy, Indices, Indices+NumIndices);
   for (unsigned CurIDX = 0; CurIDX != NumIndices; ++CurIDX, ++TI) {
     if (const StructType *STy = dyn_cast<StructType>(*TI)) {
-      assert(Indices[CurIDX]->getType() == Type::Int32Ty &&
+      assert(Indices[CurIDX]->getType() ==
+             Type::getInt32Ty(ptrTy->getContext()) &&
             "Illegal struct idx");
      unsigned FieldNo = cast<ConstantInt>(Indices[CurIDX])->getZExtValue();
 
@@ -567,29 +545,36 @@ uint64_t TargetData::getIndexedOffset(const Type *ptrTy, Value* const* Indices,
 
       // Get the array index and the size of each array element.
       int64_t arrayIdx = cast<ConstantInt>(Indices[CurIDX])->getSExtValue();
-      Result += arrayIdx * (int64_t)getTypeSize(Ty);
+      Result += arrayIdx * (int64_t)getTypeAllocSize(Ty);
     }
   }
 
   return Result;
 }
 
-/// getPreferredAlignmentLog - Return the preferred alignment of the
-/// specified global, returned in log form. This includes an explicitly
-/// requested alignment (if the global has one).
-unsigned TargetData::getPreferredAlignmentLog(const GlobalVariable *GV) const {
+/// getPreferredAlignment - Return the preferred alignment of the specified
+/// global. This includes an explicitly requested alignment (if the global
+/// has one).
+unsigned TargetData::getPreferredAlignment(const GlobalVariable *GV) const {
   const Type *ElemType = GV->getType()->getElementType();
-  unsigned Alignment = getPreferredTypeAlignmentShift(ElemType);
-  if (GV->getAlignment() > (1U << Alignment))
-    Alignment = Log2_32(GV->getAlignment());
-
+  unsigned Alignment = getPrefTypeAlignment(ElemType);
+  if (GV->getAlignment() > Alignment)
+    Alignment = GV->getAlignment();
+
   if (GV->hasInitializer()) {
-    if (Alignment < 4) {
+    if (Alignment < 16) {
       // If the global is not external, see if it is large. If so, give it a
       // larger alignment.
-      if (getTypeSize(ElemType) > 128)
-        Alignment = 4;    // 16-byte alignment.
+      if (getTypeSizeInBits(ElemType) > 128)
+        Alignment = 16;   // 16-byte alignment.
     }
   }
   return Alignment;
 }
+
+/// getPreferredAlignmentLog - Return the preferred alignment of the
+/// specified global, returned in log form. This includes an explicitly
+/// requested alignment (if the global has one).
+unsigned TargetData::getPreferredAlignmentLog(const GlobalVariable *GV) const {
+  return Log2_32(getPreferredAlignment(GV));
+}
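
The struct-padding arithmetic this patch standardizes on (testing StructSize against TyAlign-1 and rounding up with TargetData::RoundUpAlignment) only works when the alignment is a power of two, which the ABI alignments in the default layout string above always are. Below is a minimal standalone sketch of that round-up idiom; the helper is a local re-implementation written for illustration, not the actual TargetData method, and the offsets are hypothetical example values rather than output of this code base.

// round_up_sketch.cpp -- illustrative only; not part of the patch above.
#include <cassert>
#include <cstdint>
#include <iostream>

// Local stand-in for TargetData::RoundUpAlignment: round Val up to the next
// multiple of Alignment. Requires Alignment to be a power of two, which is
// what the (Val & (Alignment-1)) test in StructLayout's constructor relies on.
static uint64_t RoundUpAlignment(uint64_t Val, unsigned Alignment) {
  assert(Alignment != 0 && (Alignment & (Alignment - 1)) == 0 &&
         "Alignment must be a power of two");
  return (Val + Alignment - 1) & ~static_cast<uint64_t>(Alignment - 1);
}

int main() {
  // Example: laying out { i32, double } under the default string above, where
  // f64 has an 8-byte ABI alignment (f64:64:64). After placing the i32 the
  // running size is 4, so padding bumps it to 8 before the double is placed.
  uint64_t StructSize = 4;
  unsigned TyAlign = 8;
  if ((StructSize & (TyAlign - 1)) != 0)
    StructSize = RoundUpAlignment(StructSize, TyAlign);
  std::cout << "f64 member starts at offset " << StructSize << "\n"; // prints 8
  return 0;
}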