///
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "sroa"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/DIBuilder.h"
-#include "llvm/DebugInfo.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
-#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
-#include "llvm/InstVisitor.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/TimeValue.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
+
+#if __cplusplus >= 201103L && !defined(NDEBUG)
+// We only use this for a debug check in C++11
+#include <random>
+#endif
+
using namespace llvm;
+#define DEBUG_TYPE "sroa"
+
STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement");
-STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced");
-STATISTIC(NumPromoted, "Number of allocas promoted to SSA values");
+STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed");
+STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca");
+STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten");
+STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition");
+STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced");
+STATISTIC(NumPromoted, "Number of allocas promoted to SSA values");
STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion");
-STATISTIC(NumDeleted, "Number of instructions deleted");
-STATISTIC(NumVectorized, "Number of vectorized aggregates");
+STATISTIC(NumDeleted, "Number of instructions deleted");
+STATISTIC(NumVectorized, "Number of vectorized aggregates");
/// Hidden option to force the pass to not use DomTree and mem2reg, instead
/// forming SSA values through the SSAUpdater infrastructure.
static cl::opt<bool>
ForceSSAUpdater("force-ssa-updater", cl::init(false), cl::Hidden);
+/// Hidden option to enable randomly shuffling the slices to help uncover
+/// instability in their order.
+static cl::opt<bool> SROARandomShuffleSlices("sroa-random-shuffle-slices",
+ cl::init(false), cl::Hidden);
+
+/// Hidden option to experiment with completely strict handling of inbounds
+/// GEPs.
+static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds",
+ cl::init(false), cl::Hidden);
+
namespace {
-/// \brief Alloca partitioning representation.
-///
-/// This class represents a partitioning of an alloca into slices, and
-/// information about the nature of uses of each slice of the alloca. The goal
-/// is that this information is sufficient to decide if and how to split the
-/// alloca apart and replace slices with scalars. It is also intended that this
-/// structure can capture the relevant information needed both to decide about
-/// and to enact these transformations.
-class AllocaPartitioning {
+/// \brief A custom IRBuilder inserter which prefixes all names if they are
+/// preserved.
+template <bool preserveNames = true>
+class IRBuilderPrefixedInserter :
+ public IRBuilderDefaultInserter<preserveNames> {
+ std::string Prefix;
+
public:
- /// \brief A common base class for representing a half-open byte range.
- struct ByteRange {
- /// \brief The beginning offset of the range.
- uint64_t BeginOffset;
+ void SetNamePrefix(const Twine &P) { Prefix = P.str(); }
- /// \brief The ending offset, not included in the range.
- uint64_t EndOffset;
+protected:
+ void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB,
+ BasicBlock::iterator InsertPt) const {
+ IRBuilderDefaultInserter<preserveNames>::InsertHelper(
+ I, Name.isTriviallyEmpty() ? Name : Prefix + Name, BB, InsertPt);
+ }
+};
- ByteRange() : BeginOffset(), EndOffset() {}
- ByteRange(uint64_t BeginOffset, uint64_t EndOffset)
- : BeginOffset(BeginOffset), EndOffset(EndOffset) {}
+// Specialization for not preserving the name is trivial.
+template <>
+class IRBuilderPrefixedInserter<false> :
+ public IRBuilderDefaultInserter<false> {
+public:
+ void SetNamePrefix(const Twine &P) {}
+};
- /// \brief Support for ordering ranges.
- ///
- /// This provides an ordering over ranges such that start offsets are
- /// always increasing, and within equal start offsets, the end offsets are
- /// decreasing. Thus the spanning range comes first in a cluster with the
- /// same start position.
- bool operator<(const ByteRange &RHS) const {
- if (BeginOffset < RHS.BeginOffset) return true;
- if (BeginOffset > RHS.BeginOffset) return false;
- if (EndOffset > RHS.EndOffset) return true;
- return false;
- }
+/// \brief Provide a typedef for IRBuilder that drops names in release builds.
+#ifndef NDEBUG
+typedef llvm::IRBuilder<true, ConstantFolder,
+ IRBuilderPrefixedInserter<true> > IRBuilderTy;
+#else
+typedef llvm::IRBuilder<false, ConstantFolder,
+ IRBuilderPrefixedInserter<false> > IRBuilderTy;
+#endif
+}
- /// \brief Support comparison with a single offset to allow binary searches.
- friend bool operator<(const ByteRange &LHS, uint64_t RHSOffset) {
- return LHS.BeginOffset < RHSOffset;
- }
+namespace {
+/// \brief A used slice of an alloca.
+///
+/// This structure represents a slice of an alloca used by some instruction. It
+/// stores both the begin and end offsets of this use, a pointer to the use
+/// itself, and a flag indicating whether we can classify the use as splittable
+/// or not when forming partitions of the alloca.
+class Slice {
+ /// \brief The beginning offset of the range.
+ uint64_t BeginOffset;
- friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset,
- const ByteRange &RHS) {
- return LHSOffset < RHS.BeginOffset;
- }
+ /// \brief The ending offset, not included in the range.
+ uint64_t EndOffset;
- bool operator==(const ByteRange &RHS) const {
- return BeginOffset == RHS.BeginOffset && EndOffset == RHS.EndOffset;
- }
- bool operator!=(const ByteRange &RHS) const { return !operator==(RHS); }
- };
+ /// \brief Storage for both the use of this slice and whether it can be
+ /// split.
+ PointerIntPair<Use *, 1, bool> UseAndIsSplittable;
- /// \brief A partition of an alloca.
- ///
- /// This structure represents a contiguous partition of the alloca. These are
- /// formed by examining the uses of the alloca. During formation, they may
- /// overlap but once an AllocaPartitioning is built, the Partitions within it
- /// are all disjoint.
- struct Partition : public ByteRange {
- /// \brief Whether this partition is splittable into smaller partitions.
- ///
- /// We flag partitions as splittable when they are formed entirely due to
- /// accesses by trivially splittable operations such as memset and memcpy.
- bool IsSplittable;
-
- /// \brief Test whether a partition has been marked as dead.
- bool isDead() const {
- if (BeginOffset == UINT64_MAX) {
- assert(EndOffset == UINT64_MAX);
- return true;
- }
- return false;
- }
+public:
+ Slice() : BeginOffset(), EndOffset() {}
+ Slice(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable)
+ : BeginOffset(BeginOffset), EndOffset(EndOffset),
+ UseAndIsSplittable(U, IsSplittable) {}
- /// \brief Kill a partition.
- /// This is accomplished by setting both its beginning and end offset to
- /// the maximum possible value.
- void kill() {
- assert(!isDead() && "He's Dead, Jim!");
- BeginOffset = EndOffset = UINT64_MAX;
- }
+ uint64_t beginOffset() const { return BeginOffset; }
+ uint64_t endOffset() const { return EndOffset; }
- Partition() : ByteRange(), IsSplittable() {}
- Partition(uint64_t BeginOffset, uint64_t EndOffset, bool IsSplittable)
- : ByteRange(BeginOffset, EndOffset), IsSplittable(IsSplittable) {}
- };
+ bool isSplittable() const { return UseAndIsSplittable.getInt(); }
+ void makeUnsplittable() { UseAndIsSplittable.setInt(false); }
- /// \brief A particular use of a partition of the alloca.
- ///
- /// This structure is used to associate uses of a partition with it. They
- /// mark the range of bytes which are referenced by a particular instruction,
- /// and includes a handle to the user itself and the pointer value in use.
- /// The bounds of these uses are determined by intersecting the bounds of the
- /// memory use itself with a particular partition. As a consequence there is
- /// intentionally overlap between various uses of the same partition.
- struct PartitionUse : public ByteRange {
- /// \brief The use in question. Provides access to both user and used value.
- ///
- /// Note that this may be null if the partition use is *dead*, that is, it
- /// should be ignored.
- Use *U;
+ Use *getUse() const { return UseAndIsSplittable.getPointer(); }
- PartitionUse() : ByteRange(), U() {}
- PartitionUse(uint64_t BeginOffset, uint64_t EndOffset, Use *U)
- : ByteRange(BeginOffset, EndOffset), U(U) {}
- };
+ bool isDead() const { return getUse() == nullptr; }
+ void kill() { UseAndIsSplittable.setPointer(nullptr); }
- /// \brief Construct a partitioning of a particular alloca.
+ /// \brief Support for ordering ranges.
///
- /// Construction does most of the work for partitioning the alloca. This
- /// performs the necessary walks of users and builds a partitioning from it.
- AllocaPartitioning(const DataLayout &TD, AllocaInst &AI);
+ /// This provides an ordering over ranges such that start offsets are
+ /// always increasing, and within equal start offsets, the end offsets are
+ /// decreasing. Thus the spanning range comes first in a cluster with the
+ /// same start position.
+ bool operator<(const Slice &RHS) const {
+ if (beginOffset() < RHS.beginOffset()) return true;
+ if (beginOffset() > RHS.beginOffset()) return false;
+ if (isSplittable() != RHS.isSplittable()) return !isSplittable();
+ if (endOffset() > RHS.endOffset()) return true;
+ return false;
+ }
+
+ /// \brief Support comparison with a single offset to allow binary searches.
+ friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS,
+ uint64_t RHSOffset) {
+ return LHS.beginOffset() < RHSOffset;
+ }
+ friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset,
+ const Slice &RHS) {
+ return LHSOffset < RHS.beginOffset();
+ }
+
+ bool operator==(const Slice &RHS) const {
+ return isSplittable() == RHS.isSplittable() &&
+ beginOffset() == RHS.beginOffset() && endOffset() == RHS.endOffset();
+ }
+ bool operator!=(const Slice &RHS) const { return !operator==(RHS); }
+};
+} // end anonymous namespace
+
+namespace llvm {
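+// Slice is a trivially copyable value type; declare it POD-like so LLVM's
+// containers (e.g. SmallVector) may copy and move it with memcpy.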
+template <typename T> struct isPodLike;
+template <> struct isPodLike<Slice> {
+ static const bool value = true;
+};
+}
+
+namespace {
+/// \brief Representation of the alloca slices.
+///
+/// This class represents the slices of an alloca which are formed by its
+/// various uses. If a pointer escapes, we can't fully build a representation
+/// for the slices used and we reflect that in this structure. The uses are
+/// stored, sorted by increasing beginning offset and with unsplittable slices
+/// starting at a particular offset before splittable slices.
+class AllocaSlices {
+public:
+ /// \brief Construct the slices of a particular alloca.
+ AllocaSlices(const DataLayout &DL, AllocaInst &AI);
/// \brief Test whether a pointer to the allocation escapes our analysis.
///
- /// If this is true, the partitioning is never fully built and should be
+ /// If this is true, the slices are never fully built and should be
/// ignored.
bool isEscaped() const { return PointerEscapingInstr; }
- /// \brief Support for iterating over the partitions.
+ /// \brief Support for iterating over the slices.
/// @{
- typedef SmallVectorImpl<Partition>::iterator iterator;
- iterator begin() { return Partitions.begin(); }
- iterator end() { return Partitions.end(); }
-
- typedef SmallVectorImpl<Partition>::const_iterator const_iterator;
- const_iterator begin() const { return Partitions.begin(); }
- const_iterator end() const { return Partitions.end(); }
- /// @}
+ typedef SmallVectorImpl<Slice>::iterator iterator;
+ iterator begin() { return Slices.begin(); }
+ iterator end() { return Slices.end(); }
- /// \brief Support for iterating over and manipulating a particular
- /// partition's uses.
- ///
- /// The iteration support provided for uses is more limited, but also
- /// includes some manipulation routines to support rewriting the uses of
- /// partitions during SROA.
- /// @{
- typedef SmallVectorImpl<PartitionUse>::iterator use_iterator;
- use_iterator use_begin(unsigned Idx) { return Uses[Idx].begin(); }
- use_iterator use_begin(const_iterator I) { return Uses[I - begin()].begin(); }
- use_iterator use_end(unsigned Idx) { return Uses[Idx].end(); }
- use_iterator use_end(const_iterator I) { return Uses[I - begin()].end(); }
-
- typedef SmallVectorImpl<PartitionUse>::const_iterator const_use_iterator;
- const_use_iterator use_begin(unsigned Idx) const { return Uses[Idx].begin(); }
- const_use_iterator use_begin(const_iterator I) const {
- return Uses[I - begin()].begin();
- }
- const_use_iterator use_end(unsigned Idx) const { return Uses[Idx].end(); }
- const_use_iterator use_end(const_iterator I) const {
- return Uses[I - begin()].end();
- }
-
- unsigned use_size(unsigned Idx) const { return Uses[Idx].size(); }
- unsigned use_size(const_iterator I) const { return Uses[I - begin()].size(); }
- const PartitionUse &getUse(unsigned PIdx, unsigned UIdx) const {
- return Uses[PIdx][UIdx];
- }
- const PartitionUse &getUse(const_iterator I, unsigned UIdx) const {
- return Uses[I - begin()][UIdx];
- }
-
- void use_push_back(unsigned Idx, const PartitionUse &PU) {
- Uses[Idx].push_back(PU);
- }
- void use_push_back(const_iterator I, const PartitionUse &PU) {
- Uses[I - begin()].push_back(PU);
- }
+ typedef SmallVectorImpl<Slice>::const_iterator const_iterator;
+ const_iterator begin() const { return Slices.begin(); }
+ const_iterator end() const { return Slices.end(); }
/// @}
/// \brief Allow iterating the dead users for this alloca.
dead_op_iterator dead_op_end() const { return DeadOperands.end(); }
/// @}
- /// \brief MemTransferInst auxiliary data.
- /// This struct provides some auxiliary data about memory transfer
- /// intrinsics such as memcpy and memmove. These intrinsics can use two
- /// different ranges within the same alloca, and provide other challenges to
- /// correctly represent. We stash extra data to help us untangle this
- /// after the partitioning is complete.
- struct MemTransferOffsets {
- /// The destination begin and end offsets when the destination is within
- /// this alloca. If the end offset is zero the destination is not within
- /// this alloca.
- uint64_t DestBegin, DestEnd;
-
- /// The source begin and end offsets when the source is within this alloca.
- /// If the end offset is zero, the source is not within this alloca.
- uint64_t SourceBegin, SourceEnd;
-
- /// Flag for whether an alloca is splittable.
- bool IsSplittable;
- };
- MemTransferOffsets getMemTransferOffsets(MemTransferInst &II) const {
- return MemTransferInstData.lookup(&II);
- }
-
- /// \brief Map from a PHI or select operand back to a partition.
- ///
- /// When manipulating PHI nodes or selects, they can use more than one
- /// partition of an alloca. We store a special mapping to allow finding the
- /// partition referenced by each of these operands, if any.
- iterator findPartitionForPHIOrSelectOperand(Use *U) {
- SmallDenseMap<Use *, std::pair<unsigned, unsigned> >::const_iterator MapIt
- = PHIOrSelectOpMap.find(U);
- if (MapIt == PHIOrSelectOpMap.end())
- return end();
-
- return begin() + MapIt->second.first;
- }
-
- /// \brief Map from a PHI or select operand back to the specific use of
- /// a partition.
- ///
- /// Similar to mapping these operands back to the partitions, this maps
- /// directly to the use structure of that partition.
- use_iterator findPartitionUseForPHIOrSelectOperand(Use *U) {
- SmallDenseMap<Use *, std::pair<unsigned, unsigned> >::const_iterator MapIt
- = PHIOrSelectOpMap.find(U);
- assert(MapIt != PHIOrSelectOpMap.end());
- return Uses[MapIt->second.first].begin() + MapIt->second.second;
- }
-
- /// \brief Compute a common type among the uses of a particular partition.
- ///
- /// This routines walks all of the uses of a particular partition and tries
- /// to find a common type between them. Untyped operations such as memset and
- /// memcpy are ignored.
- Type *getCommonType(iterator I) const;
-
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void print(raw_ostream &OS, const_iterator I, StringRef Indent = " ") const;
- void printUsers(raw_ostream &OS, const_iterator I,
+ void printSlice(raw_ostream &OS, const_iterator I,
StringRef Indent = " ") const;
+ void printUse(raw_ostream &OS, const_iterator I,
+ StringRef Indent = " ") const;
void print(raw_ostream &OS) const;
- void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump(const_iterator I) const;
- void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump() const;
+ void dump(const_iterator I) const;
+ void dump() const;
#endif
private:
template <typename DerivedT, typename RetT = void> class BuilderBase;
- class PartitionBuilder;
- friend class AllocaPartitioning::PartitionBuilder;
- class UseBuilder;
- friend class AllocaPartitioning::UseBuilder;
+ class SliceBuilder;
+ friend class AllocaSlices::SliceBuilder;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// \brief Handle to alloca instruction to simplify method interfaces.
AllocaInst &AI;
#endif
- /// \brief The instruction responsible for this alloca having no partitioning.
+ /// \brief The instruction responsible for this alloca not having a known set
+ /// of slices.
///
/// When an instruction (potentially) escapes the pointer to the alloca, we
- /// store a pointer to that here and abort trying to partition the alloca.
- /// This will be null if the alloca is partitioned successfully.
+ /// store a pointer to that here and abort trying to form slices of the
+ /// alloca. This will be null if the alloca slices are analyzed successfully.
Instruction *PointerEscapingInstr;
- /// \brief The partitions of the alloca.
+ /// \brief The slices of the alloca.
///
- /// We store a vector of the partitions over the alloca here. This vector is
- /// sorted by increasing begin offset, and then by decreasing end offset. See
- /// the Partition inner class for more details. Initially (during
- /// construction) there are overlaps, but we form a disjoint sequence of
- /// partitions while finishing construction and a fully constructed object is
- /// expected to always have this as a disjoint space.
- SmallVector<Partition, 8> Partitions;
-
- /// \brief The uses of the partitions.
- ///
- /// This is essentially a mapping from each partition to a list of uses of
- /// that partition. The mapping is done with a Uses vector that has the exact
- /// same number of entries as the partition vector. Each entry is itself
- /// a vector of the uses.
- SmallVector<SmallVector<PartitionUse, 2>, 8> Uses;
+ /// We store a vector of the slices formed by uses of the alloca here. This
+ /// vector is sorted by increasing begin offset, and then the unsplittable
+ /// slices before the splittable ones. See the Slice inner class for more
+ /// details.
+ SmallVector<Slice, 8> Slices;
/// \brief Instructions which will become dead if we rewrite the alloca.
///
- /// Note that these are not separated by partition. This is because we expect
- /// a partitioned alloca to be completely rewritten or not rewritten at all.
- /// If rewritten, all these instructions can simply be removed and replaced
- /// with undef as they come from outside of the allocated space.
+ /// Note that these are not separated by slice. This is because we expect an
+ /// alloca to be completely rewritten or not rewritten at all. If rewritten,
+ /// all these instructions can simply be removed and replaced with undef as
+ /// they come from outside of the allocated space.
SmallVector<Instruction *, 8> DeadUsers;
/// \brief Operands which will become dead if we rewrite the alloca.
/// want to swap this particular input for undef to simplify the use lists of
/// the alloca.
SmallVector<Use *, 8> DeadOperands;
-
- /// \brief The underlying storage for auxiliary memcpy and memset info.
- SmallDenseMap<MemTransferInst *, MemTransferOffsets, 4> MemTransferInstData;
-
- /// \brief A side datastructure used when building up the partitions and uses.
- ///
- /// This mapping is only really used during the initial building of the
- /// partitioning so that we can retain information about PHI and select nodes
- /// processed.
- SmallDenseMap<Instruction *, std::pair<uint64_t, bool> > PHIOrSelectSizes;
-
- /// \brief Auxiliary information for particular PHI or select operands.
- SmallDenseMap<Use *, std::pair<unsigned, unsigned>, 4> PHIOrSelectOpMap;
-
- /// \brief A utility routine called from the constructor.
- ///
- /// This does what it says on the tin. It is the key of the alloca partition
- /// splitting and merging. After it is called we have the desired disjoint
- /// collection of partitions.
- void splitAndMergePartitions();
};
}
// early on.
if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition()))
return SI.getOperand(1+CI->isZero());
- if (SI.getOperand(1) == SI.getOperand(2)) {
+ if (SI.getOperand(1) == SI.getOperand(2))
return SI.getOperand(1);
+
+ return nullptr;
+}
+
+/// \brief A helper that folds a PHI node or a select.
+static Value *foldPHINodeOrSelectInst(Instruction &I) {
+ if (PHINode *PN = dyn_cast<PHINode>(&I)) {
+ // If PN merges together the same value, return that value.
+ return PN->hasConstantValue();
}
- return 0;
+ return foldSelectInst(cast<SelectInst>(I));
}
-/// \brief Builder for the alloca partitioning.
+/// \brief Builder for the alloca slices.
///
-/// This class builds an alloca partitioning by recursively visiting the uses
-/// of an alloca and splitting the partitions for each load and store at each
-/// offset.
-class AllocaPartitioning::PartitionBuilder
- : public PtrUseVisitor<PartitionBuilder> {
- friend class PtrUseVisitor<PartitionBuilder>;
- friend class InstVisitor<PartitionBuilder>;
- typedef PtrUseVisitor<PartitionBuilder> Base;
+/// This class builds a set of alloca slices by recursively visiting the uses
+/// of an alloca and making a slice for each load and store at each offset.
+class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> {
+ friend class PtrUseVisitor<SliceBuilder>;
+ friend class InstVisitor<SliceBuilder>;
+ typedef PtrUseVisitor<SliceBuilder> Base;
const uint64_t AllocSize;
- AllocaPartitioning &P;
+ AllocaSlices &S;
- SmallDenseMap<Instruction *, unsigned> MemTransferPartitionMap;
+ SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap;
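+  /// \brief Cached size for each PHI or select node we have visited: the
+  /// maximum size of any direct load or store reached through it.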
+ SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes;
+
+ /// \brief Set to de-duplicate dead instructions found in the use walk.
+ SmallPtrSet<Instruction *, 4> VisitedDeadInsts;
public:
- PartitionBuilder(const DataLayout &DL, AllocaInst &AI, AllocaPartitioning &P)
- : PtrUseVisitor<PartitionBuilder>(DL),
- AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())),
- P(P) {}
+ SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &S)
+ : PtrUseVisitor<SliceBuilder>(DL),
+ AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())), S(S) {}
private:
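+  /// \brief Mark an instruction as dead and record it (at most once) so it
+  /// can be removed if we end up rewriting the alloca.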
+ void markAsDead(Instruction &I) {
+ if (VisitedDeadInsts.insert(&I))
+ S.DeadUsers.push_back(&I);
+ }
+
void insertUse(Instruction &I, const APInt &Offset, uint64_t Size,
bool IsSplittable = false) {
// Completely skip uses which have a zero size or start either before or
// past the end of the allocation.
- if (Size == 0 || Offset.isNegative() || Offset.uge(AllocSize)) {
+ if (Size == 0 || Offset.uge(AllocSize)) {
DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset
<< " which has zero size or starts outside of the "
<< AllocSize << " byte alloca:\n"
- << " alloca: " << P.AI << "\n"
+ << " alloca: " << S.AI << "\n"
<< " use: " << I << "\n");
- return;
+ return markAsDead(I);
}
uint64_t BeginOffset = Offset.getZExtValue();
// Clamp the end offset to the end of the allocation. Note that this is
// formulated to handle even the case where "BeginOffset + Size" overflows.
- // NOTE! This may appear superficially to be something we could ignore
- // entirely, but that is not so! There may be PHI-node uses where some
- // instructions are dead but not others. We can't completely ignore the
- // PHI node, and so have to record at least the information here.
+ // This may appear superficially to be something we could ignore entirely,
+ // but that is not so! There may be widened loads or PHI-node uses where
+ // some instructions are dead but not others. We can't completely ignore
+ // them, and so have to record at least the information here.
assert(AllocSize >= BeginOffset); // Established above.
if (Size > AllocSize - BeginOffset) {
DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset
<< " to remain within the " << AllocSize << " byte alloca:\n"
- << " alloca: " << P.AI << "\n"
+ << " alloca: " << S.AI << "\n"
<< " use: " << I << "\n");
EndOffset = AllocSize;
}
- Partition New(BeginOffset, EndOffset, IsSplittable);
- P.Partitions.push_back(New);
+ S.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable));
}
- void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset,
- bool IsVolatile) {
- uint64_t Size = DL.getTypeStoreSize(Ty);
+ void visitBitCastInst(BitCastInst &BC) {
+ if (BC.use_empty())
+ return markAsDead(BC);
- // If this memory access can be shown to *statically* extend outside the
- // bounds of of the allocation, it's behavior is undefined, so simply
- // ignore it. Note that this is more strict than the generic clamping
- // behavior of insertUse. We also try to handle cases which might run the
- // risk of overflow.
- // FIXME: We should instead consider the pointer to have escaped if this
- // function is being instrumented for addressing bugs or race conditions.
- if (Offset.isNegative() || Size > AllocSize ||
- Offset.ugt(AllocSize - Size)) {
- DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte "
- << (isa<LoadInst>(I) ? "load" : "store") << " @" << Offset
- << " which extends past the end of the " << AllocSize
- << " byte alloca:\n"
- << " alloca: " << P.AI << "\n"
- << " use: " << I << "\n");
- return;
+ return Base::visitBitCastInst(BC);
+ }
+
+ void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
+ if (GEPI.use_empty())
+ return markAsDead(GEPI);
+
+ if (SROAStrictInbounds && GEPI.isInBounds()) {
+ // FIXME: This is a manually un-factored variant of the basic code inside
+ // of GEPs with checking of the inbounds invariant specified in the
+ // langref in a very strict sense. If we ever want to enable
+ // SROAStrictInbounds, this code should be factored cleanly into
+ // PtrUseVisitor, but it is easier to experiment with SROAStrictInbounds
+      // by writing out the code here where we have the underlying allocation
+ // size readily available.
+ APInt GEPOffset = Offset;
+ for (gep_type_iterator GTI = gep_type_begin(GEPI),
+ GTE = gep_type_end(GEPI);
+ GTI != GTE; ++GTI) {
+ ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
+ if (!OpC)
+ break;
+
+ // Handle a struct index, which adds its field offset to the pointer.
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ unsigned ElementIdx = OpC->getZExtValue();
+ const StructLayout *SL = DL.getStructLayout(STy);
+ GEPOffset +=
+ APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
+ } else {
+          // For array or vector indices, scale the index by the size of the
+          // type.
+ APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth());
+ GEPOffset += Index * APInt(Offset.getBitWidth(),
+ DL.getTypeAllocSize(GTI.getIndexedType()));
+ }
+
+ // If this index has computed an intermediate pointer which is not
+ // inbounds, then the result of the GEP is a poison value and we can
+ // delete it and all uses.
+ if (GEPOffset.ugt(AllocSize))
+ return markAsDead(GEPI);
+ }
}
+ return Base::visitGetElementPtrInst(GEPI);
+ }
+
+ void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset,
+ uint64_t Size, bool IsVolatile) {
// We allow splitting of loads and stores where the type is an integer type
- // and which cover the entire alloca. Such integer loads and stores
- // often require decomposition into fine grained loads and stores.
- bool IsSplittable = false;
- if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
- IsSplittable = !IsVolatile && ITy->getBitWidth() == AllocSize*8;
+    // and which cover the entire alloca. This prevents us from splitting
+    // over-eagerly.
+ // FIXME: In the great blue eventually, we should eagerly split all integer
+ // loads and stores, and then have a separate step that merges adjacent
+ // alloca partitions into a single partition suitable for integer widening.
+ // Or we should skip the merge step and rely on GVN and other passes to
+ // merge adjacent loads and stores that survive mem2reg.
+ bool IsSplittable =
+ Ty->isIntegerTy() && !IsVolatile && Offset == 0 && Size >= AllocSize;
insertUse(I, Offset, Size, IsSplittable);
}
if (!IsOffsetKnown)
return PI.setAborted(&LI);
- return handleLoadOrStore(LI.getType(), LI, Offset, LI.isVolatile());
+ uint64_t Size = DL.getTypeStoreSize(LI.getType());
+ return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile());
}
void visitStoreInst(StoreInst &SI) {
if (!IsOffsetKnown)
return PI.setAborted(&SI);
+ uint64_t Size = DL.getTypeStoreSize(ValOp->getType());
+
+ // If this memory access can be shown to *statically* extend outside the
+    // bounds of the allocation, its behavior is undefined, so simply
+ // ignore it. Note that this is more strict than the generic clamping
+ // behavior of insertUse. We also try to handle cases which might run the
+ // risk of overflow.
+ // FIXME: We should instead consider the pointer to have escaped if this
+ // function is being instrumented for addressing bugs or race conditions.
+ if (Size > AllocSize || Offset.ugt(AllocSize - Size)) {
+ DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @" << Offset
+ << " which extends past the end of the " << AllocSize
+ << " byte alloca:\n"
+ << " alloca: " << S.AI << "\n"
+ << " use: " << SI << "\n");
+ return markAsDead(SI);
+ }
+
assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
"All simple FCA stores should have been pre-split");
- handleLoadOrStore(ValOp->getType(), SI, Offset, SI.isVolatile());
+ handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile());
}
assert(II.getRawDest() == *U && "Pointer use is not the destination?");
ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
if ((Length && Length->getValue() == 0) ||
- (IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize)))
+ (IsOffsetKnown && Offset.uge(AllocSize)))
// Zero-length mem transfer intrinsics can be ignored entirely.
- return;
+ return markAsDead(II);
if (!IsOffsetKnown)
return PI.setAborted(&II);
void visitMemTransferInst(MemTransferInst &II) {
ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
- if ((Length && Length->getValue() == 0) ||
- (IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize)))
+ if (Length && Length->getValue() == 0)
// Zero-length mem transfer intrinsics can be ignored entirely.
+ return markAsDead(II);
+
+    // Because we can visit these intrinsics twice, also check whether the
+    // first visit already marked this instruction as dead. If so, skip it.
+ if (VisitedDeadInsts.count(&II))
return;
if (!IsOffsetKnown)
return PI.setAborted(&II);
+ // This side of the transfer is completely out-of-bounds, and so we can
+ // nuke the entire transfer. However, we also need to nuke the other side
+    // if it has already been added to our slices.
+ // FIXME: Yet another place we really should bypass this when
+ // instrumenting for ASan.
+ if (Offset.uge(AllocSize)) {
+      SmallDenseMap<Instruction *, unsigned>::iterator MTPI =
+          MemTransferSliceMap.find(&II);
+ if (MTPI != MemTransferSliceMap.end())
+ S.Slices[MTPI->second].kill();
+ return markAsDead(II);
+ }
+
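+    // Compute the size of this use of the transfer. If no constant length is
+    // known, conservatively assume it reaches from the offset to the end of
+    // the alloca.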
uint64_t RawOffset = Offset.getLimitedValue();
uint64_t Size = Length ? Length->getLimitedValue()
: AllocSize - RawOffset;
- MemTransferOffsets &Offsets = P.MemTransferInstData[&II];
-
- // Only intrinsics with a constant length can be split.
- Offsets.IsSplittable = Length;
+ // Check for the special case where the same exact value is used for both
+ // source and dest.
+ if (*U == II.getRawDest() && *U == II.getRawSource()) {
+ // For non-volatile transfers this is a no-op.
+ if (!II.isVolatile())
+ return markAsDead(II);
- if (*U == II.getRawDest()) {
- Offsets.DestBegin = RawOffset;
- Offsets.DestEnd = RawOffset + Size;
- }
- if (*U == II.getRawSource()) {
- Offsets.SourceBegin = RawOffset;
- Offsets.SourceEnd = RawOffset + Size;
+ return insertUse(II, Offset, Size, /*IsSplittable=*/false);
}
- // If we have set up end offsets for both the source and the destination,
- // we have found both sides of this transfer pointing at the same alloca.
- bool SeenBothEnds = Offsets.SourceEnd && Offsets.DestEnd;
- if (SeenBothEnds && II.getRawDest() != II.getRawSource()) {
- unsigned PrevIdx = MemTransferPartitionMap[&II];
+ // If we have seen both source and destination for a mem transfer, then
+ // they both point to the same alloca.
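+    // Record the index of the slice we are about to insert for this
+    // intrinsic. If an entry already exists, this is the second operand of
+    // the transfer pointing into this alloca that we have seen.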
+ bool Inserted;
+ SmallDenseMap<Instruction *, unsigned>::iterator MTPI;
+ std::tie(MTPI, Inserted) =
+ MemTransferSliceMap.insert(std::make_pair(&II, S.Slices.size()));
+ unsigned PrevIdx = MTPI->second;
+ if (!Inserted) {
+ Slice &PrevP = S.Slices[PrevIdx];
// Check if the begin offsets match and this is a non-volatile transfer.
// In that case, we can completely elide the transfer.
- if (!II.isVolatile() && Offsets.SourceBegin == Offsets.DestBegin) {
- P.Partitions[PrevIdx].kill();
- return;
+ if (!II.isVolatile() && PrevP.beginOffset() == RawOffset) {
+ PrevP.kill();
+ return markAsDead(II);
}
// Otherwise we have an offset transfer within the same alloca. We can't
// split those.
- P.Partitions[PrevIdx].IsSplittable = Offsets.IsSplittable = false;
- } else if (SeenBothEnds) {
- // Handle the case where this exact use provides both ends of the
- // operation.
- assert(II.getRawDest() == II.getRawSource());
-
- // For non-volatile transfers this is a no-op.
- if (!II.isVolatile())
- return;
-
- // Otherwise just suppress splitting.
- Offsets.IsSplittable = false;
+ PrevP.makeUnsplittable();
}
-
// Insert the use now that we've fixed up the splittable nature.
- insertUse(II, Offset, Size, Offsets.IsSplittable);
+ insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length);
- // Setup the mapping from intrinsic to partition of we've not seen both
- // ends of this transfer.
- if (!SeenBothEnds) {
- unsigned NewIdx = P.Partitions.size() - 1;
- bool Inserted
- = MemTransferPartitionMap.insert(std::make_pair(&II, NewIdx)).second;
- assert(Inserted &&
- "Already have intrinsic in map but haven't seen both ends");
- (void)Inserted;
- }
+ // Check that we ended up with a valid index in the map.
+ assert(S.Slices[PrevIdx].getUse()->getUser() == &II &&
+ "Map index doesn't point back to a slice with this user.");
}
// Disable SRoA for any intrinsics except for lifetime invariants.
- // FIXME: What about debug instrinsics? This matches old behavior, but
+ // FIXME: What about debug intrinsics? This matches old behavior, but
// doesn't make sense.
void visitIntrinsicInst(IntrinsicInst &II) {
if (!IsOffsetKnown)
Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) {
// We consider any PHI or select that results in a direct load or store of
- // the same offset to be a viable use for partitioning purposes. These uses
+ // the same offset to be a viable use for slicing purposes. These uses
// are considered unsplittable and the size is the maximum loaded or stored
// size.
SmallPtrSet<Instruction *, 4> Visited;
Size = 0;
do {
Instruction *I, *UsedI;
- llvm::tie(UsedI, I) = Uses.pop_back_val();
+ std::tie(UsedI, I) = Uses.pop_back_val();
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
Size = std::max(Size, DL.getTypeStoreSize(LI->getType()));
return I;
}
- for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE;
- ++UI)
- if (Visited.insert(cast<Instruction>(*UI)))
- Uses.push_back(std::make_pair(I, cast<Instruction>(*UI)));
+ for (User *U : I->users())
+ if (Visited.insert(cast<Instruction>(U)))
+ Uses.push_back(std::make_pair(I, cast<Instruction>(U)));
} while (!Uses.empty());
- return 0;
+ return nullptr;
}
- void visitPHINode(PHINode &PN) {
- if (PN.use_empty())
- return;
- if (!IsOffsetKnown)
- return PI.setAborted(&PN);
-
- // See if we already have computed info on this node.
- std::pair<uint64_t, bool> &PHIInfo = P.PHIOrSelectSizes[&PN];
- if (PHIInfo.first) {
- PHIInfo.second = true;
- insertUse(PN, Offset, PHIInfo.first);
- return;
- }
-
- // Check for an unsafe use of the PHI node.
- if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&PN, PHIInfo.first))
- return PI.setAborted(UnsafeI);
-
- insertUse(PN, Offset, PHIInfo.first);
- }
+ void visitPHINodeOrSelectInst(Instruction &I) {
+ assert(isa<PHINode>(I) || isa<SelectInst>(I));
+ if (I.use_empty())
+ return markAsDead(I);
- void visitSelectInst(SelectInst &SI) {
- if (SI.use_empty())
- return;
- if (Value *Result = foldSelectInst(SI)) {
+ // TODO: We could use SimplifyInstruction here to fold PHINodes and
+    // SelectInsts. However, doing so requires changing the current
+ // dead-operand-tracking mechanism. For instance, suppose neither loading
+ // from %U nor %other traps. Then "load (select undef, %U, %other)" does not
+ // trap either. However, if we simply replace %U with undef using the
+ // current dead-operand-tracking mechanism, "load (select undef, undef,
+ // %other)" may trap because the select may return the first operand
+ // "undef".
+ if (Value *Result = foldPHINodeOrSelectInst(I)) {
if (Result == *U)
// If the result of the constant fold will be the pointer, recurse
- // through the select as if we had RAUW'ed it.
- enqueueUsers(SI);
+ // through the PHI/select as if we had RAUW'ed it.
+ enqueueUsers(I);
+ else
+ // Otherwise the operand to the PHI/select is dead, and we can replace
+ // it with undef.
+ S.DeadOperands.push_back(U);
return;
}
+
if (!IsOffsetKnown)
- return PI.setAborted(&SI);
+ return PI.setAborted(&I);
// See if we already have computed info on this node.
- std::pair<uint64_t, bool> &SelectInfo = P.PHIOrSelectSizes[&SI];
- if (SelectInfo.first) {
- SelectInfo.second = true;
- insertUse(SI, Offset, SelectInfo.first);
- return;
- }
-
- // Check for an unsafe use of the PHI node.
- if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&SI, SelectInfo.first))
- return PI.setAborted(UnsafeI);
-
- insertUse(SI, Offset, SelectInfo.first);
- }
-
- /// \brief Disable SROA entirely if there are unhandled users of the alloca.
- void visitInstruction(Instruction &I) {
- PI.setAborted(&I);
- }
-};
-
-/// \brief Use adder for the alloca partitioning.
-///
-/// This class adds the uses of an alloca to all of the partitions which they
-/// use. For splittable partitions, this can end up doing essentially a linear
-/// walk of the partitions, but the number of steps remains bounded by the
-/// total result instruction size:
-/// - The number of partitions is a result of the number unsplittable
-/// instructions using the alloca.
-/// - The number of users of each partition is at worst the total number of
-/// splittable instructions using the alloca.
-/// Thus we will produce N * M instructions in the end, where N are the number
-/// of unsplittable uses and M are the number of splittable. This visitor does
-/// the exact same number of updates to the partitioning.
-///
-/// In the more common case, this visitor will leverage the fact that the
-/// partition space is pre-sorted, and do a logarithmic search for the
-/// partition needed, making the total visit a classical ((N + M) * log(N))
-/// complexity operation.
-class AllocaPartitioning::UseBuilder : public PtrUseVisitor<UseBuilder> {
- friend class PtrUseVisitor<UseBuilder>;
- friend class InstVisitor<UseBuilder>;
- typedef PtrUseVisitor<UseBuilder> Base;
-
- const uint64_t AllocSize;
- AllocaPartitioning &P;
-
- /// \brief Set to de-duplicate dead instructions found in the use walk.
- SmallPtrSet<Instruction *, 4> VisitedDeadInsts;
-
-public:
- UseBuilder(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P)
- : PtrUseVisitor<UseBuilder>(TD),
- AllocSize(TD.getTypeAllocSize(AI.getAllocatedType())),
- P(P) {}
-
-private:
- void markAsDead(Instruction &I) {
- if (VisitedDeadInsts.insert(&I))
- P.DeadUsers.push_back(&I);
- }
-
- void insertUse(Instruction &User, const APInt &Offset, uint64_t Size) {
- // If the use has a zero size or extends outside of the allocation, record
- // it as a dead use for elimination later.
- if (Size == 0 || Offset.isNegative() || Offset.uge(AllocSize))
- return markAsDead(User);
-
- uint64_t BeginOffset = Offset.getZExtValue();
- uint64_t EndOffset = BeginOffset + Size;
-
- // Clamp the end offset to the end of the allocation. Note that this is
- // formulated to handle even the case where "BeginOffset + Size" overflows.
- assert(AllocSize >= BeginOffset); // Established above.
- if (Size > AllocSize - BeginOffset)
- EndOffset = AllocSize;
-
- // NB: This only works if we have zero overlapping partitions.
- iterator B = std::lower_bound(P.begin(), P.end(), BeginOffset);
- if (B != P.begin() && llvm::prior(B)->EndOffset > BeginOffset)
- B = llvm::prior(B);
- for (iterator I = B, E = P.end(); I != E && I->BeginOffset < EndOffset;
- ++I) {
- PartitionUse NewPU(std::max(I->BeginOffset, BeginOffset),
- std::min(I->EndOffset, EndOffset), U);
- P.use_push_back(I, NewPU);
- if (isa<PHINode>(U->getUser()) || isa<SelectInst>(U->getUser()))
- P.PHIOrSelectOpMap[U]
- = std::make_pair(I - P.begin(), P.Uses[I - P.begin()].size() - 1);
+ uint64_t &Size = PHIOrSelectSizes[&I];
+ if (!Size) {
+      // This is a new PHI/Select; check for an unsafe use of it.
+ if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&I, Size))
+ return PI.setAborted(UnsafeI);
}
- }
-
- void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset) {
- uint64_t Size = DL.getTypeStoreSize(Ty);
-
- // If this memory access can be shown to *statically* extend outside the
- // bounds of of the allocation, it's behavior is undefined, so simply
- // ignore it. Note that this is more strict than the generic clamping
- // behavior of insertUse.
- if (Offset.isNegative() || Size > AllocSize ||
- Offset.ugt(AllocSize - Size))
- return markAsDead(I);
-
- insertUse(I, Offset, Size);
- }
-
- void visitBitCastInst(BitCastInst &BC) {
- if (BC.use_empty())
- return markAsDead(BC);
-
- return Base::visitBitCastInst(BC);
- }
-
- void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
- if (GEPI.use_empty())
- return markAsDead(GEPI);
-
- return Base::visitGetElementPtrInst(GEPI);
- }
-
- void visitLoadInst(LoadInst &LI) {
- assert(IsOffsetKnown);
- handleLoadOrStore(LI.getType(), LI, Offset);
- }
-
- void visitStoreInst(StoreInst &SI) {
- assert(IsOffsetKnown);
- handleLoadOrStore(SI.getOperand(0)->getType(), SI, Offset);
- }
-
- void visitMemSetInst(MemSetInst &II) {
- ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
- if ((Length && Length->getValue() == 0) ||
- (IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize)))
- return markAsDead(II);
-
- assert(IsOffsetKnown);
- insertUse(II, Offset, Length ? Length->getLimitedValue()
- : AllocSize - Offset.getLimitedValue());
- }
-
- void visitMemTransferInst(MemTransferInst &II) {
- ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
- if ((Length && Length->getValue() == 0) ||
- (IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize)))
- return markAsDead(II);
-
- assert(IsOffsetKnown);
- uint64_t Size = Length ? Length->getLimitedValue()
- : AllocSize - Offset.getLimitedValue();
-
- MemTransferOffsets &Offsets = P.MemTransferInstData[&II];
- if (!II.isVolatile() && Offsets.DestEnd && Offsets.SourceEnd &&
- Offsets.DestBegin == Offsets.SourceBegin)
- return markAsDead(II); // Skip identity transfers without side-effects.
-
- insertUse(II, Offset, Size);
- }
-
- void visitIntrinsicInst(IntrinsicInst &II) {
- assert(IsOffsetKnown);
- assert(II.getIntrinsicID() == Intrinsic::lifetime_start ||
- II.getIntrinsicID() == Intrinsic::lifetime_end);
-
- ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
- insertUse(II, Offset, std::min(Length->getLimitedValue(),
- AllocSize - Offset.getLimitedValue()));
- }
-
- void insertPHIOrSelect(Instruction &User, const APInt &Offset) {
- uint64_t Size = P.PHIOrSelectSizes.lookup(&User).first;
// For PHI and select operands outside the alloca, we can't nuke the entire
// phi or select -- the other side might still be relevant, so we special
// case them here and use a separate structure to track the operands
// themselves which should be replaced with undef.
- if ((Offset.isNegative() && Offset.uge(Size)) ||
- (!Offset.isNegative() && Offset.uge(AllocSize))) {
- P.DeadOperands.push_back(U);
+ // FIXME: This should instead be escaped in the event we're instrumenting
+ // for address sanitization.
+ if (Offset.uge(AllocSize)) {
+ S.DeadOperands.push_back(U);
return;
}
- insertUse(User, Offset, Size);
+ insertUse(I, Offset, Size);
}
void visitPHINode(PHINode &PN) {
- if (PN.use_empty())
- return markAsDead(PN);
-
- assert(IsOffsetKnown);
- insertPHIOrSelect(PN, Offset);
+ visitPHINodeOrSelectInst(PN);
}
void visitSelectInst(SelectInst &SI) {
- if (SI.use_empty())
- return markAsDead(SI);
-
- if (Value *Result = foldSelectInst(SI)) {
- if (Result == *U)
- // If the result of the constant fold will be the pointer, recurse
- // through the select as if we had RAUW'ed it.
- enqueueUsers(SI);
- else
- // Otherwise the operand to the select is dead, and we can replace it
- // with undef.
- P.DeadOperands.push_back(U);
-
- return;
- }
-
- assert(IsOffsetKnown);
- insertPHIOrSelect(SI, Offset);
+ visitPHINodeOrSelectInst(SI);
}
- /// \brief Unreachable, we've already visited the alloca once.
+ /// \brief Disable SROA entirely if there are unhandled users of the alloca.
void visitInstruction(Instruction &I) {
- llvm_unreachable("Unhandled instruction in use builder.");
+ PI.setAborted(&I);
}
};
-void AllocaPartitioning::splitAndMergePartitions() {
- size_t NumDeadPartitions = 0;
-
- // Track the range of splittable partitions that we pass when accumulating
- // overlapping unsplittable partitions.
- uint64_t SplitEndOffset = 0ull;
-
- Partition New(0ull, 0ull, false);
-
- for (unsigned i = 0, j = i, e = Partitions.size(); i != e; i = j) {
- ++j;
-
- if (!Partitions[i].IsSplittable || New.BeginOffset == New.EndOffset) {
- assert(New.BeginOffset == New.EndOffset);
- New = Partitions[i];
- } else {
- assert(New.IsSplittable);
- New.EndOffset = std::max(New.EndOffset, Partitions[i].EndOffset);
- }
- assert(New.BeginOffset != New.EndOffset);
-
- // Scan the overlapping partitions.
- while (j != e && New.EndOffset > Partitions[j].BeginOffset) {
- // If the new partition we are forming is splittable, stop at the first
- // unsplittable partition.
- if (New.IsSplittable && !Partitions[j].IsSplittable)
- break;
-
- // Grow the new partition to include any equally splittable range. 'j' is
- // always equally splittable when New is splittable, but when New is not
- // splittable, we may subsume some (or part of some) splitable partition
- // without growing the new one.
- if (New.IsSplittable == Partitions[j].IsSplittable) {
- New.EndOffset = std::max(New.EndOffset, Partitions[j].EndOffset);
- } else {
- assert(!New.IsSplittable);
- assert(Partitions[j].IsSplittable);
- SplitEndOffset = std::max(SplitEndOffset, Partitions[j].EndOffset);
- }
-
- Partitions[j].kill();
- ++NumDeadPartitions;
- ++j;
- }
-
- // If the new partition is splittable, chop off the end as soon as the
- // unsplittable subsequent partition starts and ensure we eventually cover
- // the splittable area.
- if (j != e && New.IsSplittable) {
- SplitEndOffset = std::max(SplitEndOffset, New.EndOffset);
- New.EndOffset = std::min(New.EndOffset, Partitions[j].BeginOffset);
- }
-
- // Add the new partition if it differs from the original one and is
- // non-empty. We can end up with an empty partition here if it was
- // splittable but there is an unsplittable one that starts at the same
- // offset.
- if (New != Partitions[i]) {
- if (New.BeginOffset != New.EndOffset)
- Partitions.push_back(New);
- // Mark the old one for removal.
- Partitions[i].kill();
- ++NumDeadPartitions;
- }
-
- New.BeginOffset = New.EndOffset;
- if (!New.IsSplittable) {
- New.EndOffset = std::max(New.EndOffset, SplitEndOffset);
- if (j != e && !Partitions[j].IsSplittable)
- New.EndOffset = std::min(New.EndOffset, Partitions[j].BeginOffset);
- New.IsSplittable = true;
- // If there is a trailing splittable partition which won't be fused into
- // the next splittable partition go ahead and add it onto the partitions
- // list.
- if (New.BeginOffset < New.EndOffset &&
- (j == e || !Partitions[j].IsSplittable ||
- New.EndOffset < Partitions[j].BeginOffset)) {
- Partitions.push_back(New);
- New.BeginOffset = New.EndOffset = 0ull;
- }
- }
- }
-
- // Re-sort the partitions now that they have been split and merged into
- // disjoint set of partitions. Also remove any of the dead partitions we've
- // replaced in the process.
- std::sort(Partitions.begin(), Partitions.end());
- if (NumDeadPartitions) {
- assert(Partitions.back().isDead());
- assert((ptrdiff_t)NumDeadPartitions ==
- std::count(Partitions.begin(), Partitions.end(), Partitions.back()));
- }
- Partitions.erase(Partitions.end() - NumDeadPartitions, Partitions.end());
-}
-
-AllocaPartitioning::AllocaPartitioning(const DataLayout &TD, AllocaInst &AI)
+AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
:
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
AI(AI),
#endif
- PointerEscapingInstr(0) {
- PartitionBuilder PB(TD, AI, *this);
- PartitionBuilder::PtrInfo PtrI = PB.visitPtr(AI);
+ PointerEscapingInstr(nullptr) {
+ SliceBuilder PB(DL, AI, *this);
+ SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI);
if (PtrI.isEscaped() || PtrI.isAborted()) {
// FIXME: We should sink the escape vs. abort info into the caller nicely,
- // possibly by just storing the PtrInfo in the AllocaPartitioning.
+ // possibly by just storing the PtrInfo in the AllocaSlices.
PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst()
: PtrI.getAbortingInst();
assert(PointerEscapingInstr && "Did not track a bad instruction");
return;
}
- // Sort the uses. This arranges for the offsets to be in ascending order,
- // and the sizes to be in descending order.
- std::sort(Partitions.begin(), Partitions.end());
-
- // Remove any partitions from the back which are marked as dead.
- while (!Partitions.empty() && Partitions.back().isDead())
- Partitions.pop_back();
-
- if (Partitions.size() > 1) {
- // Intersect splittability for all partitions with equal offsets and sizes.
- // Then remove all but the first so that we have a sequence of non-equal but
- // potentially overlapping partitions.
- for (iterator I = Partitions.begin(), J = I, E = Partitions.end(); I != E;
- I = J) {
- ++J;
- while (J != E && *I == *J) {
- I->IsSplittable &= J->IsSplittable;
- ++J;
- }
- }
- Partitions.erase(std::unique(Partitions.begin(), Partitions.end()),
- Partitions.end());
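+  // Remove any slices that were killed while walking the uses, such as the
+  // elided side of a no-op self-copy; a killed slice has a null use and
+  // reports isDead().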
+ Slices.erase(std::remove_if(Slices.begin(), Slices.end(),
+ std::mem_fun_ref(&Slice::isDead)),
+ Slices.end());
- // Split splittable and merge unsplittable partitions into a disjoint set
- // of partitions over the used space of the allocation.
- splitAndMergePartitions();
+#if __cplusplus >= 201103L && !defined(NDEBUG)
+ if (SROARandomShuffleSlices) {
+ std::mt19937 MT(static_cast<unsigned>(sys::TimeValue::now().msec()));
+ std::shuffle(Slices.begin(), Slices.end(), MT);
}
+#endif
- // Now build up the user lists for each of these disjoint partitions by
- // re-walking the recursive users of the alloca.
- Uses.resize(Partitions.size());
- UseBuilder UB(TD, AI, *this);
- PtrI = UB.visitPtr(AI);
- assert(!PtrI.isEscaped() && "Previously analyzed pointer now escapes!");
- assert(!PtrI.isAborted() && "Early aborted the visit of the pointer.");
+  // Sort the slices. This arranges for the offsets to be in ascending order,
+  // unsplittable slices before splittable ones at the same offset, and the
+  // sizes in descending order within each group.
+ std::sort(Slices.begin(), Slices.end());
}
-Type *AllocaPartitioning::getCommonType(iterator I) const {
- Type *Ty = 0;
- for (const_use_iterator UI = use_begin(I), UE = use_end(I); UI != UE; ++UI) {
- if (!UI->U)
- continue; // Skip dead uses.
- if (isa<IntrinsicInst>(*UI->U->getUser()))
- continue;
- if (UI->BeginOffset != I->BeginOffset || UI->EndOffset != I->EndOffset)
- continue;
-
- Type *UserTy = 0;
- if (LoadInst *LI = dyn_cast<LoadInst>(UI->U->getUser())) {
- UserTy = LI->getType();
- } else if (StoreInst *SI = dyn_cast<StoreInst>(UI->U->getUser())) {
- UserTy = SI->getValueOperand()->getType();
- } else {
- return 0; // Bail if we have weird uses.
- }
-
- if (IntegerType *ITy = dyn_cast<IntegerType>(UserTy)) {
- // If the type is larger than the partition, skip it. We only encounter
- // this for split integer operations where we want to use the type of the
- // entity causing the split.
- if (ITy->getBitWidth() > (I->EndOffset - I->BeginOffset)*8)
- continue;
-
- // If we have found an integer type use covering the alloca, use that
- // regardless of the other types, as integers are often used for a "bucket
- // of bits" type.
- return ITy;
- }
-
- if (Ty && Ty != UserTy)
- return 0;
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
- Ty = UserTy;
- }
- return Ty;
+void AllocaSlices::print(raw_ostream &OS, const_iterator I,
+ StringRef Indent) const {
+ printSlice(OS, I, Indent);
+ printUse(OS, I, Indent);
}
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-
-void AllocaPartitioning::print(raw_ostream &OS, const_iterator I,
- StringRef Indent) const {
- OS << Indent << "partition #" << (I - begin())
- << " [" << I->BeginOffset << "," << I->EndOffset << ")"
- << (I->IsSplittable ? " (splittable)" : "")
- << (Uses[I - begin()].empty() ? " (zero uses)" : "")
- << "\n";
+void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I,
+ StringRef Indent) const {
+ OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")"
+ << " slice #" << (I - begin())
+ << (I->isSplittable() ? " (splittable)" : "") << "\n";
}
-void AllocaPartitioning::printUsers(raw_ostream &OS, const_iterator I,
- StringRef Indent) const {
- for (const_use_iterator UI = use_begin(I), UE = use_end(I);
- UI != UE; ++UI) {
- if (!UI->U)
- continue; // Skip dead uses.
- OS << Indent << " [" << UI->BeginOffset << "," << UI->EndOffset << ") "
- << "used by: " << *UI->U->getUser() << "\n";
- if (MemTransferInst *II = dyn_cast<MemTransferInst>(UI->U->getUser())) {
- const MemTransferOffsets &MTO = MemTransferInstData.lookup(II);
- bool IsDest;
- if (!MTO.IsSplittable)
- IsDest = UI->BeginOffset == MTO.DestBegin;
- else
- IsDest = MTO.DestBegin != 0u;
- OS << Indent << " (original " << (IsDest ? "dest" : "source") << ": "
- << "[" << (IsDest ? MTO.DestBegin : MTO.SourceBegin)
- << "," << (IsDest ? MTO.DestEnd : MTO.SourceEnd) << ")\n";
- }
- }
+void AllocaSlices::printUse(raw_ostream &OS, const_iterator I,
+ StringRef Indent) const {
+ OS << Indent << " used by: " << *I->getUse()->getUser() << "\n";
}
-void AllocaPartitioning::print(raw_ostream &OS) const {
+void AllocaSlices::print(raw_ostream &OS) const {
if (PointerEscapingInstr) {
- OS << "No partitioning for alloca: " << AI << "\n"
+ OS << "Can't analyze slices for alloca: " << AI << "\n"
<< " A pointer to this alloca escaped by:\n"
<< " " << *PointerEscapingInstr << "\n";
return;
}
- OS << "Partitioning of alloca: " << AI << "\n";
- unsigned Num = 0;
- for (const_iterator I = begin(), E = end(); I != E; ++I, ++Num) {
+ OS << "Slices of alloca: " << AI << "\n";
+ for (const_iterator I = begin(), E = end(); I != E; ++I)
print(OS, I);
- printUsers(OS, I);
- }
}
-void AllocaPartitioning::dump(const_iterator I) const { print(dbgs(), I); }
-void AllocaPartitioning::dump() const { print(dbgs()); }
+LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const {
+ print(dbgs(), I);
+}
+LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); }
#endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-
namespace {
/// \brief Implementation of LoadAndStorePromoter for promoting allocas.
///
SmallVector<DbgValueInst *, 4> DVIs;
public:
- AllocaPromoter(const SmallVectorImpl<Instruction*> &Insts, SSAUpdater &S,
+ AllocaPromoter(const SmallVectorImpl<Instruction *> &Insts, SSAUpdater &S,
AllocaInst &AI, DIBuilder &DIB)
- : LoadAndStorePromoter(Insts, S), AI(AI), DIB(DIB) {}
+ : LoadAndStorePromoter(Insts, S), AI(AI), DIB(DIB) {}
void run(const SmallVectorImpl<Instruction*> &Insts) {
- // Remember which alloca we're promoting (for isInstInList).
+ // Retain the debug information attached to the alloca for use when
+ // rewriting loads and stores.
if (MDNode *DebugNode = MDNode::getIfExists(AI.getContext(), &AI)) {
- for (Value::use_iterator UI = DebugNode->use_begin(),
- UE = DebugNode->use_end();
- UI != UE; ++UI)
- if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(*UI))
+ for (User *U : DebugNode->users())
+ if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
DDIs.push_back(DDI);
- else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(*UI))
+ else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U))
DVIs.push_back(DVI);
}
LoadAndStorePromoter::run(Insts);
- AI.eraseFromParent();
+
+ // While we have the debug information, clear it off of the alloca. The
+ // caller takes care of deleting the alloca.
while (!DDIs.empty())
DDIs.pop_back_val()->eraseFromParent();
while (!DVIs.empty())
DVIs.pop_back_val()->eraseFromParent();
}
- virtual bool isInstInList(Instruction *I,
- const SmallVectorImpl<Instruction*> &Insts) const {
+ bool isInstInList(Instruction *I,
+ const SmallVectorImpl<Instruction*> &Insts) const override {
+ Value *Ptr;
if (LoadInst *LI = dyn_cast<LoadInst>(I))
- return LI->getOperand(0) == &AI;
- return cast<StoreInst>(I)->getPointerOperand() == &AI;
+ Ptr = LI->getOperand(0);
+ else
+ Ptr = cast<StoreInst>(I)->getPointerOperand();
+
+ // Only used to detect cycles, which will be rare and quickly found as
+ // we're walking up a chain of defs rather than down through uses.
+ SmallPtrSet<Value *, 4> Visited;
+
+ do {
+ if (Ptr == &AI)
+ return true;
+
+ if (BitCastInst *BCI = dyn_cast<BitCastInst>(Ptr))
+ Ptr = BCI->getOperand(0);
+ else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
+ Ptr = GEPI->getPointerOperand();
+ else
+ return false;
+
+ } while (Visited.insert(Ptr));
+
+ return false;
}
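  // A minimal sketch of the def-chain walk above, for a store whose pointer
  // operand is derived from the alloca through a bitcast and a GEP:
  //   %cast = bitcast { i32, i32 }* %AI to i8*
  //   %gep  = getelementptr inbounds i8* %cast, i64 4
  //   store i8 0, i8* %gep
  // Starting from %gep, the loop peels off the GEP and then the bitcast,
  // reaches &AI, and so treats the store as part of this promotion.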
- virtual void updateDebugInfo(Instruction *Inst) const {
- for (SmallVector<DbgDeclareInst *, 4>::const_iterator I = DDIs.begin(),
+ void updateDebugInfo(Instruction *Inst) const override {
+ for (SmallVectorImpl<DbgDeclareInst *>::const_iterator I = DDIs.begin(),
E = DDIs.end(); I != E; ++I) {
DbgDeclareInst *DDI = *I;
       if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
         ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
}
- for (SmallVector<DbgValueInst *, 4>::const_iterator I = DVIs.begin(),
+ for (SmallVectorImpl<DbgValueInst *>::const_iterator I = DVIs.begin(),
E = DVIs.end(); I != E; ++I) {
DbgValueInst *DVI = *I;
- Value *Arg = NULL;
+ Value *Arg = nullptr;
if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
// If an argument is zero extended then use argument directly. The ZExt
// may be zapped by an optimization pass in future.
if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
Arg = dyn_cast<Argument>(ZExt->getOperand(0));
- if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
+ else if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
Arg = dyn_cast<Argument>(SExt->getOperand(0));
if (!Arg)
- Arg = SI->getOperand(0);
+ Arg = SI->getValueOperand();
} else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
- Arg = LI->getOperand(0);
+ Arg = LI->getPointerOperand();
} else {
continue;
}
/// 1) It takes allocations of aggregates and analyzes the ways in which they
/// are used to try to split them into smaller allocations, ideally of
/// a single scalar data type. It will split up memcpy and memset accesses
-/// as necessary and try to isolate invidual scalar accesses.
+/// as necessary and try to isolate individual scalar accesses.
/// 2) It will transform accesses into forms which are suitable for SSA value
/// promotion. This can be replacing a memset with a scalar store of an
/// integer value, or it can involve speculating operations on a PHI or
const bool RequiresDomTree;
LLVMContext *C;
- const DataLayout *TD;
+ const DataLayout *DL;
DominatorTree *DT;
/// \brief Worklist of alloca instructions to simplify.
/// \brief A collection of alloca instructions we can directly promote.
std::vector<AllocaInst *> PromotableAllocas;
+ /// \brief A worklist of PHIs to speculate prior to promoting allocas.
+ ///
+ /// All of these PHIs have been checked for the safety of speculation and by
+ /// being speculated will allow promoting allocas currently in the promotable
+ /// queue.
+ SetVector<PHINode *, SmallVector<PHINode *, 2> > SpeculatablePHIs;
+
+ /// \brief A worklist of select instructions to speculate prior to promoting
+ /// allocas.
+ ///
+ /// All of these select instructions have been checked for the safety of
+ /// speculation and by being speculated will allow promoting allocas
+ /// currently in the promotable queue.
+ SetVector<SelectInst *, SmallVector<SelectInst *, 2> > SpeculatableSelects;
+
public:
SROA(bool RequiresDomTree = true)
: FunctionPass(ID), RequiresDomTree(RequiresDomTree),
- C(0), TD(0), DT(0) {
+ C(nullptr), DL(nullptr), DT(nullptr) {
initializeSROAPass(*PassRegistry::getPassRegistry());
}
- bool runOnFunction(Function &F);
- void getAnalysisUsage(AnalysisUsage &AU) const;
+ bool runOnFunction(Function &F) override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
- const char *getPassName() const { return "SROA"; }
+ const char *getPassName() const override { return "SROA"; }
static char ID;
private:
friend class PHIOrSelectSpeculator;
- friend class AllocaPartitionRewriter;
- friend class AllocaPartitionVectorRewriter;
+ friend class AllocaSliceRewriter;
- bool rewriteAllocaPartition(AllocaInst &AI,
- AllocaPartitioning &P,
- AllocaPartitioning::iterator PI);
- bool splitAlloca(AllocaInst &AI, AllocaPartitioning &P);
+ bool rewritePartition(AllocaInst &AI, AllocaSlices &S,
+ AllocaSlices::iterator B, AllocaSlices::iterator E,
+ int64_t BeginOffset, int64_t EndOffset,
+ ArrayRef<AllocaSlices::iterator> SplitUses);
+ bool splitAlloca(AllocaInst &AI, AllocaSlices &S);
bool runOnAlloca(AllocaInst &AI);
- void deleteDeadInstructions(SmallPtrSet<AllocaInst *, 4> &DeletedAllocas);
+ void clobberUse(Use &U);
+ void deleteDeadInstructions(SmallPtrSetImpl<AllocaInst *> &DeletedAllocas);
bool promoteAllocas(Function &F);
};
}
INITIALIZE_PASS_BEGIN(SROA, "sroa", "Scalar Replacement Of Aggregates",
false, false)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(SROA, "sroa", "Scalar Replacement Of Aggregates",
false, false)
-namespace {
-/// \brief Visitor to speculate PHIs and Selects where possible.
-class PHIOrSelectSpeculator : public InstVisitor<PHIOrSelectSpeculator> {
- // Befriend the base class so it can delegate to private visit methods.
- friend class llvm::InstVisitor<PHIOrSelectSpeculator>;
-
- const DataLayout &TD;
- AllocaPartitioning &P;
- SROA &Pass;
+/// Walk the range of a partitioning looking for a common type to cover this
+/// sequence of slices.
+static Type *findCommonType(AllocaSlices::const_iterator B,
+ AllocaSlices::const_iterator E,
+ uint64_t EndOffset) {
+ Type *Ty = nullptr;
+ bool TyIsCommon = true;
+ IntegerType *ITy = nullptr;
+
+ // Note that we need to look at *every* alloca slice's Use to ensure we
+ // always get consistent results regardless of the order of slices.
+ for (AllocaSlices::const_iterator I = B; I != E; ++I) {
+ Use *U = I->getUse();
+ if (isa<IntrinsicInst>(*U->getUser()))
+ continue;
+ if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset)
+ continue;
-public:
- PHIOrSelectSpeculator(const DataLayout &TD, AllocaPartitioning &P, SROA &Pass)
- : TD(TD), P(P), Pass(Pass) {}
+ Type *UserTy = nullptr;
+ if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
+ UserTy = LI->getType();
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
+ UserTy = SI->getValueOperand()->getType();
+ }
- /// \brief Visit the users of an alloca partition and rewrite them.
- void visitUsers(AllocaPartitioning::const_iterator PI) {
- // Note that we need to use an index here as the underlying vector of uses
- // may be grown during speculation. However, we never need to re-visit the
- // new uses, and so we can use the initial size bound.
- for (unsigned Idx = 0, Size = P.use_size(PI); Idx != Size; ++Idx) {
- const AllocaPartitioning::PartitionUse &PU = P.getUse(PI, Idx);
- if (!PU.U)
- continue; // Skip dead use.
+ if (IntegerType *UserITy = dyn_cast_or_null<IntegerType>(UserTy)) {
+ // If the type is larger than the partition, skip it. We only encounter
+ // this for split integer operations where we want to use the type of the
+ // entity causing the split. Also skip if the type is not a byte width
+ // multiple.
+ if (UserITy->getBitWidth() % 8 != 0 ||
+ UserITy->getBitWidth() / 8 > (EndOffset - B->beginOffset()))
+ continue;
- visit(cast<Instruction>(PU.U->getUser()));
+ // Track the largest bitwidth integer type used in this way in case there
+ // is no common type.
+ if (!ITy || ITy->getBitWidth() < UserITy->getBitWidth())
+ ITy = UserITy;
}
+
+ // To avoid depending on the order of slices, Ty and TyIsCommon must not
+ // depend on types skipped above.
+ if (!UserTy || (Ty && Ty != UserTy))
+ TyIsCommon = false; // Give up on anything but an iN type.
+ else
+ Ty = UserTy;
}
-private:
- // By default, skip this instruction.
- void visitInstruction(Instruction &I) {}
-
- /// PHI instructions that use an alloca and are subsequently loaded can be
- /// rewritten to load both input pointers in the pred blocks and then PHI the
- /// results, allowing the load of the alloca to be promoted.
- /// From this:
- /// %P2 = phi [i32* %Alloca, i32* %Other]
- /// %V = load i32* %P2
- /// to:
- /// %V1 = load i32* %Alloca -> will be mem2reg'd
- /// ...
- /// %V2 = load i32* %Other
- /// ...
- /// %V = phi [i32 %V1, i32 %V2]
- ///
- /// We can do this to a select if its only uses are loads and if the operands
- /// to the select can be loaded unconditionally.
- ///
- /// FIXME: This should be hoisted into a generic utility, likely in
- /// Transforms/Util/Local.h
- bool isSafePHIToSpeculate(PHINode &PN, SmallVectorImpl<LoadInst *> &Loads) {
- // For now, we can only do this promotion if the load is in the same block
- // as the PHI, and if there are no stores between the phi and load.
- // TODO: Allow recursive phi users.
- // TODO: Allow stores.
- BasicBlock *BB = PN.getParent();
- unsigned MaxAlign = 0;
- for (Value::use_iterator UI = PN.use_begin(), UE = PN.use_end();
- UI != UE; ++UI) {
- LoadInst *LI = dyn_cast<LoadInst>(*UI);
- if (LI == 0 || !LI->isSimple()) return false;
-
- // For now we only allow loads in the same block as the PHI. This is
- // a common case that happens when instcombine merges two loads through
- // a PHI.
- if (LI->getParent() != BB) return false;
-
- // Ensure that there are no instructions between the PHI and the load that
- // could store.
- for (BasicBlock::iterator BBI = &PN; &*BBI != LI; ++BBI)
- if (BBI->mayWriteToMemory())
- return false;
-
- MaxAlign = std::max(MaxAlign, LI->getAlignment());
- Loads.push_back(LI);
- }
-
- // We can only transform this if it is safe to push the loads into the
- // predecessor blocks. The only thing to watch out for is that we can't put
- // a possibly trapping load in the predecessor if it is a critical edge.
- for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num;
- ++Idx) {
- TerminatorInst *TI = PN.getIncomingBlock(Idx)->getTerminator();
- Value *InVal = PN.getIncomingValue(Idx);
-
- // If the value is produced by the terminator of the predecessor (an
- // invoke) or it has side-effects, there is no valid place to put a load
- // in the predecessor.
- if (TI == InVal || TI->mayHaveSideEffects())
+ return TyIsCommon ? Ty : ITy;
+}
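// For example, given two slices that each cover the whole candidate range:
//   %v = load float* %ptr        ; UserTy = float
//   store i32 %i, i32* %ptr2     ; UserTy = i32, also recorded in ITy
// the user types disagree, so TyIsCommon becomes false and the widest
// covering integer type seen (i32) is returned instead. If every covering
// use had been a float load or store, float would be the common type.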
+
+/// PHI instructions that use an alloca and are subsequently loaded can be
+/// rewritten to load both input pointers in the pred blocks and then PHI the
+/// results, allowing the load of the alloca to be promoted.
+/// From this:
+/// %P2 = phi [i32* %Alloca, i32* %Other]
+/// %V = load i32* %P2
+/// to:
+/// %V1 = load i32* %Alloca -> will be mem2reg'd
+/// ...
+/// %V2 = load i32* %Other
+/// ...
+/// %V = phi [i32 %V1, i32 %V2]
+///
+/// We can do this to a select if its only uses are loads and if the operands
+/// to the select can be loaded unconditionally.
+///
+/// FIXME: This should be hoisted into a generic utility, likely in
+/// Transforms/Util/Local.h
+static bool isSafePHIToSpeculate(PHINode &PN,
+ const DataLayout *DL = nullptr) {
+ // For now, we can only do this promotion if the load is in the same block
+ // as the PHI, and if there are no stores between the phi and load.
+ // TODO: Allow recursive phi users.
+ // TODO: Allow stores.
+ BasicBlock *BB = PN.getParent();
+ unsigned MaxAlign = 0;
+ bool HaveLoad = false;
+ for (User *U : PN.users()) {
+ LoadInst *LI = dyn_cast<LoadInst>(U);
+ if (!LI || !LI->isSimple())
+ return false;
+
+ // For now we only allow loads in the same block as the PHI. This is
+ // a common case that happens when instcombine merges two loads through
+ // a PHI.
+ if (LI->getParent() != BB)
+ return false;
+
+ // Ensure that there are no instructions between the PHI and the load that
+ // could store.
+ for (BasicBlock::iterator BBI = &PN; &*BBI != LI; ++BBI)
+ if (BBI->mayWriteToMemory())
return false;
- // If the predecessor has a single successor, then the edge isn't
- // critical.
- if (TI->getNumSuccessors() == 1)
- continue;
+ MaxAlign = std::max(MaxAlign, LI->getAlignment());
+ HaveLoad = true;
+ }
- // If this pointer is always safe to load, or if we can prove that there
- // is already a load in the block, then we can move the load to the pred
- // block.
- if (InVal->isDereferenceablePointer() ||
- isSafeToLoadUnconditionally(InVal, TI, MaxAlign, &TD))
- continue;
+ if (!HaveLoad)
+ return false;
+ // We can only transform this if it is safe to push the loads into the
+ // predecessor blocks. The only thing to watch out for is that we can't put
+ // a possibly trapping load in the predecessor if it is a critical edge.
+ for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
+ TerminatorInst *TI = PN.getIncomingBlock(Idx)->getTerminator();
+ Value *InVal = PN.getIncomingValue(Idx);
+
+ // If the value is produced by the terminator of the predecessor (an
+ // invoke) or it has side-effects, there is no valid place to put a load
+ // in the predecessor.
+ if (TI == InVal || TI->mayHaveSideEffects())
return false;
- }
- return true;
- }
+ // If the predecessor has a single successor, then the edge isn't
+ // critical.
+ if (TI->getNumSuccessors() == 1)
+ continue;
- void visitPHINode(PHINode &PN) {
- DEBUG(dbgs() << " original: " << PN << "\n");
+ // If this pointer is always safe to load, or if we can prove that there
+ // is already a load in the block, then we can move the load to the pred
+ // block.
+ if (InVal->isDereferenceablePointer(DL) ||
+ isSafeToLoadUnconditionally(InVal, TI, MaxAlign, DL))
+ continue;
- SmallVector<LoadInst *, 4> Loads;
- if (!isSafePHIToSpeculate(PN, Loads))
- return;
+ return false;
+ }
- assert(!Loads.empty());
+ return true;
+}
- Type *LoadTy = cast<PointerType>(PN.getType())->getElementType();
- IRBuilder<> PHIBuilder(&PN);
- PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(),
- PN.getName() + ".sroa.speculated");
+static void speculatePHINodeLoads(PHINode &PN) {
+ DEBUG(dbgs() << " original: " << PN << "\n");
- // Get the TBAA tag and alignment to use from one of the loads. It doesn't
- // matter which one we get and if any differ, it doesn't matter.
- LoadInst *SomeLoad = cast<LoadInst>(Loads.back());
- MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa);
- unsigned Align = SomeLoad->getAlignment();
+ Type *LoadTy = cast<PointerType>(PN.getType())->getElementType();
+ IRBuilderTy PHIBuilder(&PN);
+ PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(),
+ PN.getName() + ".sroa.speculated");
- // Rewrite all loads of the PN to use the new PHI.
- do {
- LoadInst *LI = Loads.pop_back_val();
- LI->replaceAllUsesWith(NewPN);
- Pass.DeadInsts.insert(LI);
- } while (!Loads.empty());
-
- // Inject loads into all of the pred blocks.
- for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
- BasicBlock *Pred = PN.getIncomingBlock(Idx);
- TerminatorInst *TI = Pred->getTerminator();
- Use *InUse = &PN.getOperandUse(PN.getOperandNumForIncomingValue(Idx));
- Value *InVal = PN.getIncomingValue(Idx);
- IRBuilder<> PredBuilder(TI);
-
- LoadInst *Load
- = PredBuilder.CreateLoad(InVal, (PN.getName() + ".sroa.speculate.load." +
- Pred->getName()));
- ++NumLoadsSpeculated;
- Load->setAlignment(Align);
- if (TBAATag)
- Load->setMetadata(LLVMContext::MD_tbaa, TBAATag);
- NewPN->addIncoming(Load, Pred);
-
- Instruction *Ptr = dyn_cast<Instruction>(InVal);
- if (!Ptr)
- // No uses to rewrite.
- continue;
+ // Get the AA tags and alignment to use from one of the loads. It doesn't
+ // matter which one we get and if any differ.
+ LoadInst *SomeLoad = cast<LoadInst>(PN.user_back());
- // Try to lookup and rewrite any partition uses corresponding to this phi
- // input.
- AllocaPartitioning::iterator PI
- = P.findPartitionForPHIOrSelectOperand(InUse);
- if (PI == P.end())
- continue;
+ AAMDNodes AATags;
+ SomeLoad->getAAMetadata(AATags);
+ unsigned Align = SomeLoad->getAlignment();
- // Replace the Use in the PartitionUse for this operand with the Use
- // inside the load.
- AllocaPartitioning::use_iterator UI
- = P.findPartitionUseForPHIOrSelectOperand(InUse);
- assert(isa<PHINode>(*UI->U->getUser()));
- UI->U = &Load->getOperandUse(Load->getPointerOperandIndex());
- }
- DEBUG(dbgs() << " speculated to: " << *NewPN << "\n");
- }
-
- /// Select instructions that use an alloca and are subsequently loaded can be
- /// rewritten to load both input pointers and then select between the result,
- /// allowing the load of the alloca to be promoted.
- /// From this:
- /// %P2 = select i1 %cond, i32* %Alloca, i32* %Other
- /// %V = load i32* %P2
- /// to:
- /// %V1 = load i32* %Alloca -> will be mem2reg'd
- /// %V2 = load i32* %Other
- /// %V = select i1 %cond, i32 %V1, i32 %V2
- ///
- /// We can do this to a select if its only uses are loads and if the operand
- /// to the select can be loaded unconditionally.
- bool isSafeSelectToSpeculate(SelectInst &SI,
- SmallVectorImpl<LoadInst *> &Loads) {
- Value *TValue = SI.getTrueValue();
- Value *FValue = SI.getFalseValue();
- bool TDerefable = TValue->isDereferenceablePointer();
- bool FDerefable = FValue->isDereferenceablePointer();
-
- for (Value::use_iterator UI = SI.use_begin(), UE = SI.use_end();
- UI != UE; ++UI) {
- LoadInst *LI = dyn_cast<LoadInst>(*UI);
- if (LI == 0 || !LI->isSimple()) return false;
-
- // Both operands to the select need to be dereferencable, either
- // absolutely (e.g. allocas) or at this point because we can see other
- // accesses to it.
- if (!TDerefable && !isSafeToLoadUnconditionally(TValue, LI,
- LI->getAlignment(), &TD))
- return false;
- if (!FDerefable && !isSafeToLoadUnconditionally(FValue, LI,
- LI->getAlignment(), &TD))
- return false;
- Loads.push_back(LI);
- }
+ // Rewrite all loads of the PN to use the new PHI.
+ while (!PN.use_empty()) {
+ LoadInst *LI = cast<LoadInst>(PN.user_back());
+ LI->replaceAllUsesWith(NewPN);
+ LI->eraseFromParent();
+ }
- return true;
+ // Inject loads into all of the pred blocks.
+ for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
+ BasicBlock *Pred = PN.getIncomingBlock(Idx);
+ TerminatorInst *TI = Pred->getTerminator();
+ Value *InVal = PN.getIncomingValue(Idx);
+ IRBuilderTy PredBuilder(TI);
+
+ LoadInst *Load = PredBuilder.CreateLoad(
+ InVal, (PN.getName() + ".sroa.speculate.load." + Pred->getName()));
+ ++NumLoadsSpeculated;
+ Load->setAlignment(Align);
+ if (AATags)
+ Load->setAAMetadata(AATags);
+ NewPN->addIncoming(Load, Pred);
}
- void visitSelectInst(SelectInst &SI) {
- DEBUG(dbgs() << " original: " << SI << "\n");
- IRBuilder<> IRB(&SI);
+ DEBUG(dbgs() << " speculated to: " << *NewPN << "\n");
+ PN.eraseFromParent();
+}
+
+/// Select instructions that use an alloca and are subsequently loaded can be
+/// rewritten to load both input pointers and then select between the result,
+/// allowing the load of the alloca to be promoted.
+/// From this:
+/// %P2 = select i1 %cond, i32* %Alloca, i32* %Other
+/// %V = load i32* %P2
+/// to:
+/// %V1 = load i32* %Alloca -> will be mem2reg'd
+/// %V2 = load i32* %Other
+/// %V = select i1 %cond, i32 %V1, i32 %V2
+///
+/// We can do this to a select if its only uses are loads and if the operand
+/// to the select can be loaded unconditionally.
+static bool isSafeSelectToSpeculate(SelectInst &SI,
+ const DataLayout *DL = nullptr) {
+ Value *TValue = SI.getTrueValue();
+ Value *FValue = SI.getFalseValue();
+ bool TDerefable = TValue->isDereferenceablePointer(DL);
+ bool FDerefable = FValue->isDereferenceablePointer(DL);
+
+ for (User *U : SI.users()) {
+ LoadInst *LI = dyn_cast<LoadInst>(U);
+ if (!LI || !LI->isSimple())
+ return false;
+
+ // Both operands to the select need to be dereferenceable, either
+ // absolutely (e.g. allocas) or at this point because we can see other
+ // accesses to it.
+ if (!TDerefable &&
+ !isSafeToLoadUnconditionally(TValue, LI, LI->getAlignment(), DL))
+ return false;
+ if (!FDerefable &&
+ !isSafeToLoadUnconditionally(FValue, LI, LI->getAlignment(), DL))
+ return false;
+ }
- // If the select isn't safe to speculate, just use simple logic to emit it.
- SmallVector<LoadInst *, 4> Loads;
- if (!isSafeSelectToSpeculate(SI, Loads))
- return;
+ return true;
+}
- Use *Ops[2] = { &SI.getOperandUse(1), &SI.getOperandUse(2) };
- AllocaPartitioning::iterator PIs[2];
- AllocaPartitioning::PartitionUse PUs[2];
- for (unsigned i = 0, e = 2; i != e; ++i) {
- PIs[i] = P.findPartitionForPHIOrSelectOperand(Ops[i]);
- if (PIs[i] != P.end()) {
- // If the pointer is within the partitioning, remove the select from
- // its uses. We'll add in the new loads below.
- AllocaPartitioning::use_iterator UI
- = P.findPartitionUseForPHIOrSelectOperand(Ops[i]);
- PUs[i] = *UI;
- // Clear out the use here so that the offsets into the use list remain
- // stable but this use is ignored when rewriting.
- UI->U = 0;
- }
- }
+static void speculateSelectInstLoads(SelectInst &SI) {
+ DEBUG(dbgs() << " original: " << SI << "\n");
- Value *TV = SI.getTrueValue();
- Value *FV = SI.getFalseValue();
- // Replace the loads of the select with a select of two loads.
- while (!Loads.empty()) {
- LoadInst *LI = Loads.pop_back_val();
+ IRBuilderTy IRB(&SI);
+ Value *TV = SI.getTrueValue();
+ Value *FV = SI.getFalseValue();
+ // Replace the loads of the select with a select of two loads.
+ while (!SI.use_empty()) {
+ LoadInst *LI = cast<LoadInst>(SI.user_back());
+ assert(LI->isSimple() && "We only speculate simple loads");
- IRB.SetInsertPoint(LI);
- LoadInst *TL =
+ IRB.SetInsertPoint(LI);
+ LoadInst *TL =
IRB.CreateLoad(TV, LI->getName() + ".sroa.speculate.load.true");
- LoadInst *FL =
+ LoadInst *FL =
IRB.CreateLoad(FV, LI->getName() + ".sroa.speculate.load.false");
- NumLoadsSpeculated += 2;
-
- // Transfer alignment and TBAA info if present.
- TL->setAlignment(LI->getAlignment());
- FL->setAlignment(LI->getAlignment());
- if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa)) {
- TL->setMetadata(LLVMContext::MD_tbaa, Tag);
- FL->setMetadata(LLVMContext::MD_tbaa, Tag);
- }
-
- Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL,
- LI->getName() + ".sroa.speculated");
+ NumLoadsSpeculated += 2;
- LoadInst *Loads[2] = { TL, FL };
- for (unsigned i = 0, e = 2; i != e; ++i) {
- if (PIs[i] != P.end()) {
- Use *LoadUse = &Loads[i]->getOperandUse(0);
- assert(PUs[i].U->get() == LoadUse->get());
- PUs[i].U = LoadUse;
- P.use_push_back(PIs[i], PUs[i]);
- }
- }
+ // Transfer alignment and AA info if present.
+ TL->setAlignment(LI->getAlignment());
+ FL->setAlignment(LI->getAlignment());
- DEBUG(dbgs() << " speculated to: " << *V << "\n");
- LI->replaceAllUsesWith(V);
- Pass.DeadInsts.insert(LI);
+ AAMDNodes Tags;
+ LI->getAAMetadata(Tags);
+ if (Tags) {
+ TL->setAAMetadata(Tags);
+ FL->setAAMetadata(Tags);
}
+
+ Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL,
+ LI->getName() + ".sroa.speculated");
+
+ DEBUG(dbgs() << " speculated to: " << *V << "\n");
+ LI->replaceAllUsesWith(V);
+ LI->eraseFromParent();
}
-};
+ SI.eraseFromParent();
}
/// \brief Build a GEP out of a base pointer and indices.
///
/// This will return the BasePtr if that is valid, or build a new GEP
/// instruction using the IRBuilder if GEP-ing is needed.
-static Value *buildGEP(IRBuilder<> &IRB, Value *BasePtr,
- SmallVectorImpl<Value *> &Indices,
- const Twine &Prefix) {
+static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
+ SmallVectorImpl<Value *> &Indices, Twine NamePrefix) {
if (Indices.empty())
return BasePtr;
if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
return BasePtr;
- return IRB.CreateInBoundsGEP(BasePtr, Indices, Prefix + ".idx");
+ return IRB.CreateInBoundsGEP(BasePtr, Indices, NamePrefix + "sroa_idx");
}
/// \brief Get a natural GEP off of the BasePtr walking through Ty toward
/// TargetTy. If we can't find one with the same type, we at least try to use
/// one with the same size. If none of that works, we just produce the GEP as
/// indicated by Indices to have the correct offset.
-static Value *getNaturalGEPWithType(IRBuilder<> &IRB, const DataLayout &TD,
+static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
Value *BasePtr, Type *Ty, Type *TargetTy,
SmallVectorImpl<Value *> &Indices,
- const Twine &Prefix) {
+ Twine NamePrefix) {
if (Ty == TargetTy)
- return buildGEP(IRB, BasePtr, Indices, Prefix);
+ return buildGEP(IRB, BasePtr, Indices, NamePrefix);
+
+ // Pointer size to use for the indices.
+ unsigned PtrSize = DL.getPointerTypeSizeInBits(BasePtr->getType());
// See if we can descend into a struct and locate a field with the correct
// type.
do {
if (ElementTy->isPointerTy())
break;
- if (SequentialType *SeqTy = dyn_cast<SequentialType>(ElementTy)) {
- ElementTy = SeqTy->getElementType();
- // Note that we use the default address space as this index is over an
- // array or a vector, not a pointer.
- Indices.push_back(IRB.getInt(APInt(TD.getPointerSizeInBits(0), 0)));
+
+ if (ArrayType *ArrayTy = dyn_cast<ArrayType>(ElementTy)) {
+ ElementTy = ArrayTy->getElementType();
+ Indices.push_back(IRB.getIntN(PtrSize, 0));
+ } else if (VectorType *VectorTy = dyn_cast<VectorType>(ElementTy)) {
+ ElementTy = VectorTy->getElementType();
+ Indices.push_back(IRB.getInt32(0));
} else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
if (STy->element_begin() == STy->element_end())
break; // Nothing left to descend into.
if (ElementTy != TargetTy)
Indices.erase(Indices.end() - NumLayers, Indices.end());
- return buildGEP(IRB, BasePtr, Indices, Prefix);
+ return buildGEP(IRB, BasePtr, Indices, NamePrefix);
}
/// \brief Recursively compute indices for a natural GEP.
///
/// This is the recursive step for getNaturalGEPWithOffset that walks down the
/// element types adding appropriate indices for the GEP.
-static Value *getNaturalGEPRecursively(IRBuilder<> &IRB, const DataLayout &TD,
+static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
Value *Ptr, Type *Ty, APInt &Offset,
Type *TargetTy,
SmallVectorImpl<Value *> &Indices,
- const Twine &Prefix) {
+ Twine NamePrefix) {
if (Offset == 0)
- return getNaturalGEPWithType(IRB, TD, Ptr, Ty, TargetTy, Indices, Prefix);
+ return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices, NamePrefix);
// We can't recurse through pointer types.
if (Ty->isPointerTy())
- return 0;
+ return nullptr;
// We try to analyze GEPs over vectors here, but note that these GEPs are
// extremely poorly defined currently. The long-term goal is to remove GEPing
// over a vector from the IR completely.
if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
- unsigned ElementSizeInBits = TD.getTypeSizeInBits(VecTy->getScalarType());
- if (ElementSizeInBits % 8)
- return 0; // GEPs over non-multiple of 8 size vector elements are invalid.
+ unsigned ElementSizeInBits = DL.getTypeSizeInBits(VecTy->getScalarType());
+ if (ElementSizeInBits % 8 != 0) {
+ // GEPs over non-multiple of 8 size vector elements are invalid.
+ return nullptr;
+ }
APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8);
APInt NumSkippedElements = Offset.sdiv(ElementSize);
if (NumSkippedElements.ugt(VecTy->getNumElements()))
- return 0;
+ return nullptr;
Offset -= NumSkippedElements * ElementSize;
Indices.push_back(IRB.getInt(NumSkippedElements));
- return getNaturalGEPRecursively(IRB, TD, Ptr, VecTy->getElementType(),
- Offset, TargetTy, Indices, Prefix);
+ return getNaturalGEPRecursively(IRB, DL, Ptr, VecTy->getElementType(),
+ Offset, TargetTy, Indices, NamePrefix);
}
if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
Type *ElementTy = ArrTy->getElementType();
- APInt ElementSize(Offset.getBitWidth(), TD.getTypeAllocSize(ElementTy));
+ APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
APInt NumSkippedElements = Offset.sdiv(ElementSize);
if (NumSkippedElements.ugt(ArrTy->getNumElements()))
- return 0;
+ return nullptr;
Offset -= NumSkippedElements * ElementSize;
Indices.push_back(IRB.getInt(NumSkippedElements));
- return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy,
- Indices, Prefix);
+ return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
+ Indices, NamePrefix);
}
StructType *STy = dyn_cast<StructType>(Ty);
if (!STy)
- return 0;
+ return nullptr;
- const StructLayout *SL = TD.getStructLayout(STy);
+ const StructLayout *SL = DL.getStructLayout(STy);
uint64_t StructOffset = Offset.getZExtValue();
if (StructOffset >= SL->getSizeInBytes())
- return 0;
+ return nullptr;
unsigned Index = SL->getElementContainingOffset(StructOffset);
Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index));
Type *ElementTy = STy->getElementType(Index);
- if (Offset.uge(TD.getTypeAllocSize(ElementTy)))
- return 0; // The offset points into alignment padding.
+ if (Offset.uge(DL.getTypeAllocSize(ElementTy)))
+ return nullptr; // The offset points into alignment padding.
Indices.push_back(IRB.getInt32(Index));
- return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy,
- Indices, Prefix);
+ return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
+ Indices, NamePrefix);
}
/// \brief Get a natural GEP from a base pointer to a particular offset and
/// Indices, and setting Ty to the result subtype.
///
/// If no natural GEP can be constructed, this function returns null.
-static Value *getNaturalGEPWithOffset(IRBuilder<> &IRB, const DataLayout &TD,
+static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
Value *Ptr, APInt Offset, Type *TargetTy,
SmallVectorImpl<Value *> &Indices,
- const Twine &Prefix) {
+ Twine NamePrefix) {
PointerType *Ty = cast<PointerType>(Ptr->getType());
// Don't consider any GEPs through an i8* as natural unless the TargetTy is
// an i8.
- if (Ty == IRB.getInt8PtrTy() && TargetTy->isIntegerTy(8))
- return 0;
+ if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) && TargetTy->isIntegerTy(8))
+ return nullptr;
Type *ElementTy = Ty->getElementType();
if (!ElementTy->isSized())
- return 0; // We can't GEP through an unsized element.
- APInt ElementSize(Offset.getBitWidth(), TD.getTypeAllocSize(ElementTy));
+ return nullptr; // We can't GEP through an unsized element.
+ APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
if (ElementSize == 0)
- return 0; // Zero-length arrays can't help us build a natural GEP.
+ return nullptr; // Zero-length arrays can't help us build a natural GEP.
APInt NumSkippedElements = Offset.sdiv(ElementSize);
Offset -= NumSkippedElements * ElementSize;
Indices.push_back(IRB.getInt(NumSkippedElements));
- return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy,
- Indices, Prefix);
+ return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
+ Indices, NamePrefix);
}
/// \brief Compute an adjusted pointer from Ptr by Offset bytes where the
/// The strategy for finding the more natural GEPs is to peel off layers of the
/// pointer, walking back through bit casts and GEPs, searching for a base
/// pointer from which we can compute a natural GEP with the desired
-/// properities. The algorithm tries to fold as many constant indices into
+/// properties. The algorithm tries to fold as many constant indices into
/// a single GEP as possible, thus making each GEP more independent of the
/// surrounding code.
-static Value *getAdjustedPtr(IRBuilder<> &IRB, const DataLayout &TD,
- Value *Ptr, APInt Offset, Type *PointerTy,
- const Twine &Prefix) {
+static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
+ APInt Offset, Type *PointerTy,
+ Twine NamePrefix) {
// Even though we don't look through PHI nodes, we could be called on an
// instruction in an unreachable block, which may be on a cycle.
SmallPtrSet<Value *, 4> Visited;
// We may end up computing an offset pointer that has the wrong type. If we
// never are able to compute one directly that has the correct type, we'll
// fall back to it, so keep it around here.
- Value *OffsetPtr = 0;
+ Value *OffsetPtr = nullptr;
// Remember any i8 pointer we come across to re-use if we need to do a raw
// byte offset.
- Value *Int8Ptr = 0;
+ Value *Int8Ptr = nullptr;
APInt Int8PtrOffset(Offset.getBitWidth(), 0);
Type *TargetTy = PointerTy->getPointerElementType();
// First fold any existing GEPs into the offset.
while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
APInt GEPOffset(Offset.getBitWidth(), 0);
- if (!GEP->accumulateConstantOffset(TD, GEPOffset))
+ if (!GEP->accumulateConstantOffset(DL, GEPOffset))
break;
Offset += GEPOffset;
Ptr = GEP->getPointerOperand();
// See if we can perform a natural GEP here.
Indices.clear();
- if (Value *P = getNaturalGEPWithOffset(IRB, TD, Ptr, Offset, TargetTy,
- Indices, Prefix)) {
+ if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy,
+ Indices, NamePrefix)) {
if (P->getType() == PointerTy) {
// Zap any offset pointer that we ended up computing in previous rounds.
if (OffsetPtr && OffsetPtr->use_empty())
if (!OffsetPtr) {
if (!Int8Ptr) {
- Int8Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy(),
- Prefix + ".raw_cast");
+ Int8Ptr = IRB.CreateBitCast(
+ Ptr, IRB.getInt8PtrTy(PointerTy->getPointerAddressSpace()),
+ NamePrefix + "sroa_raw_cast");
Int8PtrOffset = Offset;
}
OffsetPtr = Int8PtrOffset == 0 ? Int8Ptr :
IRB.CreateInBoundsGEP(Int8Ptr, IRB.getInt(Int8PtrOffset),
- Prefix + ".raw_idx");
+ NamePrefix + "sroa_raw_idx");
}
Ptr = OffsetPtr;
// On the off chance we were targeting i8*, guard the bitcast here.
if (Ptr->getType() != PointerTy)
- Ptr = IRB.CreateBitCast(Ptr, PointerTy, Prefix + ".cast");
+ Ptr = IRB.CreateBitCast(Ptr, PointerTy, NamePrefix + "sroa_cast");
return Ptr;
}
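// For example (a sketch assuming a 64-bit DataLayout and the default address
// space), adjusting
//   %a = alloca { i32, [4 x i8] }
// by an offset of 6 bytes to an i8* target can be expressed as one natural
// inbounds GEP instead of a raw byte offset:
//   %a.sroa_idx = getelementptr inbounds { i32, [4 x i8] }* %a, i64 0, i32 1, i64 2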
static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) {
if (OldTy == NewTy)
return true;
+ if (IntegerType *OldITy = dyn_cast<IntegerType>(OldTy))
+ if (IntegerType *NewITy = dyn_cast<IntegerType>(NewTy))
+ if (NewITy->getBitWidth() >= OldITy->getBitWidth())
+ return true;
if (DL.getTypeSizeInBits(NewTy) != DL.getTypeSizeInBits(OldTy))
return false;
if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType())
return false;
+ // We can convert pointers to integers and vice-versa. Same for vectors
+ // of pointers and integers.
+ OldTy = OldTy->getScalarType();
+ NewTy = NewTy->getScalarType();
if (NewTy->isPointerTy() || OldTy->isPointerTy()) {
if (NewTy->isPointerTy() && OldTy->isPointerTy())
return true;
/// This will try various different casting techniques, such as bitcasts,
/// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test
/// two types for viability with this routine.
-static Value *convertValue(const DataLayout &DL, IRBuilder<> &IRB, Value *V,
- Type *Ty) {
- assert(canConvertValue(DL, V->getType(), Ty) &&
- "Value not convertable to type");
- if (V->getType() == Ty)
+static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
+ Type *NewTy) {
+ Type *OldTy = V->getType();
+ assert(canConvertValue(DL, OldTy, NewTy) && "Value not convertable to type");
+
+ if (OldTy == NewTy)
return V;
- if (V->getType()->isIntegerTy() && Ty->isPointerTy())
- return IRB.CreateIntToPtr(V, Ty);
- if (V->getType()->isPointerTy() && Ty->isIntegerTy())
- return IRB.CreatePtrToInt(V, Ty);
- return IRB.CreateBitCast(V, Ty);
+ if (IntegerType *OldITy = dyn_cast<IntegerType>(OldTy))
+ if (IntegerType *NewITy = dyn_cast<IntegerType>(NewTy))
+ if (NewITy->getBitWidth() > OldITy->getBitWidth())
+ return IRB.CreateZExt(V, NewITy);
+
+ // See if we need inttoptr for this type pair. A cast involving both scalars
+ // and vectors requires an additional bitcast.
+ if (OldTy->getScalarType()->isIntegerTy() &&
+ NewTy->getScalarType()->isPointerTy()) {
+ // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8*
+ if (OldTy->isVectorTy() && !NewTy->isVectorTy())
+ return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
+ NewTy);
+
+ // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*>
+ if (!OldTy->isVectorTy() && NewTy->isVectorTy())
+ return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
+ NewTy);
+
+ return IRB.CreateIntToPtr(V, NewTy);
+ }
+
+ // See if we need ptrtoint for this type pair. A cast involving both scalars
+ // and vectors requires an additional bitcast.
+ if (OldTy->getScalarType()->isPointerTy() &&
+ NewTy->getScalarType()->isIntegerTy()) {
+ // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128
+ if (OldTy->isVectorTy() && !NewTy->isVectorTy())
+ return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
+ NewTy);
+
+ // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32>
+ if (!OldTy->isVectorTy() && NewTy->isVectorTy())
+ return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
+ NewTy);
+
+ return IRB.CreatePtrToInt(V, NewTy);
+ }
+
+ return IRB.CreateBitCast(V, NewTy);
+}
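// For example (a sketch assuming 64-bit pointers), converting a <2 x i32>
// value to i8* follows the scalar/vector expansion above:
//   %0 = bitcast <2 x i32> %v to i64
//   %1 = inttoptr i64 %0 to i8*
// and the reverse direction goes through ptrtoint followed by a bitcast.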
+
+/// \brief Test whether the given slice use can be promoted to a vector.
+///
+/// This function is called to test each entry in a partitioning which is
+/// slated for a single slice.
+static bool isVectorPromotionViableForSlice(
+ const DataLayout &DL, AllocaSlices &S, uint64_t SliceBeginOffset,
+ uint64_t SliceEndOffset, VectorType *Ty, uint64_t ElementSize,
+ AllocaSlices::const_iterator I) {
+ // First validate the slice offsets.
+ uint64_t BeginOffset =
+ std::max(I->beginOffset(), SliceBeginOffset) - SliceBeginOffset;
+ uint64_t BeginIndex = BeginOffset / ElementSize;
+ if (BeginIndex * ElementSize != BeginOffset ||
+ BeginIndex >= Ty->getNumElements())
+ return false;
+ uint64_t EndOffset =
+ std::min(I->endOffset(), SliceEndOffset) - SliceBeginOffset;
+ uint64_t EndIndex = EndOffset / ElementSize;
+ if (EndIndex * ElementSize != EndOffset || EndIndex > Ty->getNumElements())
+ return false;
+
+ assert(EndIndex > BeginIndex && "Empty vector!");
+ uint64_t NumElements = EndIndex - BeginIndex;
+ Type *SliceTy =
+ (NumElements == 1) ? Ty->getElementType()
+ : VectorType::get(Ty->getElementType(), NumElements);
+
+ Type *SplitIntTy =
+ Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8);
+
+ Use *U = I->getUse();
+
+ if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
+ if (MI->isVolatile())
+ return false;
+ if (!I->isSplittable())
+ return false; // Skip any unsplittable intrinsics.
+ } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
+ if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
+ II->getIntrinsicID() != Intrinsic::lifetime_end)
+ return false;
+ } else if (U->get()->getType()->getPointerElementType()->isStructTy()) {
+ // Disable vector promotion when there are loads or stores of an FCA.
+ return false;
+ } else if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
+ if (LI->isVolatile())
+ return false;
+ Type *LTy = LI->getType();
+ if (SliceBeginOffset > I->beginOffset() ||
+ SliceEndOffset < I->endOffset()) {
+ assert(LTy->isIntegerTy());
+ LTy = SplitIntTy;
+ }
+ if (!canConvertValue(DL, SliceTy, LTy))
+ return false;
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
+ if (SI->isVolatile())
+ return false;
+ Type *STy = SI->getValueOperand()->getType();
+ if (SliceBeginOffset > I->beginOffset() ||
+ SliceEndOffset < I->endOffset()) {
+ assert(STy->isIntegerTy());
+ STy = SplitIntTy;
+ }
+ if (!canConvertValue(DL, STy, SliceTy))
+ return false;
+ } else {
+ return false;
+ }
+
+ return true;
}
-/// \brief Test whether the given alloca partition can be promoted to a vector.
+/// \brief Test whether the given alloca partitioning and range of slices can be
+/// promoted to a vector.
///
/// This is a quick test to check whether we can rewrite a particular alloca
/// partition (and its newly formed alloca) into a vector alloca with only
/// SSA value. We only can ensure this for a limited set of operations, and we
/// don't want to do the rewrites unless we are confident that the result will
/// be promotable, so we have an early test here.
-static bool isVectorPromotionViable(const DataLayout &TD,
- Type *AllocaTy,
- AllocaPartitioning &P,
- uint64_t PartitionBeginOffset,
- uint64_t PartitionEndOffset,
- AllocaPartitioning::const_use_iterator I,
- AllocaPartitioning::const_use_iterator E) {
+static bool
+isVectorPromotionViable(const DataLayout &DL, Type *AllocaTy, AllocaSlices &S,
+ uint64_t SliceBeginOffset, uint64_t SliceEndOffset,
+ AllocaSlices::const_iterator I,
+ AllocaSlices::const_iterator E,
+ ArrayRef<AllocaSlices::iterator> SplitUses) {
VectorType *Ty = dyn_cast<VectorType>(AllocaTy);
if (!Ty)
return false;
- uint64_t ElementSize = TD.getTypeSizeInBits(Ty->getScalarType());
+ uint64_t ElementSize = DL.getTypeSizeInBits(Ty->getScalarType());
// While the definition of LLVM vectors is bitpacked, we don't support sizes
// that aren't byte sized.
if (ElementSize % 8)
return false;
- assert((TD.getTypeSizeInBits(Ty) % 8) == 0 &&
+ assert((DL.getTypeSizeInBits(Ty) % 8) == 0 &&
"vector size not a multiple of element size?");
ElementSize /= 8;
- for (; I != E; ++I) {
- if (!I->U)
- continue; // Skip dead use.
-
- uint64_t BeginOffset = I->BeginOffset - PartitionBeginOffset;
- uint64_t BeginIndex = BeginOffset / ElementSize;
- if (BeginIndex * ElementSize != BeginOffset ||
- BeginIndex >= Ty->getNumElements())
+ for (; I != E; ++I)
+ if (!isVectorPromotionViableForSlice(DL, S, SliceBeginOffset,
+ SliceEndOffset, Ty, ElementSize, I))
return false;
- uint64_t EndOffset = I->EndOffset - PartitionBeginOffset;
- uint64_t EndIndex = EndOffset / ElementSize;
- if (EndIndex * ElementSize != EndOffset ||
- EndIndex > Ty->getNumElements())
+
+ for (ArrayRef<AllocaSlices::iterator>::const_iterator SUI = SplitUses.begin(),
+ SUE = SplitUses.end();
+ SUI != SUE; ++SUI)
+ if (!isVectorPromotionViableForSlice(DL, S, SliceBeginOffset,
+ SliceEndOffset, Ty, ElementSize, *SUI))
return false;
- assert(EndIndex > BeginIndex && "Empty vector!");
- uint64_t NumElements = EndIndex - BeginIndex;
- Type *PartitionTy
- = (NumElements == 1) ? Ty->getElementType()
- : VectorType::get(Ty->getElementType(), NumElements);
+ return true;
+}
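// For example, an alloca of <4 x float> whose slices are simple loads and
// stores of <2 x float> at byte offsets 0 and 8 passes this test: every
// access maps onto a whole number of vector elements ([0,2) or [2,4)), so
// the partition can be rewritten as element extracts and blends over a
// single vector SSA value.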
- if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I->U->getUser())) {
- if (MI->isVolatile())
- return false;
- if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(I->U->getUser())) {
- const AllocaPartitioning::MemTransferOffsets &MTO
- = P.getMemTransferOffsets(*MTI);
- if (!MTO.IsSplittable)
- return false;
- }
- } else if (I->U->get()->getType()->getPointerElementType()->isStructTy()) {
- // Disable vector promotion when there are loads or stores of an FCA.
+/// \brief Test whether a slice of an alloca is valid for integer widening.
+///
+/// This implements the necessary checking for the \c isIntegerWideningViable
+/// test below on a single slice of the alloca.
+static bool isIntegerWideningViableForSlice(const DataLayout &DL,
+ Type *AllocaTy,
+ uint64_t AllocBeginOffset,
+ uint64_t Size, AllocaSlices &S,
+ AllocaSlices::const_iterator I,
+ bool &WholeAllocaOp) {
+ uint64_t RelBegin = I->beginOffset() - AllocBeginOffset;
+ uint64_t RelEnd = I->endOffset() - AllocBeginOffset;
+
+ // We can't reasonably handle cases where the load or store extends past
+ // the end of the alloca's type and into its padding.
+ if (RelEnd > Size)
+ return false;
+
+ Use *U = I->getUse();
+
+ if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
+ if (LI->isVolatile())
return false;
- } else if (LoadInst *LI = dyn_cast<LoadInst>(I->U->getUser())) {
- if (LI->isVolatile())
- return false;
- if (!canConvertValue(TD, PartitionTy, LI->getType()))
+ if (RelBegin == 0 && RelEnd == Size)
+ WholeAllocaOp = true;
+ if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) {
+ if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy))
return false;
- } else if (StoreInst *SI = dyn_cast<StoreInst>(I->U->getUser())) {
- if (SI->isVolatile())
- return false;
- if (!canConvertValue(TD, SI->getValueOperand()->getType(), PartitionTy))
+ } else if (RelBegin != 0 || RelEnd != Size ||
+ !canConvertValue(DL, AllocaTy, LI->getType())) {
+ // Non-integer loads need to be convertible from the alloca type so that
+ // they are promotable.
+ return false;
+ }
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
+ Type *ValueTy = SI->getValueOperand()->getType();
+ if (SI->isVolatile())
+ return false;
+ if (RelBegin == 0 && RelEnd == Size)
+ WholeAllocaOp = true;
+ if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) {
+ if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy))
return false;
- } else {
+ } else if (RelBegin != 0 || RelEnd != Size ||
+ !canConvertValue(DL, ValueTy, AllocaTy)) {
+ // Non-integer stores need to be convertible to the alloca type so that
+ // they are promotable.
return false;
}
+ } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
+ if (MI->isVolatile() || !isa<Constant>(MI->getLength()))
+ return false;
+ if (!I->isSplittable())
+ return false; // Skip any unsplittable intrinsics.
+ } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
+ if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
+ II->getIntrinsicID() != Intrinsic::lifetime_end)
+ return false;
+ } else {
+ return false;
}
+
return true;
}
/// This is a quick test to check whether we can rewrite the integer loads and
/// stores to a particular alloca into wider loads and stores and be able to
/// promote the resulting alloca.
-static bool isIntegerWideningViable(const DataLayout &TD,
- Type *AllocaTy,
- uint64_t AllocBeginOffset,
- AllocaPartitioning &P,
- AllocaPartitioning::const_use_iterator I,
- AllocaPartitioning::const_use_iterator E) {
- uint64_t SizeInBits = TD.getTypeSizeInBits(AllocaTy);
+static bool
+isIntegerWideningViable(const DataLayout &DL, Type *AllocaTy,
+ uint64_t AllocBeginOffset, AllocaSlices &S,
+ AllocaSlices::const_iterator I,
+ AllocaSlices::const_iterator E,
+ ArrayRef<AllocaSlices::iterator> SplitUses) {
+ uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy);
// Don't create integer types larger than the maximum bitwidth.
if (SizeInBits > IntegerType::MAX_INT_BITS)
return false;
// Don't try to handle allocas with bit-padding.
- if (SizeInBits != TD.getTypeStoreSizeInBits(AllocaTy))
+ if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy))
return false;
// We need to ensure that an integer type with the appropriate bitwidth can
// be converted to the alloca type, whatever that is. We don't want to force
// the alloca itself to have an integer type if there is a more suitable one.
Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits);
- if (!canConvertValue(TD, AllocaTy, IntTy) ||
- !canConvertValue(TD, IntTy, AllocaTy))
+ if (!canConvertValue(DL, AllocaTy, IntTy) ||
+ !canConvertValue(DL, IntTy, AllocaTy))
return false;
- uint64_t Size = TD.getTypeStoreSize(AllocaTy);
+ uint64_t Size = DL.getTypeStoreSize(AllocaTy);
- // Check the uses to ensure the uses are (likely) promoteable integer uses.
- // Also ensure that the alloca has a covering load or store. We don't want
- // to widen the integer operotains only to fail to promote due to some other
- // unsplittable entry (which we may make splittable later).
- bool WholeAllocaOp = false;
- for (; I != E; ++I) {
- if (!I->U)
- continue; // Skip dead use.
+ // While examining uses, we ensure that the alloca has a covering load or
+ // store. We don't want to widen the integer operations only to fail to
+ // promote due to some other unsplittable entry (which we may make splittable
+ // later). However, if there are only splittable uses, go ahead and assume
+ // that we cover the alloca.
+ bool WholeAllocaOp = (I != E) ? false : DL.isLegalInteger(SizeInBits);
- uint64_t RelBegin = I->BeginOffset - AllocBeginOffset;
- uint64_t RelEnd = I->EndOffset - AllocBeginOffset;
-
- // We can't reasonably handle cases where the load or store extends past
- // the end of the aloca's type and into its padding.
- if (RelEnd > Size)
+ for (; I != E; ++I)
+ if (!isIntegerWideningViableForSlice(DL, AllocaTy, AllocBeginOffset, Size,
+ S, I, WholeAllocaOp))
return false;
- if (LoadInst *LI = dyn_cast<LoadInst>(I->U->getUser())) {
- if (LI->isVolatile())
- return false;
- if (RelBegin == 0 && RelEnd == Size)
- WholeAllocaOp = true;
- if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) {
- if (ITy->getBitWidth() < TD.getTypeStoreSizeInBits(ITy))
- return false;
- continue;
- }
- // Non-integer loads need to be convertible from the alloca type so that
- // they are promotable.
- if (RelBegin != 0 || RelEnd != Size ||
- !canConvertValue(TD, AllocaTy, LI->getType()))
- return false;
- } else if (StoreInst *SI = dyn_cast<StoreInst>(I->U->getUser())) {
- Type *ValueTy = SI->getValueOperand()->getType();
- if (SI->isVolatile())
- return false;
- if (RelBegin == 0 && RelEnd == Size)
- WholeAllocaOp = true;
- if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) {
- if (ITy->getBitWidth() < TD.getTypeStoreSizeInBits(ITy))
- return false;
- continue;
- }
- // Non-integer stores need to be convertible to the alloca type so that
- // they are promotable.
- if (RelBegin != 0 || RelEnd != Size ||
- !canConvertValue(TD, ValueTy, AllocaTy))
- return false;
- } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I->U->getUser())) {
- if (MI->isVolatile() || !isa<Constant>(MI->getLength()))
- return false;
- if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(I->U->getUser())) {
- const AllocaPartitioning::MemTransferOffsets &MTO
- = P.getMemTransferOffsets(*MTI);
- if (!MTO.IsSplittable)
- return false;
- }
- } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->U->getUser())) {
- if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
- II->getIntrinsicID() != Intrinsic::lifetime_end)
- return false;
- } else {
+ for (ArrayRef<AllocaSlices::iterator>::const_iterator SUI = SplitUses.begin(),
+ SUE = SplitUses.end();
+ SUI != SUE; ++SUI)
+ if (!isIntegerWideningViableForSlice(DL, AllocaTy, AllocBeginOffset, Size,
+ S, *SUI, WholeAllocaOp))
return false;
- }
- }
+
return WholeAllocaOp;
}
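// For example, an alloca of i64 that is stored as a whole i64 but also read
// by a "load i32" of its first four bytes is viable here: the whole-alloca
// store provides the covering operation, and the narrow load can later be
// shifted and truncated out of a single wide integer value (see
// extractInteger below).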
-static Value *extractInteger(const DataLayout &DL, IRBuilder<> &IRB, Value *V,
+static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
IntegerType *Ty, uint64_t Offset,
const Twine &Name) {
DEBUG(dbgs() << " start: " << *V << "\n");
return V;
}
-static Value *insertInteger(const DataLayout &DL, IRBuilder<> &IRB, Value *Old,
+static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old,
Value *V, uint64_t Offset, const Twine &Name) {
IntegerType *IntTy = cast<IntegerType>(Old->getType());
IntegerType *Ty = cast<IntegerType>(V->getType());
return V;
}
-static Value *extractVector(IRBuilder<> &IRB, Value *V,
+static Value *extractVector(IRBuilderTy &IRB, Value *V,
unsigned BeginIndex, unsigned EndIndex,
const Twine &Name) {
VectorType *VecTy = cast<VectorType>(V->getType());
return V;
}
-static Value *insertVector(IRBuilder<> &IRB, Value *Old, Value *V,
+static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V,
unsigned BeginIndex, const Twine &Name) {
VectorType *VecTy = cast<VectorType>(Old->getType());
assert(VecTy && "Can only insert a vector into a vector");
V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
ConstantVector::get(Mask),
Name + ".expand");
- DEBUG(dbgs() << " shuffle1: " << *V << "\n");
+ DEBUG(dbgs() << " shuffle: " << *V << "\n");
Mask.clear();
for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
- if (i >= BeginIndex && i < EndIndex)
- Mask.push_back(IRB.getInt32(i));
- else
- Mask.push_back(IRB.getInt32(i + VecTy->getNumElements()));
- V = IRB.CreateShuffleVector(V, Old, ConstantVector::get(Mask),
- Name + "insert");
- DEBUG(dbgs() << " shuffle2: " << *V << "\n");
+ Mask.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex));
+
+ V = IRB.CreateSelect(ConstantVector::get(Mask), V, Old, Name + "blend");
+
+ DEBUG(dbgs() << " blend: " << *V << "\n");
return V;
}
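// For example, inserting a <2 x i32> %V into a <4 x i32> %Old at BeginIndex 1
// yields (sketch):
//   %expand = shufflevector <2 x i32> %V, <2 x i32> undef,
//                           <4 x i32> <i32 undef, i32 0, i32 1, i32 undef>
//   %blend  = select <4 x i1> <i1 false, i1 true, i1 true, i1 false>,
//                    <4 x i32> %expand, <4 x i32> %Old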
namespace {
-/// \brief Visitor to rewrite instructions using a partition of an alloca to
-/// use a new alloca.
+/// \brief Visitor to rewrite instructions using a particular slice of an alloca
+/// to use a new alloca.
///
/// Also implements the rewriting to vector-based accesses when the partition
/// passes the isVectorPromotionViable predicate. Most of the rewriting logic
/// lives here.
-class AllocaPartitionRewriter : public InstVisitor<AllocaPartitionRewriter,
- bool> {
+class AllocaSliceRewriter : public InstVisitor<AllocaSliceRewriter, bool> {
// Befriend the base class so it can delegate to private visit methods.
- friend class llvm::InstVisitor<AllocaPartitionRewriter, bool>;
+ friend class llvm::InstVisitor<AllocaSliceRewriter, bool>;
+ typedef llvm::InstVisitor<AllocaSliceRewriter, bool> Base;
- const DataLayout &TD;
- AllocaPartitioning &P;
+ const DataLayout &DL;
+ AllocaSlices &S;
SROA &Pass;
AllocaInst &OldAI, &NewAI;
const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset;
// If we are rewriting an alloca partition which can be written as pure
// vector operations, we stash extra information here. When VecTy is
- // non-null, we have some strict guarantees about the rewriten alloca:
+ // non-null, we have some strict guarantees about the rewritten alloca:
// - The new alloca is exactly the size of the vector type here.
// - The accesses all either map to the entire vector or to a single
// element.
// integer type will be stored here for easy access during rewriting.
IntegerType *IntTy;
- // The offset of the partition user currently being rewritten.
+ // The original offset of the slice currently being rewritten relative to
+ // the original alloca.
uint64_t BeginOffset, EndOffset;
+ // The new offsets of the slice currently being rewritten relative to the
+ // original alloca.
+ uint64_t NewBeginOffset, NewEndOffset;
+
+ uint64_t SliceSize;
+ bool IsSplittable;
+ bool IsSplit;
Use *OldUse;
Instruction *OldPtr;
- // The name prefix to use when rewriting instructions for this alloca.
- std::string NamePrefix;
+ // Track post-rewrite users which are PHI nodes and Selects.
+ SmallPtrSetImpl<PHINode *> &PHIUsers;
+ SmallPtrSetImpl<SelectInst *> &SelectUsers;
+
+ // Utility IR builder, whose name prefix is set up for each visited use, and
+ // the insertion point is set to point to the user.
+ IRBuilderTy IRB;
public:
- AllocaPartitionRewriter(const DataLayout &TD, AllocaPartitioning &P,
- AllocaPartitioning::iterator PI,
- SROA &Pass, AllocaInst &OldAI, AllocaInst &NewAI,
- uint64_t NewBeginOffset, uint64_t NewEndOffset)
- : TD(TD), P(P), Pass(Pass),
- OldAI(OldAI), NewAI(NewAI),
- NewAllocaBeginOffset(NewBeginOffset),
- NewAllocaEndOffset(NewEndOffset),
- NewAllocaTy(NewAI.getAllocatedType()),
- VecTy(), ElementTy(), ElementSize(), IntTy(),
- BeginOffset(), EndOffset() {
- }
-
- /// \brief Visit the users of the alloca partition and rewrite them.
- bool visitUsers(AllocaPartitioning::const_use_iterator I,
- AllocaPartitioning::const_use_iterator E) {
- if (isVectorPromotionViable(TD, NewAI.getAllocatedType(), P,
- NewAllocaBeginOffset, NewAllocaEndOffset,
- I, E)) {
- ++NumVectorized;
- VecTy = cast<VectorType>(NewAI.getAllocatedType());
- ElementTy = VecTy->getElementType();
- assert((TD.getTypeSizeInBits(VecTy->getScalarType()) % 8) == 0 &&
+ AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &S, SROA &Pass,
+ AllocaInst &OldAI, AllocaInst &NewAI,
+ uint64_t NewAllocaBeginOffset,
+ uint64_t NewAllocaEndOffset, bool IsVectorPromotable,
+ bool IsIntegerPromotable,
+ SmallPtrSetImpl<PHINode *> &PHIUsers,
+ SmallPtrSetImpl<SelectInst *> &SelectUsers)
+ : DL(DL), S(S), Pass(Pass), OldAI(OldAI), NewAI(NewAI),
+ NewAllocaBeginOffset(NewAllocaBeginOffset),
+ NewAllocaEndOffset(NewAllocaEndOffset),
+ NewAllocaTy(NewAI.getAllocatedType()),
+ VecTy(IsVectorPromotable ? cast<VectorType>(NewAllocaTy) : nullptr),
+ ElementTy(VecTy ? VecTy->getElementType() : nullptr),
+ ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy) / 8 : 0),
+ IntTy(IsIntegerPromotable
+ ? Type::getIntNTy(
+ NewAI.getContext(),
+ DL.getTypeSizeInBits(NewAI.getAllocatedType()))
+ : nullptr),
+ BeginOffset(), EndOffset(), IsSplittable(), IsSplit(), OldUse(),
+ OldPtr(), PHIUsers(PHIUsers), SelectUsers(SelectUsers),
+ IRB(NewAI.getContext(), ConstantFolder()) {
+ if (VecTy) {
+ assert((DL.getTypeSizeInBits(ElementTy) % 8) == 0 &&
"Only multiple-of-8 sized vector elements are viable");
- ElementSize = TD.getTypeSizeInBits(VecTy->getScalarType()) / 8;
- } else if (isIntegerWideningViable(TD, NewAI.getAllocatedType(),
- NewAllocaBeginOffset, P, I, E)) {
- IntTy = Type::getIntNTy(NewAI.getContext(),
- TD.getTypeSizeInBits(NewAI.getAllocatedType()));
+ ++NumVectorized;
}
+ assert((!IsVectorPromotable && !IsIntegerPromotable) ||
+ IsVectorPromotable != IsIntegerPromotable);
+ }
+
+ bool visit(AllocaSlices::const_iterator I) {
bool CanSROA = true;
- for (; I != E; ++I) {
- if (!I->U)
- continue; // Skip dead uses.
- BeginOffset = I->BeginOffset;
- EndOffset = I->EndOffset;
- OldUse = I->U;
- OldPtr = cast<Instruction>(I->U->get());
- NamePrefix = (Twine(NewAI.getName()) + "." + Twine(BeginOffset)).str();
- CanSROA &= visit(cast<Instruction>(I->U->getUser()));
- }
- if (VecTy) {
- assert(CanSROA);
- VecTy = 0;
- ElementTy = 0;
- ElementSize = 0;
- }
- if (IntTy) {
+ BeginOffset = I->beginOffset();
+ EndOffset = I->endOffset();
+ IsSplittable = I->isSplittable();
+ IsSplit =
+ BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset;
+
+ // Compute the intersecting offset range.
+ assert(BeginOffset < NewAllocaEndOffset);
+ assert(EndOffset > NewAllocaBeginOffset);
+ NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
+ NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
+
+ SliceSize = NewEndOffset - NewBeginOffset;
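+ // For example, a slice covering [0,16) of the original alloca rewritten
+ // against a new partition spanning [8,12) gets NewBeginOffset = 8,
+ // NewEndOffset = 12, SliceSize = 4, and IsSplit = true: only that 4-byte
+ // window is rewritten onto the new alloca here.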
+
+ OldUse = I->getUse();
+ OldPtr = cast<Instruction>(OldUse->get());
+
+ Instruction *OldUserI = cast<Instruction>(OldUse->getUser());
+ IRB.SetInsertPoint(OldUserI);
+ IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc());
+ IRB.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) + ".");
+
+ CanSROA &= visit(cast<Instruction>(OldUse->getUser()));
+ if (VecTy || IntTy)
assert(CanSROA);
- IntTy = 0;
- }
return CanSROA;
}
private:
+ // Make sure the other visit overloads are visible.
+ using Base::visit;
+
// Every instruction which can end up as a user must have a rewrite rule.
bool visitInstruction(Instruction &I) {
DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n");
llvm_unreachable("No rewrite rule for this instruction!");
}
- Twine getName(const Twine &Suffix) {
- return NamePrefix + Suffix;
- }
+ Value *getNewAllocaSlicePtr(IRBuilderTy &IRB, Type *PointerTy) {
+ // Note that the offset computation can use BeginOffset or NewBeginOffset
+ // interchangeably for unsplit slices.
+ assert(IsSplit || BeginOffset == NewBeginOffset);
+ uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
+
+#ifndef NDEBUG
+ StringRef OldName = OldPtr->getName();
+ // Skip through the last '.sroa.' component of the name.
+ size_t LastSROAPrefix = OldName.rfind(".sroa.");
+ if (LastSROAPrefix != StringRef::npos) {
+ OldName = OldName.substr(LastSROAPrefix + strlen(".sroa."));
+ // Look for an SROA slice index.
+ size_t IndexEnd = OldName.find_first_not_of("0123456789");
+ if (IndexEnd != StringRef::npos && OldName[IndexEnd] == '.') {
+ // Strip the index and look for the offset.
+ OldName = OldName.substr(IndexEnd + 1);
+ size_t OffsetEnd = OldName.find_first_not_of("0123456789");
+ if (OffsetEnd != StringRef::npos && OldName[OffsetEnd] == '.')
+ // Strip the offset.
+ OldName = OldName.substr(OffsetEnd + 1);
+ }
+ }
+ // Strip any SROA suffixes as well.
+ OldName = OldName.substr(0, OldName.find(".sroa_"));
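+ // For example, an old pointer named "a.sroa.3.16.copyload" is reduced to
+ // "copyload" here, so repeated SROA iterations don't keep accumulating
+ // ".sroa.<index>.<offset>" components in value names (this block only runs
+ // in asserts builds).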
+#endif
- Value *getAdjustedAllocaPtr(IRBuilder<> &IRB, Type *PointerTy) {
- assert(BeginOffset >= NewAllocaBeginOffset);
- APInt Offset(TD.getPointerSizeInBits(), BeginOffset - NewAllocaBeginOffset);
- return getAdjustedPtr(IRB, TD, &NewAI, Offset, PointerTy, getName(""));
+ return getAdjustedPtr(IRB, DL, &NewAI,
+ APInt(DL.getPointerSizeInBits(), Offset), PointerTy,
+#ifndef NDEBUG
+ Twine(OldName) + "."
+#else
+ Twine()
+#endif
+ );
}
- /// \brief Compute suitable alignment to access an offset into the new alloca.
- unsigned getOffsetAlign(uint64_t Offset) {
+ /// \brief Compute suitable alignment to access this slice of the *new* alloca.
+ ///
+ /// You can optionally pass a type to this routine and if that type's ABI
+ /// alignment is itself suitable, this will return zero.
+ unsigned getSliceAlign(Type *Ty = nullptr) {
unsigned NewAIAlign = NewAI.getAlignment();
if (!NewAIAlign)
- NewAIAlign = TD.getABITypeAlignment(NewAI.getAllocatedType());
- return MinAlign(NewAIAlign, Offset);
- }
-
- /// \brief Compute suitable alignment to access this partition of the new
- /// alloca.
- unsigned getPartitionAlign() {
- return getOffsetAlign(BeginOffset - NewAllocaBeginOffset);
- }
-
- /// \brief Compute suitable alignment to access a type at an offset of the
- /// new alloca.
- ///
- /// \returns zero if the type's ABI alignment is a suitable alignment,
- /// otherwise returns the maximal suitable alignment.
- unsigned getOffsetTypeAlign(Type *Ty, uint64_t Offset) {
- unsigned Align = getOffsetAlign(Offset);
- return Align == TD.getABITypeAlignment(Ty) ? 0 : Align;
- }
-
- /// \brief Compute suitable alignment to access a type at the beginning of
- /// this partition of the new alloca.
- ///
- /// See \c getOffsetTypeAlign for details; this routine delegates to it.
- unsigned getPartitionTypeAlign(Type *Ty) {
- return getOffsetTypeAlign(Ty, BeginOffset - NewAllocaBeginOffset);
+ NewAIAlign = DL.getABITypeAlignment(NewAI.getAllocatedType());
+ unsigned Align = MinAlign(NewAIAlign, NewBeginOffset - NewAllocaBeginOffset);
+ return (Ty && Align == DL.getABITypeAlignment(Ty)) ? 0 : Align;
}
unsigned getIndex(uint64_t Offset) {
Pass.DeadInsts.insert(I);
}
- Value *rewriteVectorizedLoadInst(IRBuilder<> &IRB) {
- unsigned BeginIndex = getIndex(BeginOffset);
- unsigned EndIndex = getIndex(EndOffset);
+ Value *rewriteVectorizedLoadInst() {
+ unsigned BeginIndex = getIndex(NewBeginOffset);
+ unsigned EndIndex = getIndex(NewEndOffset);
assert(EndIndex > BeginIndex && "Empty vector!");
Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
- getName(".load"));
- return extractVector(IRB, V, BeginIndex, EndIndex, getName(".vec"));
+ "load");
+ return extractVector(IRB, V, BeginIndex, EndIndex, "vec");
}
- Value *rewriteIntegerLoad(IRBuilder<> &IRB, LoadInst &LI) {
+ Value *rewriteIntegerLoad(LoadInst &LI) {
assert(IntTy && "We cannot insert an integer to the alloca");
assert(!LI.isVolatile());
Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
- getName(".load"));
- V = convertValue(TD, IRB, V, IntTy);
- assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
- uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
- if (Offset > 0 || EndOffset < NewAllocaEndOffset)
- V = extractInteger(TD, IRB, V, cast<IntegerType>(LI.getType()), Offset,
- getName(".extract"));
+ "load");
+ V = convertValue(DL, IRB, V, IntTy);
+ assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
+ uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
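+ // For example, an i16 load at original offset 10 of a partition widened to
+ // a single integer starting at offset 8 loads the whole integer and then
+ // extracts the sub-value at byte Offset 2; extractInteger is responsible for
+ // turning that byte offset into the appropriate shift for the target's
+ // endianness.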
+ if (Offset > 0 || NewEndOffset < NewAllocaEndOffset)
+ V = extractInteger(DL, IRB, V, cast<IntegerType>(LI.getType()), Offset,
+ "extract");
return V;
}
DEBUG(dbgs() << " original: " << LI << "\n");
Value *OldOp = LI.getOperand(0);
assert(OldOp == OldPtr);
- IRBuilder<> IRB(&LI);
-
- uint64_t Size = EndOffset - BeginOffset;
- bool IsSplitIntLoad = Size < TD.getTypeStoreSize(LI.getType());
-
- // If this memory access can be shown to *statically* extend outside the
- // bounds of the original allocation it's behavior is undefined. Rather
- // than trying to transform it, just replace it with undef.
- // FIXME: We should do something more clever for functions being
- // instrumented by asan.
- // FIXME: Eventually, once ASan and friends can flush out bugs here, this
- // should be transformed to a load of null making it unreachable.
- uint64_t OldAllocSize = TD.getTypeAllocSize(OldAI.getAllocatedType());
- if (TD.getTypeStoreSize(LI.getType()) > OldAllocSize) {
- LI.replaceAllUsesWith(UndefValue::get(LI.getType()));
- Pass.DeadInsts.insert(&LI);
- deleteIfTriviallyDead(OldOp);
- DEBUG(dbgs() << " to: undef!!\n");
- return true;
- }
- Type *TargetTy = IsSplitIntLoad ? Type::getIntNTy(LI.getContext(), Size * 8)
- : LI.getType();
+ Type *TargetTy = IsSplit ? Type::getIntNTy(LI.getContext(), SliceSize * 8)
+ : LI.getType();
bool IsPtrAdjusted = false;
Value *V;
if (VecTy) {
- V = rewriteVectorizedLoadInst(IRB);
+ V = rewriteVectorizedLoadInst();
} else if (IntTy && LI.getType()->isIntegerTy()) {
- V = rewriteIntegerLoad(IRB, LI);
- } else if (BeginOffset == NewAllocaBeginOffset &&
- canConvertValue(TD, NewAllocaTy, LI.getType())) {
+ V = rewriteIntegerLoad(LI);
+ } else if (NewBeginOffset == NewAllocaBeginOffset &&
+ canConvertValue(DL, NewAllocaTy, LI.getType())) {
V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
- LI.isVolatile(), getName(".load"));
+ LI.isVolatile(), LI.getName());
} else {
Type *LTy = TargetTy->getPointerTo();
- V = IRB.CreateAlignedLoad(getAdjustedAllocaPtr(IRB, LTy),
- getPartitionTypeAlign(TargetTy),
- LI.isVolatile(), getName(".load"));
+ V = IRB.CreateAlignedLoad(getNewAllocaSlicePtr(IRB, LTy),
+ getSliceAlign(TargetTy), LI.isVolatile(),
+ LI.getName());
IsPtrAdjusted = true;
}
- V = convertValue(TD, IRB, V, TargetTy);
+ V = convertValue(DL, IRB, V, TargetTy);
- if (IsSplitIntLoad) {
+ if (IsSplit) {
assert(!LI.isVolatile());
assert(LI.getType()->isIntegerTy() &&
"Only integer type loads and stores are split");
+ assert(SliceSize < DL.getTypeStoreSize(LI.getType()) &&
+ "Split load isn't smaller than original load");
assert(LI.getType()->getIntegerBitWidth() ==
- TD.getTypeStoreSizeInBits(LI.getType()) &&
+ DL.getTypeStoreSizeInBits(LI.getType()) &&
"Non-byte-multiple bit width");
- assert(LI.getType()->getIntegerBitWidth() ==
- TD.getTypeAllocSizeInBits(OldAI.getAllocatedType()) &&
- "Only alloca-wide loads can be split and recomposed");
// Move the insertion point just past the load so that we can refer to it.
- IRB.SetInsertPoint(llvm::next(BasicBlock::iterator(&LI)));
+ IRB.SetInsertPoint(std::next(BasicBlock::iterator(&LI)));
// Create a placeholder value with the same type as LI to use as the
// basis for the new value. This allows us to replace the uses of LI with
// the computed value, and then replace the placeholder with LI, leaving
// LI only used for this computation.
Value *Placeholder
= new LoadInst(UndefValue::get(LI.getType()->getPointerTo()));
- V = insertInteger(TD, IRB, Placeholder, V, BeginOffset,
- getName(".insert"));
+ V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset,
+ "insert");
LI.replaceAllUsesWith(V);
Placeholder->replaceAllUsesWith(&LI);
delete Placeholder;
return !LI.isVolatile() && !IsPtrAdjusted;
}
- bool rewriteVectorizedStoreInst(IRBuilder<> &IRB, Value *V,
- StoreInst &SI, Value *OldOp) {
- unsigned BeginIndex = getIndex(BeginOffset);
- unsigned EndIndex = getIndex(EndOffset);
- assert(EndIndex > BeginIndex && "Empty vector!");
- unsigned NumElements = EndIndex - BeginIndex;
- assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
- Type *PartitionTy
- = (NumElements == 1) ? ElementTy
- : VectorType::get(ElementTy, NumElements);
- if (V->getType() != PartitionTy)
- V = convertValue(TD, IRB, V, PartitionTy);
-
- // Mix in the existing elements.
- Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
- getName(".load"));
- V = insertVector(IRB, Old, V, BeginIndex, getName(".vec"));
+ bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp) {
+ if (V->getType() != VecTy) {
+ unsigned BeginIndex = getIndex(NewBeginOffset);
+ unsigned EndIndex = getIndex(NewEndOffset);
+ assert(EndIndex > BeginIndex && "Empty vector!");
+ unsigned NumElements = EndIndex - BeginIndex;
+ assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
+ Type *SliceTy =
+ (NumElements == 1) ? ElementTy
+ : VectorType::get(ElementTy, NumElements);
+ if (V->getType() != SliceTy)
+ V = convertValue(DL, IRB, V, SliceTy);
+ // Mix in the existing elements.
+ Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
+ "load");
+ V = insertVector(IRB, Old, V, BeginIndex, "vec");
+ }
StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
Pass.DeadInsts.insert(&SI);
return true;
}
- bool rewriteIntegerStore(IRBuilder<> &IRB, Value *V, StoreInst &SI) {
+ bool rewriteIntegerStore(Value *V, StoreInst &SI) {
assert(IntTy && "We cannot extract an integer from the alloca");
assert(!SI.isVolatile());
- if (TD.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) {
+ if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) {
Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
- getName(".oldload"));
- Old = convertValue(TD, IRB, Old, IntTy);
+ "oldload");
+ Old = convertValue(DL, IRB, Old, IntTy);
assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
- V = insertInteger(TD, IRB, Old, SI.getValueOperand(), Offset,
- getName(".insert"));
+ V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset,
+ "insert");
}
- V = convertValue(TD, IRB, V, NewAllocaTy);
+ V = convertValue(DL, IRB, V, NewAllocaTy);
StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
Pass.DeadInsts.insert(&SI);
(void)Store;
DEBUG(dbgs() << " original: " << SI << "\n");
Value *OldOp = SI.getOperand(1);
assert(OldOp == OldPtr);
- IRBuilder<> IRB(&SI);
Value *V = SI.getValueOperand();
if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets()))
Pass.PostPromotionWorklist.insert(AI);
- uint64_t Size = EndOffset - BeginOffset;
- if (Size < TD.getTypeStoreSize(V->getType())) {
+ if (SliceSize < DL.getTypeStoreSize(V->getType())) {
assert(!SI.isVolatile());
assert(V->getType()->isIntegerTy() &&
"Only integer type loads and stores are split");
assert(V->getType()->getIntegerBitWidth() ==
- TD.getTypeStoreSizeInBits(V->getType()) &&
+ DL.getTypeStoreSizeInBits(V->getType()) &&
"Non-byte-multiple bit width");
- assert(V->getType()->getIntegerBitWidth() ==
- TD.getTypeAllocSizeInBits(OldAI.getAllocatedType()) &&
- "Only alloca-wide stores can be split and recomposed");
- IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), Size * 8);
- V = extractInteger(TD, IRB, V, NarrowTy, BeginOffset,
- getName(".extract"));
+ IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), SliceSize * 8);
+ V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset,
+ "extract");
}
if (VecTy)
- return rewriteVectorizedStoreInst(IRB, V, SI, OldOp);
+ return rewriteVectorizedStoreInst(V, SI, OldOp);
if (IntTy && V->getType()->isIntegerTy())
- return rewriteIntegerStore(IRB, V, SI);
+ return rewriteIntegerStore(V, SI);
StoreInst *NewSI;
- if (BeginOffset == NewAllocaBeginOffset &&
- canConvertValue(TD, V->getType(), NewAllocaTy)) {
- V = convertValue(TD, IRB, V, NewAllocaTy);
+ if (NewBeginOffset == NewAllocaBeginOffset &&
+ NewEndOffset == NewAllocaEndOffset &&
+ canConvertValue(DL, V->getType(), NewAllocaTy)) {
+ V = convertValue(DL, IRB, V, NewAllocaTy);
NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
SI.isVolatile());
} else {
- Value *NewPtr = getAdjustedAllocaPtr(IRB, V->getType()->getPointerTo());
- NewSI = IRB.CreateAlignedStore(V, NewPtr,
- getPartitionTypeAlign(V->getType()),
+ Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo());
+ NewSI = IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(V->getType()),
SI.isVolatile());
}
(void)NewSI;
///
/// Note that this routine assumes an i8 is a byte. If that isn't true, don't
/// call this routine.
- /// FIXME: Heed the abvice above.
+ /// FIXME: Heed the advice above.
///
/// \param V The i8 value to splat.
/// \param Size The number of bytes in the output (assuming i8 is one byte)
- Value *getIntegerSplat(IRBuilder<> &IRB, Value *V, unsigned Size) {
+ Value *getIntegerSplat(Value *V, unsigned Size) {
assert(Size > 0 && "Expected a positive number of bytes.");
IntegerType *VTy = cast<IntegerType>(V->getType());
assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte");
return V;
Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size*8);
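+ // Splat by multiplying the zero-extended byte by an integer whose bytes are
+ // all 0x01 (all-ones divided by 0xFF). For example, splatting 0xAB across
+ // four bytes computes 0x000000AB * 0x01010101 = 0xABABABAB.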
- V = IRB.CreateMul(IRB.CreateZExt(V, SplatIntTy, getName(".zext")),
+ V = IRB.CreateMul(IRB.CreateZExt(V, SplatIntTy, "zext"),
ConstantExpr::getUDiv(
Constant::getAllOnesValue(SplatIntTy),
ConstantExpr::getZExt(
Constant::getAllOnesValue(V->getType()),
SplatIntTy)),
- getName(".isplat"));
+ "isplat");
return V;
}
/// \brief Compute a vector splat for a given element value.
- Value *getVectorSplat(IRBuilder<> &IRB, Value *V, unsigned NumElements) {
- V = IRB.CreateVectorSplat(NumElements, V, NamePrefix);
+ Value *getVectorSplat(Value *V, unsigned NumElements) {
+ V = IRB.CreateVectorSplat(NumElements, V, "vsplat");
DEBUG(dbgs() << " splat: " << *V << "\n");
return V;
}
bool visitMemSetInst(MemSetInst &II) {
DEBUG(dbgs() << " original: " << II << "\n");
- IRBuilder<> IRB(&II);
assert(II.getRawDest() == OldPtr);
// If the memset has a variable size, it cannot be split, just adjust the
// pointer to the new alloca.
if (!isa<Constant>(II.getLength())) {
- II.setDest(getAdjustedAllocaPtr(IRB, II.getRawDest()->getType()));
+ assert(!IsSplit);
+ assert(NewBeginOffset == BeginOffset);
+ II.setDest(getNewAllocaSlicePtr(IRB, OldPtr->getType()));
Type *CstTy = II.getAlignmentCst()->getType();
- II.setAlignment(ConstantInt::get(CstTy, getPartitionAlign()));
+ II.setAlignment(ConstantInt::get(CstTy, getSliceAlign()));
deleteIfTriviallyDead(OldPtr);
return false;
// If this doesn't map cleanly onto the alloca type, and that type isn't
// a single value type, just emit a memset.
if (!VecTy && !IntTy &&
- (BeginOffset != NewAllocaBeginOffset ||
- EndOffset != NewAllocaEndOffset ||
+ (BeginOffset > NewAllocaBeginOffset ||
+ EndOffset < NewAllocaEndOffset ||
+ SliceSize != DL.getTypeStoreSize(AllocaTy) ||
!AllocaTy->isSingleValueType() ||
- !TD.isLegalInteger(TD.getTypeSizeInBits(ScalarTy)) ||
- TD.getTypeSizeInBits(ScalarTy)%8 != 0)) {
+ !DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy)) ||
+ DL.getTypeSizeInBits(ScalarTy)%8 != 0)) {
Type *SizeTy = II.getLength()->getType();
- Constant *Size = ConstantInt::get(SizeTy, EndOffset - BeginOffset);
- CallInst *New
- = IRB.CreateMemSet(getAdjustedAllocaPtr(IRB,
- II.getRawDest()->getType()),
- II.getValue(), Size, getPartitionAlign(),
- II.isVolatile());
+ Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
+ CallInst *New = IRB.CreateMemSet(
+ getNewAllocaSlicePtr(IRB, OldPtr->getType()), II.getValue(), Size,
+ getSliceAlign(), II.isVolatile());
(void)New;
DEBUG(dbgs() << " to: " << *New << "\n");
return false;
// If this is a memset of a vectorized alloca, insert it.
assert(ElementTy == ScalarTy);
- unsigned BeginIndex = getIndex(BeginOffset);
- unsigned EndIndex = getIndex(EndOffset);
+ unsigned BeginIndex = getIndex(NewBeginOffset);
+ unsigned EndIndex = getIndex(NewEndOffset);
assert(EndIndex > BeginIndex && "Empty vector!");
unsigned NumElements = EndIndex - BeginIndex;
assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
- Value *Splat = getIntegerSplat(IRB, II.getValue(),
- TD.getTypeSizeInBits(ElementTy)/8);
- Splat = convertValue(TD, IRB, Splat, ElementTy);
+ Value *Splat =
+ getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ElementTy) / 8);
+ Splat = convertValue(DL, IRB, Splat, ElementTy);
if (NumElements > 1)
- Splat = getVectorSplat(IRB, Splat, NumElements);
+ Splat = getVectorSplat(Splat, NumElements);
Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
- getName(".oldload"));
- V = insertVector(IRB, Old, Splat, BeginIndex, getName(".vec"));
+ "oldload");
+ V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
} else if (IntTy) {
// If this is a memset on an alloca where we can widen stores, insert the
// set integer.
assert(!II.isVolatile());
- uint64_t Size = EndOffset - BeginOffset;
- V = getIntegerSplat(IRB, II.getValue(), Size);
+ uint64_t Size = NewEndOffset - NewBeginOffset;
+ V = getIntegerSplat(II.getValue(), Size);
if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
EndOffset != NewAllocaEndOffset)) {
Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
- getName(".oldload"));
- Old = convertValue(TD, IRB, Old, IntTy);
- assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
- uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
- V = insertInteger(TD, IRB, Old, V, Offset, getName(".insert"));
+ "oldload");
+ Old = convertValue(DL, IRB, Old, IntTy);
+ uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
+ V = insertInteger(DL, IRB, Old, V, Offset, "insert");
} else {
assert(V->getType() == IntTy &&
"Wrong type for an alloca wide integer!");
}
- V = convertValue(TD, IRB, V, AllocaTy);
+ V = convertValue(DL, IRB, V, AllocaTy);
} else {
// Established these invariants above.
- assert(BeginOffset == NewAllocaBeginOffset);
- assert(EndOffset == NewAllocaEndOffset);
+ assert(NewBeginOffset == NewAllocaBeginOffset);
+ assert(NewEndOffset == NewAllocaEndOffset);
- V = getIntegerSplat(IRB, II.getValue(),
- TD.getTypeSizeInBits(ScalarTy)/8);
+ V = getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ScalarTy) / 8);
if (VectorType *AllocaVecTy = dyn_cast<VectorType>(AllocaTy))
- V = getVectorSplat(IRB, V, AllocaVecTy->getNumElements());
+ V = getVectorSplat(V, AllocaVecTy->getNumElements());
- V = convertValue(TD, IRB, V, AllocaTy);
+ V = convertValue(DL, IRB, V, AllocaTy);
}
Value *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
// them into two categories: split intrinsics and unsplit intrinsics.
DEBUG(dbgs() << " original: " << II << "\n");
- IRBuilder<> IRB(&II);
-
- assert(II.getRawSource() == OldPtr || II.getRawDest() == OldPtr);
- bool IsDest = II.getRawDest() == OldPtr;
- const AllocaPartitioning::MemTransferOffsets &MTO
- = P.getMemTransferOffsets(II);
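+ // Compare the Use itself rather than the pointer value: for a transfer
+ // within a single alloca the source and dest operands can be the same
+ // Value, and only the Use identifies which operand we are rewriting.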
+ bool IsDest = &II.getRawDestUse() == OldUse;
+ assert((IsDest && II.getRawDest() == OldPtr) ||
+ (!IsDest && II.getRawSource() == OldPtr));
- // Compute the relative offset within the transfer.
- unsigned IntPtrWidth = TD.getPointerSizeInBits();
- APInt RelOffset(IntPtrWidth, BeginOffset - (IsDest ? MTO.DestBegin
- : MTO.SourceBegin));
-
- unsigned Align = II.getAlignment();
- if (Align > 1)
- Align = MinAlign(RelOffset.zextOrTrunc(64).getZExtValue(),
- MinAlign(II.getAlignment(), getPartitionAlign()));
+ unsigned SliceAlign = getSliceAlign();
// For unsplit intrinsics, we simply modify the source and destination
// pointers in place. This isn't just an optimization, it is a matter of
// a variable length. We may also be dealing with memmove instead of
// memcpy, and so simply updating the pointers is necessary for us to
// update both source and dest of a single call.
- if (!MTO.IsSplittable) {
- Value *OldOp = IsDest ? II.getRawDest() : II.getRawSource();
+ if (!IsSplittable) {
+ Value *AdjustedPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
if (IsDest)
- II.setDest(getAdjustedAllocaPtr(IRB, II.getRawDest()->getType()));
+ II.setDest(AdjustedPtr);
else
- II.setSource(getAdjustedAllocaPtr(IRB, II.getRawSource()->getType()));
+ II.setSource(AdjustedPtr);
- Type *CstTy = II.getAlignmentCst()->getType();
- II.setAlignment(ConstantInt::get(CstTy, Align));
+ if (II.getAlignment() > SliceAlign) {
+ Type *CstTy = II.getAlignmentCst()->getType();
+ II.setAlignment(
+ ConstantInt::get(CstTy, MinAlign(II.getAlignment(), SliceAlign)));
+ }
DEBUG(dbgs() << " to: " << II << "\n");
- deleteIfTriviallyDead(OldOp);
+ deleteIfTriviallyDead(OldPtr);
return false;
}
// For split transfer intrinsics we have an incredibly useful assurance:
// If this doesn't map cleanly onto the alloca type, and that type isn't
// a single value type, just emit a memcpy.
- bool EmitMemCpy
- = !VecTy && !IntTy && (BeginOffset != NewAllocaBeginOffset ||
- EndOffset != NewAllocaEndOffset ||
- !NewAI.getAllocatedType()->isSingleValueType());
+ bool EmitMemCpy =
+ !VecTy && !IntTy &&
+ (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset ||
+ SliceSize != DL.getTypeStoreSize(NewAI.getAllocatedType()) ||
+ !NewAI.getAllocatedType()->isSingleValueType());
// If we're just going to emit a memcpy, the alloca hasn't changed, and the
// size hasn't been shrunk based on analysis of the viable range, this is
// a no-op.
if (EmitMemCpy && &OldAI == &NewAI) {
- uint64_t OrigBegin = IsDest ? MTO.DestBegin : MTO.SourceBegin;
- uint64_t OrigEnd = IsDest ? MTO.DestEnd : MTO.SourceEnd;
// Ensure the start lines up.
- assert(BeginOffset == OrigBegin);
- (void)OrigBegin;
+ assert(NewBeginOffset == BeginOffset);
// Rewrite the size as needed.
- if (EndOffset != OrigEnd)
+ if (NewEndOffset != EndOffset)
II.setLength(ConstantInt::get(II.getLength()->getType(),
- EndOffset - BeginOffset));
+ NewEndOffset - NewBeginOffset));
return false;
}
// Record this instruction for deletion.
// alloca that should be re-examined after rewriting this instruction.
Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest();
if (AllocaInst *AI
- = dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets()))
+ = dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets())) {
+ assert(AI != &OldAI && AI != &NewAI &&
+ "Splittable transfers cannot reach the same alloca on both ends.");
Pass.Worklist.insert(AI);
+ }
- if (EmitMemCpy) {
- Type *OtherPtrTy = IsDest ? II.getRawSource()->getType()
- : II.getRawDest()->getType();
+ Type *OtherPtrTy = OtherPtr->getType();
+ unsigned OtherAS = OtherPtrTy->getPointerAddressSpace();
+ // Compute the relative offset for the other pointer within the transfer.
+ unsigned IntPtrWidth = DL.getPointerSizeInBits(OtherAS);
+ APInt OtherOffset(IntPtrWidth, NewBeginOffset - BeginOffset);
+ unsigned OtherAlign = MinAlign(II.getAlignment() ? II.getAlignment() : 1,
+ OtherOffset.zextOrTrunc(64).getZExtValue());
+
+ if (EmitMemCpy) {
// Compute the other pointer, folding as much as possible to produce
// a single, simple GEP in most cases.
- OtherPtr = getAdjustedPtr(IRB, TD, OtherPtr, RelOffset, OtherPtrTy,
- getName("." + OtherPtr->getName()));
+ OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
+ OtherPtr->getName() + ".");
- Value *OurPtr
- = getAdjustedAllocaPtr(IRB, IsDest ? II.getRawDest()->getType()
- : II.getRawSource()->getType());
+ Value *OurPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
Type *SizeTy = II.getLength()->getType();
- Constant *Size = ConstantInt::get(SizeTy, EndOffset - BeginOffset);
+ Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
- CallInst *New = IRB.CreateMemCpy(IsDest ? OurPtr : OtherPtr,
- IsDest ? OtherPtr : OurPtr,
- Size, Align, II.isVolatile());
+ CallInst *New = IRB.CreateMemCpy(
+ IsDest ? OurPtr : OtherPtr, IsDest ? OtherPtr : OurPtr, Size,
+ MinAlign(SliceAlign, OtherAlign), II.isVolatile());
(void)New;
DEBUG(dbgs() << " to: " << *New << "\n");
return false;
}
- // Note that we clamp the alignment to 1 here as a 0 alignment for a memcpy
- // is equivalent to 1, but that isn't true if we end up rewriting this as
- // a load or store.
- if (!Align)
- Align = 1;
-
- bool IsWholeAlloca = BeginOffset == NewAllocaBeginOffset &&
- EndOffset == NewAllocaEndOffset;
- uint64_t Size = EndOffset - BeginOffset;
- unsigned BeginIndex = VecTy ? getIndex(BeginOffset) : 0;
- unsigned EndIndex = VecTy ? getIndex(EndOffset) : 0;
+ bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset &&
+ NewEndOffset == NewAllocaEndOffset;
+ uint64_t Size = NewEndOffset - NewBeginOffset;
+ unsigned BeginIndex = VecTy ? getIndex(NewBeginOffset) : 0;
+ unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0;
unsigned NumElements = EndIndex - BeginIndex;
IntegerType *SubIntTy
- = IntTy ? Type::getIntNTy(IntTy->getContext(), Size*8) : 0;
+ = IntTy ? Type::getIntNTy(IntTy->getContext(), Size*8) : nullptr;
- Type *OtherPtrTy = NewAI.getType();
+ // Reset the other pointer type to match the register type we're going to
+ // use, but using the address space of the original other pointer.
if (VecTy && !IsWholeAlloca) {
if (NumElements == 1)
OtherPtrTy = VecTy->getElementType();
else
OtherPtrTy = VectorType::get(VecTy->getElementType(), NumElements);
- OtherPtrTy = OtherPtrTy->getPointerTo();
+ OtherPtrTy = OtherPtrTy->getPointerTo(OtherAS);
} else if (IntTy && !IsWholeAlloca) {
- OtherPtrTy = SubIntTy->getPointerTo();
+ OtherPtrTy = SubIntTy->getPointerTo(OtherAS);
+ } else {
+ OtherPtrTy = NewAllocaTy->getPointerTo(OtherAS);
}
- Value *SrcPtr = getAdjustedPtr(IRB, TD, OtherPtr, RelOffset, OtherPtrTy,
- getName("." + OtherPtr->getName()));
+ Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
+ OtherPtr->getName() + ".");
+ unsigned SrcAlign = OtherAlign;
Value *DstPtr = &NewAI;
- if (!IsDest)
+ unsigned DstAlign = SliceAlign;
+ if (!IsDest) {
std::swap(SrcPtr, DstPtr);
+ std::swap(SrcAlign, DstAlign);
+ }
Value *Src;
if (VecTy && !IsWholeAlloca && !IsDest) {
Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
- getName(".load"));
- Src = extractVector(IRB, Src, BeginIndex, EndIndex, getName(".vec"));
+ "load");
+ Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec");
} else if (IntTy && !IsWholeAlloca && !IsDest) {
Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
- getName(".load"));
- Src = convertValue(TD, IRB, Src, IntTy);
- assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
- uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
- Src = extractInteger(TD, IRB, Src, SubIntTy, Offset, getName(".extract"));
+ "load");
+ Src = convertValue(DL, IRB, Src, IntTy);
+ uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
+ Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract");
} else {
- Src = IRB.CreateAlignedLoad(SrcPtr, Align, II.isVolatile(),
- getName(".copyload"));
+ Src = IRB.CreateAlignedLoad(SrcPtr, SrcAlign, II.isVolatile(),
+ "copyload");
}
if (VecTy && !IsWholeAlloca && IsDest) {
Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
- getName(".oldload"));
- Src = insertVector(IRB, Old, Src, BeginIndex, getName(".vec"));
+ "oldload");
+ Src = insertVector(IRB, Old, Src, BeginIndex, "vec");
} else if (IntTy && !IsWholeAlloca && IsDest) {
Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
- getName(".oldload"));
- Old = convertValue(TD, IRB, Old, IntTy);
- assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
- uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
- Src = insertInteger(TD, IRB, Old, Src, Offset, getName(".insert"));
- Src = convertValue(TD, IRB, Src, NewAllocaTy);
+ "oldload");
+ Old = convertValue(DL, IRB, Old, IntTy);
+ uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
+ Src = insertInteger(DL, IRB, Old, Src, Offset, "insert");
+ Src = convertValue(DL, IRB, Src, NewAllocaTy);
}
StoreInst *Store = cast<StoreInst>(
- IRB.CreateAlignedStore(Src, DstPtr, Align, II.isVolatile()));
+ IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile()));
(void)Store;
DEBUG(dbgs() << " to: " << *Store << "\n");
return !II.isVolatile();
assert(II.getIntrinsicID() == Intrinsic::lifetime_start ||
II.getIntrinsicID() == Intrinsic::lifetime_end);
DEBUG(dbgs() << " original: " << II << "\n");
- IRBuilder<> IRB(&II);
assert(II.getArgOperand(1) == OldPtr);
// Record this instruction for deletion.
ConstantInt *Size
= ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()),
- EndOffset - BeginOffset);
- Value *Ptr = getAdjustedAllocaPtr(IRB, II.getArgOperand(1)->getType());
+ NewEndOffset - NewBeginOffset);
+ Value *Ptr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
Value *New;
if (II.getIntrinsicID() == Intrinsic::lifetime_start)
New = IRB.CreateLifetimeStart(Ptr, Size);
bool visitPHINode(PHINode &PN) {
DEBUG(dbgs() << " original: " << PN << "\n");
+ assert(BeginOffset >= NewAllocaBeginOffset && "PHIs are unsplittable");
+ assert(EndOffset <= NewAllocaEndOffset && "PHIs are unsplittable");
// We would like to compute a new pointer in only one place, but have it be
// as local as possible to the PHI. To do that, we re-use the location of
// the old pointer, which necessarily must be in the right position to
// dominate the PHI.
- IRBuilder<> PtrBuilder(cast<Instruction>(OldPtr));
+ IRBuilderTy PtrBuilder(IRB);
+ PtrBuilder.SetInsertPoint(OldPtr);
+ PtrBuilder.SetCurrentDebugLocation(OldPtr->getDebugLoc());
- Value *NewPtr = getAdjustedAllocaPtr(PtrBuilder, OldPtr->getType());
+ Value *NewPtr = getNewAllocaSlicePtr(PtrBuilder, OldPtr->getType());
// Replace the operands which were using the old pointer.
std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr);
DEBUG(dbgs() << " to: " << PN << "\n");
deleteIfTriviallyDead(OldPtr);
- return false;
+
+ // PHIs can't be promoted on their own, but often can be speculated. We
+ // check the speculation outside of the rewriter so that we see the
+ // fully-rewritten alloca.
+ PHIUsers.insert(&PN);
+ return true;
}
bool visitSelectInst(SelectInst &SI) {
DEBUG(dbgs() << " original: " << SI << "\n");
- IRBuilder<> IRB(&SI);
+ assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) &&
+ "Pointer isn't an operand!");
+ assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable");
+ assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable");
- // Find the operand we need to rewrite here.
- bool IsTrueVal = SI.getTrueValue() == OldPtr;
- if (IsTrueVal)
- assert(SI.getFalseValue() != OldPtr && "Pointer is both operands!");
- else
- assert(SI.getFalseValue() == OldPtr && "Pointer isn't an operand!");
+ Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
+ // Replace the operands which were using the old pointer.
+ if (SI.getOperand(1) == OldPtr)
+ SI.setOperand(1, NewPtr);
+ if (SI.getOperand(2) == OldPtr)
+ SI.setOperand(2, NewPtr);
- Value *NewPtr = getAdjustedAllocaPtr(IRB, OldPtr->getType());
- SI.setOperand(IsTrueVal ? 1 : 2, NewPtr);
DEBUG(dbgs() << " to: " << SI << "\n");
deleteIfTriviallyDead(OldPtr);
- return false;
+
+ // Selects can't be promoted on their own, but often can be speculated. We
+ // check the speculation outside of the rewriter so that we see the
+ // fully-rewritten alloca.
+ SelectUsers.insert(&SI);
+ return true;
}
};
// Befriend the base class so it can delegate to private visit methods.
friend class llvm::InstVisitor<AggLoadStoreRewriter, bool>;
- const DataLayout &TD;
+ const DataLayout &DL;
/// Queue of pointer uses to analyze and potentially rewrite.
SmallVector<Use *, 8> Queue;
Use *U;
public:
- AggLoadStoreRewriter(const DataLayout &TD) : TD(TD) {}
+ AggLoadStoreRewriter(const DataLayout &DL) : DL(DL) {}
/// Rewrite loads and stores through a pointer and all pointers derived from
/// it.
/// Enqueue all the users of the given instruction for further processing.
/// This uses a set to de-duplicate users.
void enqueueUsers(Instruction &I) {
- for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE;
- ++UI)
- if (Visited.insert(*UI))
- Queue.push_back(&UI.getUse());
+ for (Use &U : I.uses())
+ if (Visited.insert(U.getUser()))
+ Queue.push_back(&U);
}
// Conservative default is to not rewrite anything.
class OpSplitter {
protected:
/// The builder used to form new instructions.
- IRBuilder<> IRB;
+ IRBuilderTy IRB;
/// The indices which to be used with insert- or extractvalue to select the
/// appropriate value within the aggregate.
SmallVector<unsigned, 4> Indices;
void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) {
assert(Ty->isSingleValueType());
// Load the single value and insert it using the indices.
- Value *Load = IRB.CreateLoad(IRB.CreateInBoundsGEP(Ptr, GEPIndices,
- Name + ".gep"),
- Name + ".load");
+ Value *GEP = IRB.CreateInBoundsGEP(Ptr, GEPIndices, Name + ".gep");
+ Value *Load = IRB.CreateLoad(GEP, Name + ".load");
Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
DEBUG(dbgs() << " to: " << *Load << "\n");
}
/// when the size or offset causes either end of the type-based partition to be off.
/// Also, this is a best-effort routine. It is reasonable to give up and not
/// return a type if necessary.
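+/// For example, given { i32, [4 x i16], i8 }, an (Offset, Size) of (4, 8)
+/// yields the [4 x i16] element, (4, 4) recurses into it and yields
+/// [2 x i16], and a query straddling padding or element boundaries yields
+/// nullptr.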
-static Type *getTypePartition(const DataLayout &TD, Type *Ty,
+static Type *getTypePartition(const DataLayout &DL, Type *Ty,
uint64_t Offset, uint64_t Size) {
- if (Offset == 0 && TD.getTypeAllocSize(Ty) == Size)
- return stripAggregateTypeWrapping(TD, Ty);
- if (Offset > TD.getTypeAllocSize(Ty) ||
- (TD.getTypeAllocSize(Ty) - Offset) < Size)
- return 0;
+ if (Offset == 0 && DL.getTypeAllocSize(Ty) == Size)
+ return stripAggregateTypeWrapping(DL, Ty);
+ if (Offset > DL.getTypeAllocSize(Ty) ||
+ (DL.getTypeAllocSize(Ty) - Offset) < Size)
+ return nullptr;
if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) {
// We can't partition pointers...
if (SeqTy->isPointerTy())
- return 0;
+ return nullptr;
Type *ElementTy = SeqTy->getElementType();
- uint64_t ElementSize = TD.getTypeAllocSize(ElementTy);
+ uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
uint64_t NumSkippedElements = Offset / ElementSize;
- if (ArrayType *ArrTy = dyn_cast<ArrayType>(SeqTy))
+ if (ArrayType *ArrTy = dyn_cast<ArrayType>(SeqTy)) {
if (NumSkippedElements >= ArrTy->getNumElements())
- return 0;
- if (VectorType *VecTy = dyn_cast<VectorType>(SeqTy))
+ return nullptr;
+ } else if (VectorType *VecTy = dyn_cast<VectorType>(SeqTy)) {
if (NumSkippedElements >= VecTy->getNumElements())
- return 0;
+ return nullptr;
+ }
Offset -= NumSkippedElements * ElementSize;
// First check if we need to recurse.
if (Offset > 0 || Size < ElementSize) {
// Bail if the partition ends in a different array element.
if ((Offset + Size) > ElementSize)
- return 0;
+ return nullptr;
// Recurse through the element type trying to peel off offset bytes.
- return getTypePartition(TD, ElementTy, Offset, Size);
+ return getTypePartition(DL, ElementTy, Offset, Size);
}
assert(Offset == 0);
if (Size == ElementSize)
- return stripAggregateTypeWrapping(TD, ElementTy);
+ return stripAggregateTypeWrapping(DL, ElementTy);
assert(Size > ElementSize);
uint64_t NumElements = Size / ElementSize;
if (NumElements * ElementSize != Size)
- return 0;
+ return nullptr;
return ArrayType::get(ElementTy, NumElements);
}
StructType *STy = dyn_cast<StructType>(Ty);
if (!STy)
- return 0;
+ return nullptr;
- const StructLayout *SL = TD.getStructLayout(STy);
+ const StructLayout *SL = DL.getStructLayout(STy);
if (Offset >= SL->getSizeInBytes())
- return 0;
+ return nullptr;
uint64_t EndOffset = Offset + Size;
if (EndOffset > SL->getSizeInBytes())
- return 0;
+ return nullptr;
unsigned Index = SL->getElementContainingOffset(Offset);
Offset -= SL->getElementOffset(Index);
Type *ElementTy = STy->getElementType(Index);
- uint64_t ElementSize = TD.getTypeAllocSize(ElementTy);
+ uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
if (Offset >= ElementSize)
- return 0; // The offset points into alignment padding.
+ return nullptr; // The offset points into alignment padding.
// See if any partition must be contained by the element.
if (Offset > 0 || Size < ElementSize) {
if ((Offset + Size) > ElementSize)
- return 0;
- return getTypePartition(TD, ElementTy, Offset, Size);
+ return nullptr;
+ return getTypePartition(DL, ElementTy, Offset, Size);
}
assert(Offset == 0);
if (Size == ElementSize)
- return stripAggregateTypeWrapping(TD, ElementTy);
+ return stripAggregateTypeWrapping(DL, ElementTy);
StructType::element_iterator EI = STy->element_begin() + Index,
EE = STy->element_end();
if (EndOffset < SL->getSizeInBytes()) {
unsigned EndIndex = SL->getElementContainingOffset(EndOffset);
if (Index == EndIndex)
- return 0; // Within a single element and its padding.
+ return nullptr; // Within a single element and its padding.
// Don't try to form "natural" types if the elements don't line up with the
// expected size.
// FIXME: We could potentially recurse down through the last element in the
// sub-struct to find a natural end point.
if (SL->getElementOffset(EndIndex) != EndOffset)
- return 0;
+ return nullptr;
assert(Index < EndIndex);
EE = STy->element_begin() + EndIndex;
// Try to build up a sub-structure.
StructType *SubTy = StructType::get(STy->getContext(), makeArrayRef(EI, EE),
STy->isPacked());
- const StructLayout *SubSL = TD.getStructLayout(SubTy);
+ const StructLayout *SubSL = DL.getStructLayout(SubTy);
if (Size != SubSL->getSizeInBytes())
- return 0; // The sub-struct doesn't have quite the size needed.
+ return nullptr; // The sub-struct doesn't have quite the size needed.
return SubTy;
}
/// appropriate new offsets. It also evaluates how successful the rewrite was
/// at enabling promotion and if it was successful queues the alloca to be
/// promoted.
-bool SROA::rewriteAllocaPartition(AllocaInst &AI,
- AllocaPartitioning &P,
- AllocaPartitioning::iterator PI) {
- uint64_t AllocaSize = PI->EndOffset - PI->BeginOffset;
- bool IsLive = false;
- for (AllocaPartitioning::use_iterator UI = P.use_begin(PI),
- UE = P.use_end(PI);
- UI != UE && !IsLive; ++UI)
- if (UI->U)
- IsLive = true;
- if (!IsLive)
- return false; // No live uses left of this partition.
-
- DEBUG(dbgs() << "Speculating PHIs and selects in partition "
- << "[" << PI->BeginOffset << "," << PI->EndOffset << ")\n");
-
- PHIOrSelectSpeculator Speculator(*TD, P, *this);
- DEBUG(dbgs() << " speculating ");
- DEBUG(P.print(dbgs(), PI, ""));
- Speculator.visitUsers(PI);
+bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &S,
+ AllocaSlices::iterator B, AllocaSlices::iterator E,
+ int64_t BeginOffset, int64_t EndOffset,
+ ArrayRef<AllocaSlices::iterator> SplitUses) {
+ assert(BeginOffset < EndOffset);
+ uint64_t SliceSize = EndOffset - BeginOffset;
// Try to compute a friendly type for this partition of the alloca. This
// won't always succeed, in which case we fall back to a legal integer type
// or an i8 array of an appropriate size.
- Type *AllocaTy = 0;
- if (Type *PartitionTy = P.getCommonType(PI))
- if (TD->getTypeAllocSize(PartitionTy) >= AllocaSize)
- AllocaTy = PartitionTy;
- if (!AllocaTy)
- if (Type *PartitionTy = getTypePartition(*TD, AI.getAllocatedType(),
- PI->BeginOffset, AllocaSize))
- AllocaTy = PartitionTy;
- if ((!AllocaTy ||
- (AllocaTy->isArrayTy() &&
- AllocaTy->getArrayElementType()->isIntegerTy())) &&
- TD->isLegalInteger(AllocaSize * 8))
- AllocaTy = Type::getIntNTy(*C, AllocaSize * 8);
- if (!AllocaTy)
- AllocaTy = ArrayType::get(Type::getInt8Ty(*C), AllocaSize);
- assert(TD->getTypeAllocSize(AllocaTy) >= AllocaSize);
+ Type *SliceTy = nullptr;
+ if (Type *CommonUseTy = findCommonType(B, E, EndOffset))
+ if (DL->getTypeAllocSize(CommonUseTy) >= SliceSize)
+ SliceTy = CommonUseTy;
+ if (!SliceTy)
+ if (Type *TypePartitionTy = getTypePartition(*DL, AI.getAllocatedType(),
+ BeginOffset, SliceSize))
+ SliceTy = TypePartitionTy;
+ if ((!SliceTy || (SliceTy->isArrayTy() &&
+ SliceTy->getArrayElementType()->isIntegerTy())) &&
+ DL->isLegalInteger(SliceSize * 8))
+ SliceTy = Type::getIntNTy(*C, SliceSize * 8);
+ if (!SliceTy)
+ SliceTy = ArrayType::get(Type::getInt8Ty(*C), SliceSize);
+ assert(DL->getTypeAllocSize(SliceTy) >= SliceSize);
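+ // For example, a 4-byte partition whose loads and stores all use i32 gets
+ // SliceTy = i32 from the common use type; an 8-byte span of a struct may get
+ // a sub-struct or array type from getTypePartition; otherwise a target-legal
+ // integer such as i64 is used, falling back to [N x i8] as a last resort.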
+
+ bool IsVectorPromotable = isVectorPromotionViable(
+ *DL, SliceTy, S, BeginOffset, EndOffset, B, E, SplitUses);
+
+ bool IsIntegerPromotable =
+ !IsVectorPromotable &&
+ isIntegerWideningViable(*DL, SliceTy, BeginOffset, S, B, E, SplitUses);
// Check for the case where we're going to rewrite to a new alloca of the
// exact same type as the original, and with the same access offsets. In that
// case, re-use the existing alloca, but still run through the rewriter to
- // performe phi and select speculation.
+ // perform phi and select speculation.
AllocaInst *NewAI;
- if (AllocaTy == AI.getAllocatedType()) {
- assert(PI->BeginOffset == 0 &&
+ if (SliceTy == AI.getAllocatedType()) {
+ assert(BeginOffset == 0 &&
"Non-zero begin offset but same alloca type");
- assert(PI == P.begin() && "Begin offset is zero on later partition");
NewAI = &AI;
+ // FIXME: We should be able to bail at this point with "nothing changed".
+ // FIXME: We might want to defer PHI speculation until after here.
} else {
unsigned Alignment = AI.getAlignment();
if (!Alignment) {
// The minimum alignment which users can rely on when the explicit
// alignment is omitted or zero is that required by the ABI for this
// type.
- Alignment = TD->getABITypeAlignment(AI.getAllocatedType());
+ Alignment = DL->getABITypeAlignment(AI.getAllocatedType());
}
- Alignment = MinAlign(Alignment, PI->BeginOffset);
+ Alignment = MinAlign(Alignment, BeginOffset);
// If we will get at least this much alignment from the type alone, leave
// the alloca's alignment unconstrained.
- if (Alignment <= TD->getABITypeAlignment(AllocaTy))
+ if (Alignment <= DL->getABITypeAlignment(SliceTy))
Alignment = 0;
- NewAI = new AllocaInst(AllocaTy, 0, Alignment,
- AI.getName() + ".sroa." + Twine(PI - P.begin()),
- &AI);
+ NewAI = new AllocaInst(SliceTy, nullptr, Alignment,
+ AI.getName() + ".sroa." + Twine(B - S.begin()), &AI);
++NumNewAllocas;
}
DEBUG(dbgs() << "Rewriting alloca partition "
- << "[" << PI->BeginOffset << "," << PI->EndOffset << ") to: "
- << *NewAI << "\n");
+ << "[" << BeginOffset << "," << EndOffset << ") to: " << *NewAI
+ << "\n");
- // Track the high watermark of the post-promotion worklist. We will reset it
- // to this point if the alloca is not in fact scheduled for promotion.
+ // Track the high watermark on the worklist as it is only relevant for
+ // promoted allocas. We will reset it to this point if the alloca is not in
+ // fact scheduled for promotion.
unsigned PPWOldSize = PostPromotionWorklist.size();
+ unsigned NumUses = 0;
+ SmallPtrSet<PHINode *, 8> PHIUsers;
+ SmallPtrSet<SelectInst *, 8> SelectUsers;
+
+ AllocaSliceRewriter Rewriter(*DL, S, *this, AI, *NewAI, BeginOffset,
+ EndOffset, IsVectorPromotable,
+ IsIntegerPromotable, PHIUsers, SelectUsers);
+ bool Promotable = true;
+ for (ArrayRef<AllocaSlices::iterator>::const_iterator SUI = SplitUses.begin(),
+ SUE = SplitUses.end();
+ SUI != SUE; ++SUI) {
+ DEBUG(dbgs() << " rewriting split ");
+ DEBUG(S.printSlice(dbgs(), *SUI, ""));
+ Promotable &= Rewriter.visit(*SUI);
+ ++NumUses;
+ }
+ for (AllocaSlices::iterator I = B; I != E; ++I) {
+ DEBUG(dbgs() << " rewriting ");
+ DEBUG(S.printSlice(dbgs(), I, ""));
+ Promotable &= Rewriter.visit(I);
+ ++NumUses;
+ }
+
+ NumAllocaPartitionUses += NumUses;
+ MaxUsesPerAllocaPartition =
+ std::max<unsigned>(NumUses, MaxUsesPerAllocaPartition);
+
+ // Now that we've processed all the slices in the new partition, check if any
+ // PHIs or Selects would block promotion.
+ for (SmallPtrSetImpl<PHINode *>::iterator I = PHIUsers.begin(),
+ E = PHIUsers.end();
+ I != E; ++I)
+ if (!isSafePHIToSpeculate(**I, DL)) {
+ Promotable = false;
+ PHIUsers.clear();
+ SelectUsers.clear();
+ break;
+ }
+ for (SmallPtrSetImpl<SelectInst *>::iterator I = SelectUsers.begin(),
+ E = SelectUsers.end();
+ I != E; ++I)
+ if (!isSafeSelectToSpeculate(**I, DL)) {
+ Promotable = false;
+ PHIUsers.clear();
+ SelectUsers.clear();
+ break;
+ }
- AllocaPartitionRewriter Rewriter(*TD, P, PI, *this, AI, *NewAI,
- PI->BeginOffset, PI->EndOffset);
- DEBUG(dbgs() << " rewriting ");
- DEBUG(P.print(dbgs(), PI, ""));
- bool Promotable = Rewriter.visitUsers(P.use_begin(PI), P.use_end(PI));
if (Promotable) {
- DEBUG(dbgs() << " and queuing for promotion\n");
- PromotableAllocas.push_back(NewAI);
- } else if (NewAI != &AI) {
+ if (PHIUsers.empty() && SelectUsers.empty()) {
+ // Promote the alloca.
+ PromotableAllocas.push_back(NewAI);
+ } else {
+ // If we have either PHIs or Selects to speculate, add them to those
+ // worklists and re-queue the new alloca so that we promote it on the
+ // next iteration.
+ for (SmallPtrSetImpl<PHINode *>::iterator I = PHIUsers.begin(),
+ E = PHIUsers.end();
+ I != E; ++I)
+ SpeculatablePHIs.insert(*I);
+ for (SmallPtrSetImpl<SelectInst *>::iterator I = SelectUsers.begin(),
+ E = SelectUsers.end();
+ I != E; ++I)
+ SpeculatableSelects.insert(*I);
+ Worklist.insert(NewAI);
+ }
+ } else {
// If we can't promote the alloca, iterate on it to check for new
// refinements exposed by splitting the current alloca. Don't iterate on an
// alloca which didn't actually change and didn't get promoted.
- Worklist.insert(NewAI);
- }
+ if (NewAI != &AI)
+ Worklist.insert(NewAI);
- // Drop any post-promotion work items if promotion didn't happen.
- if (!Promotable)
+ // Drop any post-promotion work items if promotion didn't happen.
while (PostPromotionWorklist.size() > PPWOldSize)
PostPromotionWorklist.pop_back();
+ }
return true;
}
-/// \brief Walks the partitioning of an alloca rewriting uses of each partition.
-bool SROA::splitAlloca(AllocaInst &AI, AllocaPartitioning &P) {
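+/// \brief Drop any split uses whose end offset is at or before \p Offset and
+/// recompute \p MaxSplitUseEndOffset for the remaining entries.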
+static void
+removeFinishedSplitUses(SmallVectorImpl<AllocaSlices::iterator> &SplitUses,
+ uint64_t &MaxSplitUseEndOffset, uint64_t Offset) {
+ if (Offset >= MaxSplitUseEndOffset) {
+ SplitUses.clear();
+ MaxSplitUseEndOffset = 0;
+ return;
+ }
+
+ size_t SplitUsesOldSize = SplitUses.size();
+ SplitUses.erase(std::remove_if(SplitUses.begin(), SplitUses.end(),
+ [Offset](const AllocaSlices::iterator &I) {
+ return I->endOffset() <= Offset;
+ }),
+ SplitUses.end());
+ if (SplitUsesOldSize == SplitUses.size())
+ return;
+
+ // Recompute the max. While this is linear, so is remove_if.
+ MaxSplitUseEndOffset = 0;
+ for (SmallVectorImpl<AllocaSlices::iterator>::iterator
+ SUI = SplitUses.begin(),
+ SUE = SplitUses.end();
+ SUI != SUE; ++SUI)
+ MaxSplitUseEndOffset = std::max((*SUI)->endOffset(), MaxSplitUseEndOffset);
+}
+
+/// \brief Walks the slices of an alloca and forms partitions based on them,
+/// rewriting each of their uses.
+bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &S) {
+ if (S.begin() == S.end())
+ return false;
+
+ unsigned NumPartitions = 0;
bool Changed = false;
- for (AllocaPartitioning::iterator PI = P.begin(), PE = P.end(); PI != PE;
- ++PI)
- Changed |= rewriteAllocaPartition(AI, P, PI);
+ SmallVector<AllocaSlices::iterator, 4> SplitUses;
+ uint64_t MaxSplitUseEndOffset = 0;
+
+ uint64_t BeginOffset = S.begin()->beginOffset();
+
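+ // Walk the sorted slices, greedily growing one partition at a time: an
+ // unsplittable slice forces a partition spanning it and any overlapping
+ // unsplittable slices, while a run of splittable slices merges until it
+ // meets an unsplittable one. Splittable slices extending past the current
+ // partition are carried forward in SplitUses and rewritten again for the
+ // later partitions they overlap. For example, an unsplittable store of
+ // [0,8) plus a splittable memcpy of [0,16) forms partitions [0,8) and
+ // [8,16), with the memcpy rewritten (split) into both.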
+ for (AllocaSlices::iterator SI = S.begin(), SJ = std::next(SI), SE = S.end();
+ SI != SE; SI = SJ) {
+ uint64_t MaxEndOffset = SI->endOffset();
+
+ if (!SI->isSplittable()) {
+ // When we're forming an unsplittable region, it must always start at the
+ // first slice and will extend through its end.
+ assert(BeginOffset == SI->beginOffset());
+
+ // Form a partition including all of the overlapping slices with this
+ // unsplittable slice.
+ while (SJ != SE && SJ->beginOffset() < MaxEndOffset) {
+ if (!SJ->isSplittable())
+ MaxEndOffset = std::max(MaxEndOffset, SJ->endOffset());
+ ++SJ;
+ }
+ } else {
+ assert(SI->isSplittable()); // Established above.
+
+ // Collect all of the overlapping splittable slices.
+ while (SJ != SE && SJ->beginOffset() < MaxEndOffset &&
+ SJ->isSplittable()) {
+ MaxEndOffset = std::max(MaxEndOffset, SJ->endOffset());
+ ++SJ;
+ }
+
+ // Back up MaxEndOffset and SJ if we ended the span early when
+ // encountering an unsplittable slice.
+ if (SJ != SE && SJ->beginOffset() < MaxEndOffset) {
+ assert(!SJ->isSplittable());
+ MaxEndOffset = SJ->beginOffset();
+ }
+ }
+
+ // Check if we have managed to move the end offset forward yet. If so,
+ // we'll have to rewrite uses and erase old split uses.
+ if (BeginOffset < MaxEndOffset) {
+ // Rewrite a sequence of overlapping slices.
+ Changed |=
+ rewritePartition(AI, S, SI, SJ, BeginOffset, MaxEndOffset, SplitUses);
+ ++NumPartitions;
+
+ removeFinishedSplitUses(SplitUses, MaxSplitUseEndOffset, MaxEndOffset);
+ }
+
+ // Accumulate all the splittable slices from the [SI,SJ) region which
+ // overlap going forward.
+ for (AllocaSlices::iterator SK = SI; SK != SJ; ++SK)
+ if (SK->isSplittable() && SK->endOffset() > MaxEndOffset) {
+ SplitUses.push_back(SK);
+ MaxSplitUseEndOffset = std::max(SK->endOffset(), MaxSplitUseEndOffset);
+ }
+
+ // If we're already at the end and we have no split uses, we're done.
+ if (SJ == SE && SplitUses.empty())
+ break;
+
+ // If we have no split uses or no gap in offsets, we're ready to move to
+ // the next slice.
+ if (SplitUses.empty() || (SJ != SE && MaxEndOffset == SJ->beginOffset())) {
+ BeginOffset = SJ->beginOffset();
+ continue;
+ }
+
+ // Even if we have split slices, if the next slice is splittable and the
+ // split slices reach it, we can simply set up the beginning offset of the
+ // next iteration to bridge between them.
+ if (SJ != SE && SJ->isSplittable() &&
+ MaxSplitUseEndOffset > SJ->beginOffset()) {
+ BeginOffset = MaxEndOffset;
+ continue;
+ }
+
+ // Otherwise, we have a tail of split slices. Rewrite them with an empty
+ // range of slices.
+ uint64_t PostSplitEndOffset =
+ SJ == SE ? MaxSplitUseEndOffset : SJ->beginOffset();
+
+ Changed |= rewritePartition(AI, S, SJ, SJ, MaxEndOffset, PostSplitEndOffset,
+ SplitUses);
+ ++NumPartitions;
+
+ if (SJ == SE)
+ break; // Skip the rest, we don't need to do any cleanup.
+
+ removeFinishedSplitUses(SplitUses, MaxSplitUseEndOffset,
+ PostSplitEndOffset);
+
+ // Now just reset the begin offset for the next iteration.
+ BeginOffset = SJ->beginOffset();
+ }
+
+ NumAllocaPartitions += NumPartitions;
+ MaxPartitionsPerAlloca =
+ std::max<unsigned>(NumPartitions, MaxPartitionsPerAlloca);
return Changed;
}
+/// \brief Clobber a use with undef, deleting the used value if it becomes dead.
+void SROA::clobberUse(Use &U) {
+ Value *OldV = U;
+ // Replace the use with an undef value.
+ U = UndefValue::get(OldV->getType());
+
+ // Check for this making an instruction dead. We have to garbage collect
+ // all the dead instructions to ensure the uses of any alloca end up being
+ // minimal.
+ if (Instruction *OldI = dyn_cast<Instruction>(OldV))
+ if (isInstructionTriviallyDead(OldI)) {
+ DeadInsts.insert(OldI);
+ }
+}
+
/// \brief Analyze an alloca for SROA.
///
/// This analyzes the alloca to ensure we can reason about it, builds
-/// a partitioning of the alloca, and then hands it off to be split and
+/// the slices of the alloca, and then hands it off to be split and
/// rewritten as needed.
bool SROA::runOnAlloca(AllocaInst &AI) {
DEBUG(dbgs() << "SROA alloca: " << AI << "\n");
// Skip alloca forms that this analysis can't handle.
if (AI.isArrayAllocation() || !AI.getAllocatedType()->isSized() ||
- TD->getTypeAllocSize(AI.getAllocatedType()) == 0)
+ DL->getTypeAllocSize(AI.getAllocatedType()) == 0)
return false;
bool Changed = false;
// First, split any FCA loads and stores touching this alloca to promote
// better splitting and promotion opportunities.
- AggLoadStoreRewriter AggRewriter(*TD);
+ AggLoadStoreRewriter AggRewriter(*DL);
Changed |= AggRewriter.rewrite(AI);
- // Build the partition set using a recursive instruction-visiting builder.
- AllocaPartitioning P(*TD, AI);
- DEBUG(P.print(dbgs()));
- if (P.isEscaped())
+ // Build the slices using a recursive instruction-visiting builder.
+ AllocaSlices S(*DL, AI);
+ DEBUG(S.print(dbgs()));
+ if (S.isEscaped())
return Changed;
// Delete all the dead users of this alloca before splitting and rewriting it.
- for (AllocaPartitioning::dead_user_iterator DI = P.dead_user_begin(),
- DE = P.dead_user_end();
+ for (AllocaSlices::dead_user_iterator DI = S.dead_user_begin(),
+ DE = S.dead_user_end();
DI != DE; ++DI) {
- Changed = true;
+ // Free up everything used by this instruction.
+ for (Use &DeadOp : (*DI)->operands())
+ clobberUse(DeadOp);
+
+ // Now replace the uses of this instruction.
(*DI)->replaceAllUsesWith(UndefValue::get((*DI)->getType()));
+
+ // And mark it for deletion.
DeadInsts.insert(*DI);
+ Changed = true;
}
- for (AllocaPartitioning::dead_op_iterator DO = P.dead_op_begin(),
- DE = P.dead_op_end();
+ for (AllocaSlices::dead_op_iterator DO = S.dead_op_begin(),
+ DE = S.dead_op_end();
DO != DE; ++DO) {
- Value *OldV = **DO;
- // Clobber the use with an undef value.
- **DO = UndefValue::get(OldV->getType());
- if (Instruction *OldI = dyn_cast<Instruction>(OldV))
- if (isInstructionTriviallyDead(OldI)) {
- Changed = true;
- DeadInsts.insert(OldI);
- }
+ clobberUse(**DO);
+ Changed = true;
}
- // No partitions to split. Leave the dead alloca for a later pass to clean up.
- if (P.begin() == P.end())
+ // No slices to split. Leave the dead alloca for a later pass to clean up.
+ if (S.begin() == S.end())
return Changed;
- return splitAlloca(AI, P) || Changed;
+ Changed |= splitAlloca(AI, S);
+
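+  // Rewriting the partitions queued up PHIs and selects whose loads can now
+  // be safely speculated; perform that speculation here.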
+ DEBUG(dbgs() << " Speculating PHIs\n");
+ while (!SpeculatablePHIs.empty())
+ speculatePHINodeLoads(*SpeculatablePHIs.pop_back_val());
+
+ DEBUG(dbgs() << " Speculating Selects\n");
+ while (!SpeculatableSelects.empty())
+ speculateSelectInstLoads(*SpeculatableSelects.pop_back_val());
+
+ return Changed;
}
/// \brief Delete the dead instructions accumulated in this run.
///
/// We also record the alloca instructions deleted here so that they aren't
/// subsequently handed to mem2reg to promote.
-void SROA::deleteDeadInstructions(SmallPtrSet<AllocaInst*, 4> &DeletedAllocas) {
+void SROA::deleteDeadInstructions(SmallPtrSetImpl<AllocaInst*> &DeletedAllocas) {
while (!DeadInsts.empty()) {
Instruction *I = DeadInsts.pop_back_val();
DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");
I->replaceAllUsesWith(UndefValue::get(I->getType()));
- for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
- if (Instruction *U = dyn_cast<Instruction>(*OI)) {
+ for (Use &Operand : I->operands())
+ if (Instruction *U = dyn_cast<Instruction>(Operand)) {
// Zero out the operand and see if it becomes trivially dead.
- *OI = 0;
+ Operand = nullptr;
if (isInstructionTriviallyDead(U))
DeadInsts.insert(U);
}
}
}
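+/// \brief Add all of \p I's users that have not yet been visited to the
+/// worklist, recording them in \p Visited so each instruction is enqueued
+/// only once.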
+static void enqueueUsersInWorklist(Instruction &I,
+ SmallVectorImpl<Instruction *> &Worklist,
+ SmallPtrSetImpl<Instruction *> &Visited) {
+ for (User *U : I.users())
+ if (Visited.insert(cast<Instruction>(U)))
+ Worklist.push_back(cast<Instruction>(U));
+}
+
/// \brief Promote the allocas, using the best available technique.
///
/// This attempts to promote whatever allocas have been identified as viable in
/// the PromotableAllocas list. If that list is empty, there is nothing to do.
/// If there is a domtree available, we attempt to promote using the full power
/// of mem2reg. Otherwise, we build and use the AllocaPromoter above which is
/// based on the SSAUpdater utilities. This function returns whether any
-/// promotion occured.
+/// promotion occurred.
bool SROA::promoteAllocas(Function &F) {
if (PromotableAllocas.empty())
return false;
DEBUG(dbgs() << "Promoting allocas with SSAUpdater...\n");
SSAUpdater SSA;
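+  // The DIBuilder is handed to each AllocaPromoter so it can update debug
+  // intrinsics for the values it promotes.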
DIBuilder DIB(*F.getParent());
- SmallVector<Instruction*, 64> Insts;
+ SmallVector<Instruction *, 64> Insts;
+
+ // We need a worklist to walk the uses of each alloca.
+ SmallVector<Instruction *, 8> Worklist;
+ SmallPtrSet<Instruction *, 8> Visited;
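+  // No-op bitcasts and GEPs we walk through become dead once the loads and
+  // stores are promoted; collect them here so they can be erased afterwards.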
+ SmallVector<Instruction *, 32> DeadInsts;
for (unsigned Idx = 0, Size = PromotableAllocas.size(); Idx != Size; ++Idx) {
AllocaInst *AI = PromotableAllocas[Idx];
- for (Value::use_iterator UI = AI->use_begin(), UE = AI->use_end();
- UI != UE;) {
- Instruction *I = cast<Instruction>(*UI++);
+ Insts.clear();
+ Worklist.clear();
+ Visited.clear();
+
+ enqueueUsersInWorklist(*AI, Worklist, Visited);
+
+ while (!Worklist.empty()) {
+ Instruction *I = Worklist.pop_back_val();
+
// FIXME: Currently the SSAUpdater infrastructure doesn't reason about
// lifetime intrinsics and so we strip them (and the bitcasts+GEPs
// leading to them) here. Eventually it should use them to optimize the
// scalar values produced.
- if (isa<BitCastInst>(I) || isa<GetElementPtrInst>(I)) {
- assert(onlyUsedByLifetimeMarkers(I) &&
- "Found a bitcast used outside of a lifetime marker.");
- while (!I->use_empty())
- cast<Instruction>(*I->use_begin())->eraseFromParent();
- I->eraseFromParent();
- continue;
- }
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
assert(II->getIntrinsicID() == Intrinsic::lifetime_start ||
II->getIntrinsicID() == Intrinsic::lifetime_end);
      II->eraseFromParent();
      continue;
}
- Insts.push_back(I);
+ // Push the loads and stores we find onto the list. SROA will already
+ // have validated that all loads and stores are viable candidates for
+ // promotion.
+ if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+ assert(LI->getType() == AI->getAllocatedType());
+ Insts.push_back(LI);
+ continue;
+ }
+ if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
+ assert(SI->getValueOperand()->getType() == AI->getAllocatedType());
+ Insts.push_back(SI);
+ continue;
+ }
+
+ // For everything else, we know that only no-op bitcasts and GEPs will
+ // make it this far, just recurse through them and recall them for later
+ // removal.
+ DeadInsts.push_back(I);
+ enqueueUsersInWorklist(*I, Worklist, Visited);
}
AllocaPromoter(Insts, SSA, *AI, DIB).run(Insts);
- Insts.clear();
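+    // The loads and stores have been rewritten; erase the dead bitcasts and
+    // GEPs we recursed through, and then the alloca itself.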
+ while (!DeadInsts.empty())
+ DeadInsts.pop_back_val()->eraseFromParent();
+ AI->eraseFromParent();
}
PromotableAllocas.clear();
return true;
}
-namespace {
- /// \brief A predicate to test whether an alloca belongs to a set.
- class IsAllocaInSet {
- typedef SmallPtrSet<AllocaInst *, 4> SetType;
- const SetType &Set;
-
- public:
- typedef AllocaInst *argument_type;
-
- IsAllocaInSet(const SetType &Set) : Set(Set) {}
- bool operator()(AllocaInst *AI) const { return Set.count(AI); }
- };
-}
-
bool SROA::runOnFunction(Function &F) {
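+  // Functions marked 'optnone' opt out of optimization; leave them untouched.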
+ if (skipOptnoneFunction(F))
+ return false;
+
DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
C = &F.getContext();
- TD = getAnalysisIfAvailable<DataLayout>();
- if (!TD) {
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ if (!DLP) {
DEBUG(dbgs() << " Skipping SROA -- no target data!\n");
return false;
}
- DT = getAnalysisIfAvailable<DominatorTree>();
+ DL = &DLP->getDataLayout();
+ DominatorTreeWrapperPass *DTWP =
+ getAnalysisIfAvailable<DominatorTreeWrapperPass>();
+ DT = DTWP ? &DTWP->getDomTree() : nullptr;
BasicBlock &EntryBB = F.getEntryBlock();
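+  // Seed the worklist with every alloca in the entry block; std::prev stops
+  // the walk just before the block terminator, which can never be an alloca.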
- for (BasicBlock::iterator I = EntryBB.begin(), E = llvm::prior(EntryBB.end());
+ for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end());
I != E; ++I)
if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
Worklist.insert(AI);
// Remove the deleted allocas from various lists so that we don't try to
// continue processing them.
if (!DeletedAllocas.empty()) {
- Worklist.remove_if(IsAllocaInSet(DeletedAllocas));
- PostPromotionWorklist.remove_if(IsAllocaInSet(DeletedAllocas));
+ auto IsInSet = [&](AllocaInst *AI) {
+ return DeletedAllocas.count(AI);
+ };
+ Worklist.remove_if(IsInSet);
+ PostPromotionWorklist.remove_if(IsInSet);
PromotableAllocas.erase(std::remove_if(PromotableAllocas.begin(),
PromotableAllocas.end(),
- IsAllocaInSet(DeletedAllocas)),
+ IsInSet),
PromotableAllocas.end());
DeletedAllocas.clear();
}
void SROA::getAnalysisUsage(AnalysisUsage &AU) const {
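+  // Only the mem2reg-based promotion path needs the dominator tree; the
+  // SSAUpdater-based fallback can run without it.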
if (RequiresDomTree)
- AU.addRequired<DominatorTree>();
+ AU.addRequired<DominatorTreeWrapperPass>();
AU.setPreservesCFG();
}