///
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "sroa"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/AssumptionTracker.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/DIBuilder.h"
-#include "llvm/DebugInfo.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
-#include "llvm/InstVisitor.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/TimeValue.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
+
+#if __cplusplus >= 201103L && !defined(NDEBUG)
+// We only use this for a debug check in C++11
+#include <random>
+#endif
+
using namespace llvm;
+#define DEBUG_TYPE "sroa"
+
STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement");
STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed");
-STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions");
-STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses found");
-STATISTIC(MaxPartitionUsesPerAlloca, "Maximum number of partition uses");
+STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca");
+STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten");
+STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition");
STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced");
STATISTIC(NumPromoted, "Number of allocas promoted to SSA values");
STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion");
static cl::opt<bool>
ForceSSAUpdater("force-ssa-updater", cl::init(false), cl::Hidden);
+/// Hidden option to enable randomly shuffling the slices to help uncover
+/// instability in their order.
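+/// Though hidden from -help, the flag can still be passed explicitly, e.g.
+/// "opt -sroa -sroa-random-shuffle-slices ..." (illustrative invocation); the
+/// shuffle itself only happens in assertion-enabled C++11 builds.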
+static cl::opt<bool> SROARandomShuffleSlices("sroa-random-shuffle-slices",
+ cl::init(false), cl::Hidden);
+
+/// Hidden option to experiment with completely strict handling of inbounds
+/// GEPs.
+static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds",
+ cl::init(false), cl::Hidden);
+
namespace {
/// \brief A custom IRBuilder inserter which prefixes all names if they are
/// preserved.
}
namespace {
-/// \brief A partition of an alloca.
+/// \brief A used slice of an alloca.
///
-/// This structure represents a contiguous partition of the alloca. These are
-/// formed by examining the uses of the alloca. During formation, they may
-/// overlap but once an AllocaPartitioning is built, the Partitions within it
-/// are all disjoint. The partition also contains a chain of uses of that
-/// partition.
-class Partition {
+/// This structure represents a slice of an alloca used by some instruction. It
+/// stores both the begin and end offsets of this use, a pointer to the use
+/// itself, and a flag indicating whether we can classify the use as splittable
+/// or not when forming partitions of the alloca.
+class Slice {
/// \brief The beginning offset of the range.
uint64_t BeginOffset;
/// \brief The ending offset, not included in the range.
uint64_t EndOffset;
- /// \brief Storage for both the use of this partition and whether it can be
+ /// \brief Storage for both the use of this slice and whether it can be
/// split.
- PointerIntPair<Use *, 1, bool> PartitionUseAndIsSplittable;
+ PointerIntPair<Use *, 1, bool> UseAndIsSplittable;
public:
- Partition() : BeginOffset(), EndOffset() {}
- Partition(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable)
+ Slice() : BeginOffset(), EndOffset() {}
+ Slice(uint64_t BeginOffset, uint64_t EndOffset, Use *U, bool IsSplittable)
: BeginOffset(BeginOffset), EndOffset(EndOffset),
- PartitionUseAndIsSplittable(U, IsSplittable) {}
+ UseAndIsSplittable(U, IsSplittable) {}
uint64_t beginOffset() const { return BeginOffset; }
uint64_t endOffset() const { return EndOffset; }
- bool isSplittable() const { return PartitionUseAndIsSplittable.getInt(); }
- void makeUnsplittable() { PartitionUseAndIsSplittable.setInt(false); }
+ bool isSplittable() const { return UseAndIsSplittable.getInt(); }
+ void makeUnsplittable() { UseAndIsSplittable.setInt(false); }
- Use *getUse() const { return PartitionUseAndIsSplittable.getPointer(); }
+ Use *getUse() const { return UseAndIsSplittable.getPointer(); }
- bool isDead() const { return getUse() == 0; }
- void kill() { PartitionUseAndIsSplittable.setPointer(0); }
+ bool isDead() const { return getUse() == nullptr; }
+ void kill() { UseAndIsSplittable.setPointer(nullptr); }
/// \brief Support for ordering ranges.
///
  /// This provides an ordering over ranges such that start offsets are
  /// always increasing, and within equal start offsets, the end offsets are
/// decreasing. Thus the spanning range comes first in a cluster with the
/// same start position.
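  ///
  /// For example (illustrative), splittable slices [0,16), [0,8), and [4,8)
  /// sort as [0,16), [0,8), [4,8): begin offsets ascend, and within the equal
  /// begin offsets the wider [0,16) precedes [0,8).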
- bool operator<(const Partition &RHS) const {
+ bool operator<(const Slice &RHS) const {
if (beginOffset() < RHS.beginOffset()) return true;
if (beginOffset() > RHS.beginOffset()) return false;
if (isSplittable() != RHS.isSplittable()) return !isSplittable();
    if (endOffset() > RHS.endOffset()) return true;
    return false;
  }
/// \brief Support comparison with a single offset to allow binary searches.
- friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Partition &LHS,
+ friend LLVM_ATTRIBUTE_UNUSED bool operator<(const Slice &LHS,
uint64_t RHSOffset) {
return LHS.beginOffset() < RHSOffset;
}
friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset,
- const Partition &RHS) {
+ const Slice &RHS) {
return LHSOffset < RHS.beginOffset();
}
- bool operator==(const Partition &RHS) const {
+ bool operator==(const Slice &RHS) const {
return isSplittable() == RHS.isSplittable() &&
beginOffset() == RHS.beginOffset() && endOffset() == RHS.endOffset();
}
- bool operator!=(const Partition &RHS) const { return !operator==(RHS); }
+ bool operator!=(const Slice &RHS) const { return !operator==(RHS); }
};
} // end anonymous namespace
namespace llvm {
template <typename T> struct isPodLike;
-template <> struct isPodLike<Partition> {
+template <> struct isPodLike<Slice> {
static const bool value = true;
};
}
namespace {
-/// \brief Alloca partitioning representation.
+/// \brief Representation of the alloca slices.
///
-/// This class represents a partitioning of an alloca into slices, and
-/// information about the nature of uses of each slice of the alloca. The goal
-/// is that this information is sufficient to decide if and how to split the
-/// alloca apart and replace slices with scalars. It is also intended that this
-/// structure can capture the relevant information needed both to decide about
-/// and to enact these transformations.
-class AllocaPartitioning {
+/// This class represents the slices of an alloca which are formed by its
+/// various uses. If a pointer escapes, we can't fully build a representation
+/// for the slices used and we reflect that in this structure. The uses are
+/// stored, sorted by increasing beginning offset and with unsplittable slices
+/// starting at a particular offset before splittable slices.
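+///
+/// For example (illustrative), if the alloca's address is passed to an
+/// unknown call the pointer escapes, isEscaped() returns true, and the
+/// slices are never fully built.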
+class AllocaSlices {
public:
- /// \brief Construct a partitioning of a particular alloca.
- ///
- /// Construction does most of the work for partitioning the alloca. This
- /// performs the necessary walks of users and builds a partitioning from it.
- AllocaPartitioning(const DataLayout &TD, AllocaInst &AI);
+ /// \brief Construct the slices of a particular alloca.
+ AllocaSlices(const DataLayout &DL, AllocaInst &AI);
/// \brief Test whether a pointer to the allocation escapes our analysis.
///
- /// If this is true, the partitioning is never fully built and should be
+ /// If this is true, the slices are never fully built and should be
/// ignored.
bool isEscaped() const { return PointerEscapingInstr; }
- /// \brief Support for iterating over the partitions.
+ /// \brief Support for iterating over the slices.
/// @{
- typedef SmallVectorImpl<Partition>::iterator iterator;
- iterator begin() { return Partitions.begin(); }
- iterator end() { return Partitions.end(); }
-
- typedef SmallVectorImpl<Partition>::const_iterator const_iterator;
- const_iterator begin() const { return Partitions.begin(); }
- const_iterator end() const { return Partitions.end(); }
+ typedef SmallVectorImpl<Slice>::iterator iterator;
+ typedef iterator_range<iterator> range;
+ iterator begin() { return Slices.begin(); }
+ iterator end() { return Slices.end(); }
+
+ typedef SmallVectorImpl<Slice>::const_iterator const_iterator;
+ typedef iterator_range<const_iterator> const_range;
+ const_iterator begin() const { return Slices.begin(); }
+ const_iterator end() const { return Slices.end(); }
/// @}
- /// \brief Allow iterating the dead users for this alloca.
- ///
- /// These are instructions which will never actually use the alloca as they
- /// are outside the allocated range. They are safe to replace with undef and
- /// delete.
- /// @{
- typedef SmallVectorImpl<Instruction *>::const_iterator dead_user_iterator;
- dead_user_iterator dead_user_begin() const { return DeadUsers.begin(); }
- dead_user_iterator dead_user_end() const { return DeadUsers.end(); }
- /// @}
+ /// \brief Access the dead users for this alloca.
+ ArrayRef<Instruction *> getDeadUsers() const { return DeadUsers; }
- /// \brief Allow iterating the dead expressions referring to this alloca.
+ /// \brief Access the dead operands referring to this alloca.
///
  /// These are operands which cannot actually be used to refer to the
/// alloca as they are outside its range and the user doesn't correct for
/// that. These mostly consist of PHI node inputs and the like which we just
/// need to replace with undef.
- /// @{
- typedef SmallVectorImpl<Use *>::const_iterator dead_op_iterator;
- dead_op_iterator dead_op_begin() const { return DeadOperands.begin(); }
- dead_op_iterator dead_op_end() const { return DeadOperands.end(); }
- /// @}
+ ArrayRef<Use *> getDeadOperands() const { return DeadOperands; }
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void print(raw_ostream &OS, const_iterator I, StringRef Indent = " ") const;
- void printPartition(raw_ostream &OS, const_iterator I,
- StringRef Indent = " ") const;
+ void printSlice(raw_ostream &OS, const_iterator I,
+ StringRef Indent = " ") const;
void printUse(raw_ostream &OS, const_iterator I,
StringRef Indent = " ") const;
void print(raw_ostream &OS) const;
- void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump(const_iterator I) const;
- void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump() const;
+ void dump(const_iterator I) const;
+ void dump() const;
#endif
private:
template <typename DerivedT, typename RetT = void> class BuilderBase;
- class PartitionBuilder;
- friend class AllocaPartitioning::PartitionBuilder;
+ class SliceBuilder;
+ friend class AllocaSlices::SliceBuilder;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// \brief Handle to alloca instruction to simplify method interfaces.
AllocaInst &AI;
#endif
- /// \brief The instruction responsible for this alloca having no partitioning.
+ /// \brief The instruction responsible for this alloca not having a known set
+ /// of slices.
///
/// When an instruction (potentially) escapes the pointer to the alloca, we
- /// store a pointer to that here and abort trying to partition the alloca.
- /// This will be null if the alloca is partitioned successfully.
+ /// store a pointer to that here and abort trying to form slices of the
+ /// alloca. This will be null if the alloca slices are analyzed successfully.
Instruction *PointerEscapingInstr;
- /// \brief The partitions of the alloca.
+ /// \brief The slices of the alloca.
///
- /// We store a vector of the partitions over the alloca here. This vector is
- /// sorted by increasing begin offset, and then by decreasing end offset. See
- /// the Partition inner class for more details. Initially (during
- /// construction) there are overlaps, but we form a disjoint sequence of
- /// partitions while finishing construction and a fully constructed object is
- /// expected to always have this as a disjoint space.
- SmallVector<Partition, 8> Partitions;
+ /// We store a vector of the slices formed by uses of the alloca here. This
+ /// vector is sorted by increasing begin offset, and then the unsplittable
+ /// slices before the splittable ones. See the Slice inner class for more
+ /// details.
+ SmallVector<Slice, 8> Slices;
/// \brief Instructions which will become dead if we rewrite the alloca.
///
- /// Note that these are not separated by partition. This is because we expect
- /// a partitioned alloca to be completely rewritten or not rewritten at all.
- /// If rewritten, all these instructions can simply be removed and replaced
- /// with undef as they come from outside of the allocated space.
+ /// Note that these are not separated by slice. This is because we expect an
+ /// alloca to be completely rewritten or not rewritten at all. If rewritten,
+ /// all these instructions can simply be removed and replaced with undef as
+ /// they come from outside of the allocated space.
SmallVector<Instruction *, 8> DeadUsers;
/// \brief Operands which will become dead if we rewrite the alloca.
if (SI.getOperand(1) == SI.getOperand(2))
return SI.getOperand(1);
- return 0;
+ return nullptr;
+}
+
+/// \brief A helper that folds a PHI node or a select.
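+///
+/// Illustrative example: a PHI like "%p = phi i8* [ %a, %bb1 ], [ %a, %bb2 ]"
+/// folds to %a via hasConstantValue, and "select i1 %c, i8* %a, i8* %a" folds
+/// to %a via foldSelectInst; the names here are purely illustrative.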
+static Value *foldPHINodeOrSelectInst(Instruction &I) {
+ if (PHINode *PN = dyn_cast<PHINode>(&I)) {
+ // If PN merges together the same value, return that value.
+ return PN->hasConstantValue();
+ }
+ return foldSelectInst(cast<SelectInst>(I));
}
-/// \brief Builder for the alloca partitioning.
+/// \brief Builder for the alloca slices.
///
-/// This class builds an alloca partitioning by recursively visiting the uses
-/// of an alloca and splitting the partitions for each load and store at each
-/// offset.
-class AllocaPartitioning::PartitionBuilder
- : public PtrUseVisitor<PartitionBuilder> {
- friend class PtrUseVisitor<PartitionBuilder>;
- friend class InstVisitor<PartitionBuilder>;
- typedef PtrUseVisitor<PartitionBuilder> Base;
+/// This class builds a set of alloca slices by recursively visiting the uses
+/// of an alloca and making a slice for each load and store at each offset.
+class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> {
+ friend class PtrUseVisitor<SliceBuilder>;
+ friend class InstVisitor<SliceBuilder>;
+ typedef PtrUseVisitor<SliceBuilder> Base;
const uint64_t AllocSize;
- AllocaPartitioning &P;
+ AllocaSlices &AS;
- SmallDenseMap<Instruction *, unsigned> MemTransferPartitionMap;
+ SmallDenseMap<Instruction *, unsigned> MemTransferSliceMap;
SmallDenseMap<Instruction *, uint64_t> PHIOrSelectSizes;
/// \brief Set to de-duplicate dead instructions found in the use walk.
SmallPtrSet<Instruction *, 4> VisitedDeadInsts;
public:
- PartitionBuilder(const DataLayout &DL, AllocaInst &AI, AllocaPartitioning &P)
- : PtrUseVisitor<PartitionBuilder>(DL),
- AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())),
- P(P) {}
+ SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &AS)
+ : PtrUseVisitor<SliceBuilder>(DL),
+ AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())), AS(AS) {}
private:
void markAsDead(Instruction &I) {
if (VisitedDeadInsts.insert(&I))
- P.DeadUsers.push_back(&I);
+ AS.DeadUsers.push_back(&I);
}
void insertUse(Instruction &I, const APInt &Offset, uint64_t Size,
bool IsSplittable = false) {
// Completely skip uses which have a zero size or start either before or
// past the end of the allocation.
- if (Size == 0 || Offset.isNegative() || Offset.uge(AllocSize)) {
+ if (Size == 0 || Offset.uge(AllocSize)) {
DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset
<< " which has zero size or starts outside of the "
<< AllocSize << " byte alloca:\n"
- << " alloca: " << P.AI << "\n"
+ << " alloca: " << AS.AI << "\n"
<< " use: " << I << "\n");
return markAsDead(I);
}
if (Size > AllocSize - BeginOffset) {
DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset
<< " to remain within the " << AllocSize << " byte alloca:\n"
- << " alloca: " << P.AI << "\n"
+ << " alloca: " << AS.AI << "\n"
<< " use: " << I << "\n");
EndOffset = AllocSize;
}
- P.Partitions.push_back(Partition(BeginOffset, EndOffset, U, IsSplittable));
+ AS.Slices.push_back(Slice(BeginOffset, EndOffset, U, IsSplittable));
}
void visitBitCastInst(BitCastInst &BC) {
if (GEPI.use_empty())
return markAsDead(GEPI);
+ if (SROAStrictInbounds && GEPI.isInBounds()) {
+ // FIXME: This is a manually un-factored variant of the basic code inside
+ // of GEPs with checking of the inbounds invariant specified in the
+ // langref in a very strict sense. If we ever want to enable
+ // SROAStrictInbounds, this code should be factored cleanly into
+ // PtrUseVisitor, but it is easier to experiment with SROAStrictInbounds
+      // by writing out the code here where we have the underlying allocation
+ // size readily available.
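+      //
+      // Illustrative example: for "%a = alloca [4 x i32]" (a 16 byte
+      // allocation), an inbounds GEP whose constant indices compute a 32 byte
+      // offset points past the end of the allocation, so the GEP and all its
+      // uses are marked dead below.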
+ APInt GEPOffset = Offset;
+ for (gep_type_iterator GTI = gep_type_begin(GEPI),
+ GTE = gep_type_end(GEPI);
+ GTI != GTE; ++GTI) {
+ ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
+ if (!OpC)
+ break;
+
+ // Handle a struct index, which adds its field offset to the pointer.
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ unsigned ElementIdx = OpC->getZExtValue();
+ const StructLayout *SL = DL.getStructLayout(STy);
+ GEPOffset +=
+ APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
+ } else {
+          // For array or vector indices, scale the index by the size of the
+          // type.
+ APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth());
+ GEPOffset += Index * APInt(Offset.getBitWidth(),
+ DL.getTypeAllocSize(GTI.getIndexedType()));
+ }
+
+ // If this index has computed an intermediate pointer which is not
+ // inbounds, then the result of the GEP is a poison value and we can
+ // delete it and all uses.
+ if (GEPOffset.ugt(AllocSize))
+ return markAsDead(GEPI);
+ }
+ }
+
return Base::visitGetElementPtrInst(GEPI);
}
// risk of overflow.
// FIXME: We should instead consider the pointer to have escaped if this
// function is being instrumented for addressing bugs or race conditions.
- if (Offset.isNegative() || Size > AllocSize ||
- Offset.ugt(AllocSize - Size)) {
+ if (Size > AllocSize || Offset.ugt(AllocSize - Size)) {
DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @" << Offset
<< " which extends past the end of the " << AllocSize
<< " byte alloca:\n"
- << " alloca: " << P.AI << "\n"
+ << " alloca: " << AS.AI << "\n"
<< " use: " << SI << "\n");
return markAsDead(SI);
}
assert(II.getRawDest() == *U && "Pointer use is not the destination?");
ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
if ((Length && Length->getValue() == 0) ||
- (IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize)))
+ (IsOffsetKnown && Offset.uge(AllocSize)))
// Zero-length mem transfer intrinsics can be ignored entirely.
return markAsDead(II);
void visitMemTransferInst(MemTransferInst &II) {
ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
- if ((Length && Length->getValue() == 0) ||
- (IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize)))
+ if (Length && Length->getValue() == 0)
// Zero-length mem transfer intrinsics can be ignored entirely.
return markAsDead(II);
+ // Because we can visit these intrinsics twice, also check to see if the
+ // first time marked this instruction as dead. If so, skip it.
+ if (VisitedDeadInsts.count(&II))
+ return;
+
if (!IsOffsetKnown)
return PI.setAborted(&II);
+ // This side of the transfer is completely out-of-bounds, and so we can
+ // nuke the entire transfer. However, we also need to nuke the other side
+ // if already added to our partitions.
+ // FIXME: Yet another place we really should bypass this when
+ // instrumenting for ASan.
+ if (Offset.uge(AllocSize)) {
+      SmallDenseMap<Instruction *, unsigned>::iterator MTPI =
+          MemTransferSliceMap.find(&II);
+ if (MTPI != MemTransferSliceMap.end())
+ AS.Slices[MTPI->second].kill();
+ return markAsDead(II);
+ }
+
uint64_t RawOffset = Offset.getLimitedValue();
uint64_t Size = Length ? Length->getLimitedValue()
: AllocSize - RawOffset;
if (!II.isVolatile())
return markAsDead(II);
- return insertUse(II, Offset, Size, /*IsSplittable=*/false);;
+ return insertUse(II, Offset, Size, /*IsSplittable=*/false);
}
// If we have seen both source and destination for a mem transfer, then
// they both point to the same alloca.
bool Inserted;
SmallDenseMap<Instruction *, unsigned>::iterator MTPI;
- llvm::tie(MTPI, Inserted) =
- MemTransferPartitionMap.insert(std::make_pair(&II, P.Partitions.size()));
+ std::tie(MTPI, Inserted) =
+ MemTransferSliceMap.insert(std::make_pair(&II, AS.Slices.size()));
unsigned PrevIdx = MTPI->second;
if (!Inserted) {
- Partition &PrevP = P.Partitions[PrevIdx];
+ Slice &PrevP = AS.Slices[PrevIdx];
// Check if the begin offsets match and this is a non-volatile transfer.
// In that case, we can completely elide the transfer.
insertUse(II, Offset, Size, /*IsSplittable=*/Inserted && Length);
// Check that we ended up with a valid index in the map.
- assert(P.Partitions[PrevIdx].getUse()->getUser() == &II &&
- "Map index doesn't point back to a partition with this user.");
+ assert(AS.Slices[PrevIdx].getUse()->getUser() == &II &&
+ "Map index doesn't point back to a slice with this user.");
}
// Disable SRoA for any intrinsics except for lifetime invariants.
Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) {
// We consider any PHI or select that results in a direct load or store of
- // the same offset to be a viable use for partitioning purposes. These uses
+ // the same offset to be a viable use for slicing purposes. These uses
// are considered unsplittable and the size is the maximum loaded or stored
// size.
SmallPtrSet<Instruction *, 4> Visited;
Size = 0;
do {
Instruction *I, *UsedI;
- llvm::tie(UsedI, I) = Uses.pop_back_val();
+ std::tie(UsedI, I) = Uses.pop_back_val();
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
Size = std::max(Size, DL.getTypeStoreSize(LI->getType()));
return I;
}
- for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE;
- ++UI)
- if (Visited.insert(cast<Instruction>(*UI)))
- Uses.push_back(std::make_pair(I, cast<Instruction>(*UI)));
+ for (User *U : I->users())
+ if (Visited.insert(cast<Instruction>(U)))
+ Uses.push_back(std::make_pair(I, cast<Instruction>(U)));
} while (!Uses.empty());
- return 0;
+ return nullptr;
}
- void visitPHINode(PHINode &PN) {
- if (PN.use_empty())
- return markAsDead(PN);
- if (!IsOffsetKnown)
- return PI.setAborted(&PN);
-
- // See if we already have computed info on this node.
- uint64_t &PHISize = PHIOrSelectSizes[&PN];
- if (!PHISize) {
- // This is a new PHI node, check for an unsafe use of the PHI node.
- if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&PN, PHISize))
- return PI.setAborted(UnsafeI);
- }
-
- // For PHI and select operands outside the alloca, we can't nuke the entire
- // phi or select -- the other side might still be relevant, so we special
- // case them here and use a separate structure to track the operands
- // themselves which should be replaced with undef.
- // FIXME: This should instead be escaped in the event we're instrumenting
- // for address sanitization.
- if ((Offset.isNegative() && (-Offset).uge(PHISize)) ||
- (!Offset.isNegative() && Offset.uge(AllocSize))) {
- P.DeadOperands.push_back(U);
- return;
- }
-
- insertUse(PN, Offset, PHISize);
- }
+ void visitPHINodeOrSelectInst(Instruction &I) {
+ assert(isa<PHINode>(I) || isa<SelectInst>(I));
+ if (I.use_empty())
+ return markAsDead(I);
- void visitSelectInst(SelectInst &SI) {
- if (SI.use_empty())
- return markAsDead(SI);
- if (Value *Result = foldSelectInst(SI)) {
+ // TODO: We could use SimplifyInstruction here to fold PHINodes and
+ // SelectInsts. However, doing so requires to change the current
+ // dead-operand-tracking mechanism. For instance, suppose neither loading
+ // from %U nor %other traps. Then "load (select undef, %U, %other)" does not
+ // trap either. However, if we simply replace %U with undef using the
+ // current dead-operand-tracking mechanism, "load (select undef, undef,
+ // %other)" may trap because the select may return the first operand
+ // "undef".
+ if (Value *Result = foldPHINodeOrSelectInst(I)) {
if (Result == *U)
// If the result of the constant fold will be the pointer, recurse
- // through the select as if we had RAUW'ed it.
- enqueueUsers(SI);
+ // through the PHI/select as if we had RAUW'ed it.
+ enqueueUsers(I);
else
- // Otherwise the operand to the select is dead, and we can replace it
- // with undef.
- P.DeadOperands.push_back(U);
+ // Otherwise the operand to the PHI/select is dead, and we can replace
+ // it with undef.
+ AS.DeadOperands.push_back(U);
return;
}
+
if (!IsOffsetKnown)
- return PI.setAborted(&SI);
+ return PI.setAborted(&I);
// See if we already have computed info on this node.
- uint64_t &SelectSize = PHIOrSelectSizes[&SI];
- if (!SelectSize) {
- // This is a new Select, check for an unsafe use of it.
- if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&SI, SelectSize))
+ uint64_t &Size = PHIOrSelectSizes[&I];
+ if (!Size) {
+ // This is a new PHI/Select, check for an unsafe use of it.
+ if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&I, Size))
return PI.setAborted(UnsafeI);
}
// themselves which should be replaced with undef.
// FIXME: This should instead be escaped in the event we're instrumenting
// for address sanitization.
- if ((Offset.isNegative() && Offset.uge(SelectSize)) ||
- (!Offset.isNegative() && Offset.uge(AllocSize))) {
- P.DeadOperands.push_back(U);
+ if (Offset.uge(AllocSize)) {
+ AS.DeadOperands.push_back(U);
return;
}
- insertUse(SI, Offset, SelectSize);
+ insertUse(I, Offset, Size);
+ }
+
+ void visitPHINode(PHINode &PN) {
+ visitPHINodeOrSelectInst(PN);
+ }
+
+ void visitSelectInst(SelectInst &SI) {
+ visitPHINodeOrSelectInst(SI);
}
/// \brief Disable SROA entirely if there are unhandled users of the alloca.
}
};
-namespace {
-struct IsPartitionDead {
- bool operator()(const Partition &P) { return P.isDead(); }
-};
-}
-
-AllocaPartitioning::AllocaPartitioning(const DataLayout &TD, AllocaInst &AI)
+AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
:
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
AI(AI),
#endif
- PointerEscapingInstr(0) {
- PartitionBuilder PB(TD, AI, *this);
- PartitionBuilder::PtrInfo PtrI = PB.visitPtr(AI);
+ PointerEscapingInstr(nullptr) {
+ SliceBuilder PB(DL, AI, *this);
+ SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI);
if (PtrI.isEscaped() || PtrI.isAborted()) {
// FIXME: We should sink the escape vs. abort info into the caller nicely,
- // possibly by just storing the PtrInfo in the AllocaPartitioning.
+ // possibly by just storing the PtrInfo in the AllocaSlices.
PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst()
: PtrI.getAbortingInst();
assert(PointerEscapingInstr && "Did not track a bad instruction");
return;
}
- // Sort the uses. This arranges for the offsets to be in ascending order,
- // and the sizes to be in descending order.
- std::sort(Partitions.begin(), Partitions.end());
-
- Partitions.erase(
- std::remove_if(Partitions.begin(), Partitions.end(), IsPartitionDead()),
- Partitions.end());
+ Slices.erase(std::remove_if(Slices.begin(), Slices.end(),
+ std::mem_fun_ref(&Slice::isDead)),
+ Slices.end());
- // Record how many partitions we end up with.
- NumAllocaPartitions += Partitions.size();
- MaxPartitionsPerAlloca = std::max<unsigned>(Partitions.size(), MaxPartitionsPerAlloca);
+#if __cplusplus >= 201103L && !defined(NDEBUG)
+ if (SROARandomShuffleSlices) {
+ std::mt19937 MT(static_cast<unsigned>(sys::TimeValue::now().msec()));
+ std::shuffle(Slices.begin(), Slices.end(), MT);
+ }
+#endif
- NumAllocaPartitionUses += Partitions.size();
- MaxPartitionUsesPerAlloca =
- std::max<unsigned>(Partitions.size(), MaxPartitionUsesPerAlloca);
+ // Sort the uses. This arranges for the offsets to be in ascending order,
+ // and the sizes to be in descending order.
+ std::sort(Slices.begin(), Slices.end());
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-void AllocaPartitioning::print(raw_ostream &OS, const_iterator I,
- StringRef Indent) const {
- printPartition(OS, I, Indent);
+void AllocaSlices::print(raw_ostream &OS, const_iterator I,
+ StringRef Indent) const {
+ printSlice(OS, I, Indent);
printUse(OS, I, Indent);
}
-void AllocaPartitioning::printPartition(raw_ostream &OS, const_iterator I,
- StringRef Indent) const {
+void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I,
+ StringRef Indent) const {
OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")"
- << " partition #" << (I - begin())
+ << " slice #" << (I - begin())
<< (I->isSplittable() ? " (splittable)" : "") << "\n";
}
-void AllocaPartitioning::printUse(raw_ostream &OS, const_iterator I,
- StringRef Indent) const {
+void AllocaSlices::printUse(raw_ostream &OS, const_iterator I,
+ StringRef Indent) const {
OS << Indent << " used by: " << *I->getUse()->getUser() << "\n";
}
-void AllocaPartitioning::print(raw_ostream &OS) const {
+void AllocaSlices::print(raw_ostream &OS) const {
if (PointerEscapingInstr) {
- OS << "No partitioning for alloca: " << AI << "\n"
+ OS << "Can't analyze slices for alloca: " << AI << "\n"
<< " A pointer to this alloca escaped by:\n"
<< " " << *PointerEscapingInstr << "\n";
return;
}
- OS << "Partitioning of alloca: " << AI << "\n";
+ OS << "Slices of alloca: " << AI << "\n";
for (const_iterator I = begin(), E = end(); I != E; ++I)
print(OS, I);
}
-void AllocaPartitioning::dump(const_iterator I) const { print(dbgs(), I); }
-void AllocaPartitioning::dump() const { print(dbgs()); }
+LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const {
+ print(dbgs(), I);
+}
+LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); }
#endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
SmallVector<DbgValueInst *, 4> DVIs;
public:
- AllocaPromoter(const SmallVectorImpl<Instruction*> &Insts, SSAUpdater &S,
+ AllocaPromoter(const SmallVectorImpl<Instruction *> &Insts, SSAUpdater &S,
AllocaInst &AI, DIBuilder &DIB)
- : LoadAndStorePromoter(Insts, S), AI(AI), DIB(DIB) {}
+ : LoadAndStorePromoter(Insts, S), AI(AI), DIB(DIB) {}
void run(const SmallVectorImpl<Instruction*> &Insts) {
- // Remember which alloca we're promoting (for isInstInList).
+ // Retain the debug information attached to the alloca for use when
+ // rewriting loads and stores.
if (MDNode *DebugNode = MDNode::getIfExists(AI.getContext(), &AI)) {
- for (Value::use_iterator UI = DebugNode->use_begin(),
- UE = DebugNode->use_end();
- UI != UE; ++UI)
- if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(*UI))
+ for (User *U : DebugNode->users())
+ if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
DDIs.push_back(DDI);
- else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(*UI))
+ else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U))
DVIs.push_back(DVI);
}
LoadAndStorePromoter::run(Insts);
- AI.eraseFromParent();
+
+ // While we have the debug information, clear it off of the alloca. The
+ // caller takes care of deleting the alloca.
while (!DDIs.empty())
DDIs.pop_back_val()->eraseFromParent();
while (!DVIs.empty())
DVIs.pop_back_val()->eraseFromParent();
}
- virtual bool isInstInList(Instruction *I,
- const SmallVectorImpl<Instruction*> &Insts) const {
+ bool isInstInList(Instruction *I,
+ const SmallVectorImpl<Instruction*> &Insts) const override {
+ Value *Ptr;
if (LoadInst *LI = dyn_cast<LoadInst>(I))
- return LI->getOperand(0) == &AI;
- return cast<StoreInst>(I)->getPointerOperand() == &AI;
+ Ptr = LI->getOperand(0);
+ else
+ Ptr = cast<StoreInst>(I)->getPointerOperand();
+
+ // Only used to detect cycles, which will be rare and quickly found as
+ // we're walking up a chain of defs rather than down through uses.
+ SmallPtrSet<Value *, 4> Visited;
+
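+    // Walk up any bitcasts or GEPs from the pointer operand to see whether it
+    // is (transitively) derived from the alloca being promoted. For example
+    // (illustrative), a store through a bitcast of the alloca still counts.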
+ do {
+ if (Ptr == &AI)
+ return true;
+
+ if (BitCastInst *BCI = dyn_cast<BitCastInst>(Ptr))
+ Ptr = BCI->getOperand(0);
+ else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
+ Ptr = GEPI->getPointerOperand();
+ else
+ return false;
+
+ } while (Visited.insert(Ptr));
+
+ return false;
}
- virtual void updateDebugInfo(Instruction *Inst) const {
- for (SmallVectorImpl<DbgDeclareInst *>::const_iterator I = DDIs.begin(),
- E = DDIs.end(); I != E; ++I) {
- DbgDeclareInst *DDI = *I;
+ void updateDebugInfo(Instruction *Inst) const override {
+ for (DbgDeclareInst *DDI : DDIs)
if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
- }
- for (SmallVectorImpl<DbgValueInst *>::const_iterator I = DVIs.begin(),
- E = DVIs.end(); I != E; ++I) {
- DbgValueInst *DVI = *I;
- Value *Arg = 0;
+ for (DbgValueInst *DVI : DVIs) {
+ Value *Arg = nullptr;
if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
// If an argument is zero extended then use argument directly. The ZExt
// may be zapped by an optimization pass in future.
continue;
}
Instruction *DbgVal =
- DIB.insertDbgValueIntrinsic(Arg, 0, DIVariable(DVI->getVariable()),
- Inst);
+ DIB.insertDbgValueIntrinsic(Arg, 0, DIVariable(DVI->getVariable()),
+ DIExpression(DVI->getExpression()), Inst);
DbgVal->setDebugLoc(DVI->getDebugLoc());
}
}
const bool RequiresDomTree;
LLVMContext *C;
- const DataLayout *TD;
+ const DataLayout *DL;
DominatorTree *DT;
+ AssumptionTracker *AT;
/// \brief Worklist of alloca instructions to simplify.
///
public:
SROA(bool RequiresDomTree = true)
: FunctionPass(ID), RequiresDomTree(RequiresDomTree),
- C(0), TD(0), DT(0) {
+ C(nullptr), DL(nullptr), DT(nullptr) {
initializeSROAPass(*PassRegistry::getPassRegistry());
}
- bool runOnFunction(Function &F);
- void getAnalysisUsage(AnalysisUsage &AU) const;
+ bool runOnFunction(Function &F) override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
- const char *getPassName() const { return "SROA"; }
+ const char *getPassName() const override { return "SROA"; }
static char ID;
private:
friend class PHIOrSelectSpeculator;
- friend class AllocaPartitionRewriter;
- friend class AllocaPartitionVectorRewriter;
-
- bool rewritePartitions(AllocaInst &AI, AllocaPartitioning &P,
- AllocaPartitioning::iterator B,
- AllocaPartitioning::iterator E,
- int64_t BeginOffset, int64_t EndOffset,
- ArrayRef<AllocaPartitioning::iterator> SplitUses);
- bool splitAlloca(AllocaInst &AI, AllocaPartitioning &P);
+ friend class AllocaSliceRewriter;
+
+ bool rewritePartition(AllocaInst &AI, AllocaSlices &AS,
+ AllocaSlices::iterator B, AllocaSlices::iterator E,
+ int64_t BeginOffset, int64_t EndOffset,
+ ArrayRef<AllocaSlices::iterator> SplitUses);
+ bool splitAlloca(AllocaInst &AI, AllocaSlices &AS);
bool runOnAlloca(AllocaInst &AI);
- void deleteDeadInstructions(SmallPtrSet<AllocaInst *, 4> &DeletedAllocas);
+ void clobberUse(Use &U);
+ void deleteDeadInstructions(SmallPtrSetImpl<AllocaInst *> &DeletedAllocas);
bool promoteAllocas(Function &F);
};
}
INITIALIZE_PASS_BEGIN(SROA, "sroa", "Scalar Replacement Of Aggregates",
false, false)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(AssumptionTracker)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(SROA, "sroa", "Scalar Replacement Of Aggregates",
false, false)
-/// Walk a range of a partitioning looking for a common type to cover this
-/// sequence of partition uses.
-static Type *findCommonType(AllocaPartitioning::const_iterator B,
- AllocaPartitioning::const_iterator E,
+/// Walk the range of a partitioning looking for a common type to cover this
+/// sequence of slices.
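+///
+/// Illustrative example: if one slice loads a float and another stores an i32
+/// over the same range, there is no common type, but the i32 is remembered as
+/// the widest integer type seen and is returned as the fallback.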
+static Type *findCommonType(AllocaSlices::const_iterator B,
+ AllocaSlices::const_iterator E,
uint64_t EndOffset) {
- Type *Ty = 0;
- for (AllocaPartitioning::const_iterator I = B; I != E; ++I) {
+ Type *Ty = nullptr;
+ bool TyIsCommon = true;
+ IntegerType *ITy = nullptr;
+
+ // Note that we need to look at *every* alloca slice's Use to ensure we
+ // always get consistent results regardless of the order of slices.
+ for (AllocaSlices::const_iterator I = B; I != E; ++I) {
Use *U = I->getUse();
if (isa<IntrinsicInst>(*U->getUser()))
continue;
if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset)
continue;
- Type *UserTy = 0;
- if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser()))
+ Type *UserTy = nullptr;
+ if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
UserTy = LI->getType();
- else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser()))
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
UserTy = SI->getValueOperand()->getType();
- else
- return 0; // Bail if we have weird uses.
+ }
- if (IntegerType *ITy = dyn_cast<IntegerType>(UserTy)) {
+ if (IntegerType *UserITy = dyn_cast_or_null<IntegerType>(UserTy)) {
// If the type is larger than the partition, skip it. We only encounter
- // this for split integer operations where we want to use the type of
- // the
- // entity causing the split.
- if (ITy->getBitWidth() / 8 > (EndOffset - B->beginOffset()))
+ // this for split integer operations where we want to use the type of the
+ // entity causing the split. Also skip if the type is not a byte width
+ // multiple.
+ if (UserITy->getBitWidth() % 8 != 0 ||
+ UserITy->getBitWidth() / 8 > (EndOffset - B->beginOffset()))
continue;
- // If we have found an integer type use covering the alloca, use that
- // regardless of the other types, as integers are often used for a
- // "bucket
- // of bits" type.
- return ITy;
+ // Track the largest bitwidth integer type used in this way in case there
+ // is no common type.
+ if (!ITy || ITy->getBitWidth() < UserITy->getBitWidth())
+ ITy = UserITy;
}
- if (Ty && Ty != UserTy)
- return 0;
-
- Ty = UserTy;
+ // To avoid depending on the order of slices, Ty and TyIsCommon must not
+ // depend on types skipped above.
+ if (!UserTy || (Ty && Ty != UserTy))
+ TyIsCommon = false; // Give up on anything but an iN type.
+ else
+ Ty = UserTy;
}
- return Ty;
+
+ return TyIsCommon ? Ty : ITy;
}
/// PHI instructions that use an alloca and are subsequently loaded can be
/// FIXME: This should be hoisted into a generic utility, likely in
/// Transforms/Util/Local.h
static bool isSafePHIToSpeculate(PHINode &PN,
- const DataLayout *TD = 0) {
+ const DataLayout *DL = nullptr) {
// For now, we can only do this promotion if the load is in the same block
// as the PHI, and if there are no stores between the phi and load.
// TODO: Allow recursive phi users.
BasicBlock *BB = PN.getParent();
unsigned MaxAlign = 0;
bool HaveLoad = false;
- for (Value::use_iterator UI = PN.use_begin(), UE = PN.use_end(); UI != UE;
- ++UI) {
- LoadInst *LI = dyn_cast<LoadInst>(*UI);
- if (LI == 0 || !LI->isSimple())
+ for (User *U : PN.users()) {
+ LoadInst *LI = dyn_cast<LoadInst>(U);
+ if (!LI || !LI->isSimple())
return false;
// For now we only allow loads in the same block as the PHI. This is
// If this pointer is always safe to load, or if we can prove that there
// is already a load in the block, then we can move the load to the pred
// block.
- if (InVal->isDereferenceablePointer() ||
- isSafeToLoadUnconditionally(InVal, TI, MaxAlign, TD))
+ if (InVal->isDereferenceablePointer(DL) ||
+ isSafeToLoadUnconditionally(InVal, TI, MaxAlign, DL))
continue;
return false;
PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(),
PN.getName() + ".sroa.speculated");
- // Get the TBAA tag and alignment to use from one of the loads. It doesn't
+ // Get the AA tags and alignment to use from one of the loads. It doesn't
// matter which one we get and if any differ.
- LoadInst *SomeLoad = cast<LoadInst>(*PN.use_begin());
- MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa);
+ LoadInst *SomeLoad = cast<LoadInst>(PN.user_back());
+
+ AAMDNodes AATags;
+ SomeLoad->getAAMetadata(AATags);
unsigned Align = SomeLoad->getAlignment();
// Rewrite all loads of the PN to use the new PHI.
while (!PN.use_empty()) {
- LoadInst *LI = cast<LoadInst>(*PN.use_begin());
+ LoadInst *LI = cast<LoadInst>(PN.user_back());
LI->replaceAllUsesWith(NewPN);
LI->eraseFromParent();
}
InVal, (PN.getName() + ".sroa.speculate.load." + Pred->getName()));
++NumLoadsSpeculated;
Load->setAlignment(Align);
- if (TBAATag)
- Load->setMetadata(LLVMContext::MD_tbaa, TBAATag);
+ if (AATags)
+ Load->setAAMetadata(AATags);
NewPN->addIncoming(Load, Pred);
}
///
/// We can do this to a select if its only uses are loads and if the operand
/// to the select can be loaded unconditionally.
-static bool isSafeSelectToSpeculate(SelectInst &SI, const DataLayout *TD = 0) {
+static bool isSafeSelectToSpeculate(SelectInst &SI,
+ const DataLayout *DL = nullptr) {
Value *TValue = SI.getTrueValue();
Value *FValue = SI.getFalseValue();
- bool TDerefable = TValue->isDereferenceablePointer();
- bool FDerefable = FValue->isDereferenceablePointer();
+ bool TDerefable = TValue->isDereferenceablePointer(DL);
+ bool FDerefable = FValue->isDereferenceablePointer(DL);
- for (Value::use_iterator UI = SI.use_begin(), UE = SI.use_end(); UI != UE;
- ++UI) {
- LoadInst *LI = dyn_cast<LoadInst>(*UI);
- if (LI == 0 || !LI->isSimple())
+ for (User *U : SI.users()) {
+ LoadInst *LI = dyn_cast<LoadInst>(U);
+ if (!LI || !LI->isSimple())
return false;
// Both operands to the select need to be dereferencable, either
// absolutely (e.g. allocas) or at this point because we can see other
// accesses to it.
if (!TDerefable &&
- !isSafeToLoadUnconditionally(TValue, LI, LI->getAlignment(), TD))
+ !isSafeToLoadUnconditionally(TValue, LI, LI->getAlignment(), DL))
return false;
if (!FDerefable &&
- !isSafeToLoadUnconditionally(FValue, LI, LI->getAlignment(), TD))
+ !isSafeToLoadUnconditionally(FValue, LI, LI->getAlignment(), DL))
return false;
}
Value *FV = SI.getFalseValue();
// Replace the loads of the select with a select of two loads.
while (!SI.use_empty()) {
- LoadInst *LI = cast<LoadInst>(*SI.use_begin());
+ LoadInst *LI = cast<LoadInst>(SI.user_back());
assert(LI->isSimple() && "We only speculate simple loads");
IRB.SetInsertPoint(LI);
IRB.CreateLoad(FV, LI->getName() + ".sroa.speculate.load.false");
NumLoadsSpeculated += 2;
- // Transfer alignment and TBAA info if present.
+ // Transfer alignment and AA info if present.
TL->setAlignment(LI->getAlignment());
FL->setAlignment(LI->getAlignment());
- if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa)) {
- TL->setMetadata(LLVMContext::MD_tbaa, Tag);
- FL->setMetadata(LLVMContext::MD_tbaa, Tag);
+
+ AAMDNodes Tags;
+ LI->getAAMetadata(Tags);
+ if (Tags) {
+ TL->setAAMetadata(Tags);
+ FL->setAAMetadata(Tags);
}
Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL,
/// This will return the BasePtr if that is valid, or build a new GEP
/// instruction using the IRBuilder if GEP-ing is needed.
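/// The NamePrefix is prepended to the names of any values introduced here;
/// for example (illustrative), a GEP built by this helper is named
/// "<NamePrefix>sroa_idx".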
static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
- SmallVectorImpl<Value *> &Indices) {
+ SmallVectorImpl<Value *> &Indices, Twine NamePrefix) {
if (Indices.empty())
return BasePtr;
if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
return BasePtr;
- return IRB.CreateInBoundsGEP(BasePtr, Indices, "idx");
+ return IRB.CreateInBoundsGEP(BasePtr, Indices, NamePrefix + "sroa_idx");
}
/// \brief Get a natural GEP off of the BasePtr walking through Ty toward
/// TargetTy. If we can't find one with the same type, we at least try to use
/// one with the same size. If none of that works, we just produce the GEP as
/// indicated by Indices to have the correct offset.
-static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &TD,
+static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
Value *BasePtr, Type *Ty, Type *TargetTy,
- SmallVectorImpl<Value *> &Indices) {
+ SmallVectorImpl<Value *> &Indices,
+ Twine NamePrefix) {
if (Ty == TargetTy)
- return buildGEP(IRB, BasePtr, Indices);
+ return buildGEP(IRB, BasePtr, Indices, NamePrefix);
+
+ // Pointer size to use for the indices.
+ unsigned PtrSize = DL.getPointerTypeSizeInBits(BasePtr->getType());
// See if we can descend into a struct and locate a field with the correct
// type.
do {
if (ElementTy->isPointerTy())
break;
- if (SequentialType *SeqTy = dyn_cast<SequentialType>(ElementTy)) {
- ElementTy = SeqTy->getElementType();
- // Note that we use the default address space as this index is over an
- // array or a vector, not a pointer.
- Indices.push_back(IRB.getInt(APInt(TD.getPointerSizeInBits(0), 0)));
+
+ if (ArrayType *ArrayTy = dyn_cast<ArrayType>(ElementTy)) {
+ ElementTy = ArrayTy->getElementType();
+ Indices.push_back(IRB.getIntN(PtrSize, 0));
+ } else if (VectorType *VectorTy = dyn_cast<VectorType>(ElementTy)) {
+ ElementTy = VectorTy->getElementType();
+ Indices.push_back(IRB.getInt32(0));
} else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
if (STy->element_begin() == STy->element_end())
break; // Nothing left to descend into.
if (ElementTy != TargetTy)
Indices.erase(Indices.end() - NumLayers, Indices.end());
- return buildGEP(IRB, BasePtr, Indices);
+ return buildGEP(IRB, BasePtr, Indices, NamePrefix);
}
/// \brief Recursively compute indices for a natural GEP.
///
/// This is the recursive step for getNaturalGEPWithOffset that walks down the
/// element types adding appropriate indices for the GEP.
-static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &TD,
+static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
Value *Ptr, Type *Ty, APInt &Offset,
Type *TargetTy,
- SmallVectorImpl<Value *> &Indices) {
+ SmallVectorImpl<Value *> &Indices,
+ Twine NamePrefix) {
if (Offset == 0)
- return getNaturalGEPWithType(IRB, TD, Ptr, Ty, TargetTy, Indices);
+    return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices,
+                                 NamePrefix);
// We can't recurse through pointer types.
if (Ty->isPointerTy())
- return 0;
+ return nullptr;
// We try to analyze GEPs over vectors here, but note that these GEPs are
// extremely poorly defined currently. The long-term goal is to remove GEPing
// over a vector from the IR completely.
if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
- unsigned ElementSizeInBits = TD.getTypeSizeInBits(VecTy->getScalarType());
- if (ElementSizeInBits % 8)
- return 0; // GEPs over non-multiple of 8 size vector elements are invalid.
+ unsigned ElementSizeInBits = DL.getTypeSizeInBits(VecTy->getScalarType());
+ if (ElementSizeInBits % 8 != 0) {
+ // GEPs over non-multiple of 8 size vector elements are invalid.
+ return nullptr;
+ }
APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8);
APInt NumSkippedElements = Offset.sdiv(ElementSize);
if (NumSkippedElements.ugt(VecTy->getNumElements()))
- return 0;
+ return nullptr;
Offset -= NumSkippedElements * ElementSize;
Indices.push_back(IRB.getInt(NumSkippedElements));
- return getNaturalGEPRecursively(IRB, TD, Ptr, VecTy->getElementType(),
- Offset, TargetTy, Indices);
+ return getNaturalGEPRecursively(IRB, DL, Ptr, VecTy->getElementType(),
+ Offset, TargetTy, Indices, NamePrefix);
}
if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
Type *ElementTy = ArrTy->getElementType();
- APInt ElementSize(Offset.getBitWidth(), TD.getTypeAllocSize(ElementTy));
+ APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
APInt NumSkippedElements = Offset.sdiv(ElementSize);
if (NumSkippedElements.ugt(ArrTy->getNumElements()))
- return 0;
+ return nullptr;
Offset -= NumSkippedElements * ElementSize;
Indices.push_back(IRB.getInt(NumSkippedElements));
- return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy,
- Indices);
+ return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
+ Indices, NamePrefix);
}
StructType *STy = dyn_cast<StructType>(Ty);
if (!STy)
- return 0;
+ return nullptr;
- const StructLayout *SL = TD.getStructLayout(STy);
+ const StructLayout *SL = DL.getStructLayout(STy);
uint64_t StructOffset = Offset.getZExtValue();
if (StructOffset >= SL->getSizeInBytes())
- return 0;
+ return nullptr;
unsigned Index = SL->getElementContainingOffset(StructOffset);
Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index));
Type *ElementTy = STy->getElementType(Index);
- if (Offset.uge(TD.getTypeAllocSize(ElementTy)))
- return 0; // The offset points into alignment padding.
+ if (Offset.uge(DL.getTypeAllocSize(ElementTy)))
+ return nullptr; // The offset points into alignment padding.
Indices.push_back(IRB.getInt32(Index));
- return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy,
- Indices);
+ return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
+ Indices, NamePrefix);
}
/// \brief Get a natural GEP from a base pointer to a particular offset and
/// Indices, and setting Ty to the result subtype.
///
/// If no natural GEP can be constructed, this function returns null.
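///
/// For example (illustrative), a base pointer of type [4 x i32]* with an
/// offset of 8 bytes and an i32 target type yields the indices (0, 2), i.e. a
/// GEP to the third element of the array.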
-static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &TD,
+static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
Value *Ptr, APInt Offset, Type *TargetTy,
- SmallVectorImpl<Value *> &Indices) {
+ SmallVectorImpl<Value *> &Indices,
+ Twine NamePrefix) {
PointerType *Ty = cast<PointerType>(Ptr->getType());
// Don't consider any GEPs through an i8* as natural unless the TargetTy is
// an i8.
- if (Ty == IRB.getInt8PtrTy() && TargetTy->isIntegerTy(8))
- return 0;
+  if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) &&
+      TargetTy->isIntegerTy(8))
+ return nullptr;
Type *ElementTy = Ty->getElementType();
if (!ElementTy->isSized())
- return 0; // We can't GEP through an unsized element.
- APInt ElementSize(Offset.getBitWidth(), TD.getTypeAllocSize(ElementTy));
+ return nullptr; // We can't GEP through an unsized element.
+ APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
if (ElementSize == 0)
- return 0; // Zero-length arrays can't help us build a natural GEP.
+ return nullptr; // Zero-length arrays can't help us build a natural GEP.
APInt NumSkippedElements = Offset.sdiv(ElementSize);
Offset -= NumSkippedElements * ElementSize;
Indices.push_back(IRB.getInt(NumSkippedElements));
- return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy,
- Indices);
+ return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
+ Indices, NamePrefix);
}
/// \brief Compute an adjusted pointer from Ptr by Offset bytes where the
/// properties. The algorithm tries to fold as many constant indices into
/// a single GEP as possible, thus making each GEP more independent of the
/// surrounding code.
-static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &TD,
- Value *Ptr, APInt Offset, Type *PointerTy) {
+static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
+ APInt Offset, Type *PointerTy,
+ Twine NamePrefix) {
// Even though we don't look through PHI nodes, we could be called on an
// instruction in an unreachable block, which may be on a cycle.
SmallPtrSet<Value *, 4> Visited;
// We may end up computing an offset pointer that has the wrong type. If we
// never are able to compute one directly that has the correct type, we'll
// fall back to it, so keep it around here.
- Value *OffsetPtr = 0;
+ Value *OffsetPtr = nullptr;
// Remember any i8 pointer we come across to re-use if we need to do a raw
// byte offset.
- Value *Int8Ptr = 0;
+ Value *Int8Ptr = nullptr;
APInt Int8PtrOffset(Offset.getBitWidth(), 0);
Type *TargetTy = PointerTy->getPointerElementType();
// First fold any existing GEPs into the offset.
while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
APInt GEPOffset(Offset.getBitWidth(), 0);
- if (!GEP->accumulateConstantOffset(TD, GEPOffset))
+ if (!GEP->accumulateConstantOffset(DL, GEPOffset))
break;
Offset += GEPOffset;
Ptr = GEP->getPointerOperand();
// See if we can perform a natural GEP here.
Indices.clear();
- if (Value *P = getNaturalGEPWithOffset(IRB, TD, Ptr, Offset, TargetTy,
- Indices)) {
+ if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy,
+ Indices, NamePrefix)) {
if (P->getType() == PointerTy) {
// Zap any offset pointer that we ended up computing in previous rounds.
if (OffsetPtr && OffsetPtr->use_empty())
if (!OffsetPtr) {
if (!Int8Ptr) {
- Int8Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy(),
- "raw_cast");
+ Int8Ptr = IRB.CreateBitCast(
+ Ptr, IRB.getInt8PtrTy(PointerTy->getPointerAddressSpace()),
+ NamePrefix + "sroa_raw_cast");
Int8PtrOffset = Offset;
}
OffsetPtr = Int8PtrOffset == 0 ? Int8Ptr :
IRB.CreateInBoundsGEP(Int8Ptr, IRB.getInt(Int8PtrOffset),
- "raw_idx");
+ NamePrefix + "sroa_raw_idx");
}
Ptr = OffsetPtr;
// On the off chance we were targeting i8*, guard the bitcast here.
if (Ptr->getType() != PointerTy)
- Ptr = IRB.CreateBitCast(Ptr, PointerTy, "cast");
+ Ptr = IRB.CreateBitCast(Ptr, PointerTy, NamePrefix + "sroa_cast");
return Ptr;
}
if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType())
return false;
+ // We can convert pointers to integers and vice-versa. Same for vectors
+ // of pointers and integers.
+ OldTy = OldTy->getScalarType();
+ NewTy = NewTy->getScalarType();
if (NewTy->isPointerTy() || OldTy->isPointerTy()) {
if (NewTy->isPointerTy() && OldTy->isPointerTy())
return true;
/// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test
/// two types for viability with this routine.
static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
- Type *Ty) {
- assert(canConvertValue(DL, V->getType(), Ty) &&
- "Value not convertable to type");
- if (V->getType() == Ty)
+ Type *NewTy) {
+ Type *OldTy = V->getType();
+ assert(canConvertValue(DL, OldTy, NewTy) && "Value not convertable to type");
+
+ if (OldTy == NewTy)
return V;
- if (IntegerType *OldITy = dyn_cast<IntegerType>(V->getType()))
- if (IntegerType *NewITy = dyn_cast<IntegerType>(Ty))
+
+ if (IntegerType *OldITy = dyn_cast<IntegerType>(OldTy))
+ if (IntegerType *NewITy = dyn_cast<IntegerType>(NewTy))
if (NewITy->getBitWidth() > OldITy->getBitWidth())
return IRB.CreateZExt(V, NewITy);
- if (V->getType()->isIntegerTy() && Ty->isPointerTy())
- return IRB.CreateIntToPtr(V, Ty);
- if (V->getType()->isPointerTy() && Ty->isIntegerTy())
- return IRB.CreatePtrToInt(V, Ty);
- return IRB.CreateBitCast(V, Ty);
+ // See if we need inttoptr for this type pair. A cast involving both scalars
+  // and vectors requires an additional bitcast.
+ if (OldTy->getScalarType()->isIntegerTy() &&
+ NewTy->getScalarType()->isPointerTy()) {
+ // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8*
+ if (OldTy->isVectorTy() && !NewTy->isVectorTy())
+ return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
+ NewTy);
+
+ // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*>
+ if (!OldTy->isVectorTy() && NewTy->isVectorTy())
+ return IRB.CreateIntToPtr(IRB.CreateBitCast(V, DL.getIntPtrType(NewTy)),
+ NewTy);
+
+ return IRB.CreateIntToPtr(V, NewTy);
+ }
+
+ // See if we need ptrtoint for this type pair. A cast involving both scalars
+  // and vectors requires an additional bitcast.
+ if (OldTy->getScalarType()->isPointerTy() &&
+ NewTy->getScalarType()->isIntegerTy()) {
+ // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128
+ if (OldTy->isVectorTy() && !NewTy->isVectorTy())
+ return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
+ NewTy);
+
+ // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32>
+ if (!OldTy->isVectorTy() && NewTy->isVectorTy())
+ return IRB.CreateBitCast(IRB.CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
+ NewTy);
+
+ return IRB.CreatePtrToInt(V, NewTy);
+ }
+
+ return IRB.CreateBitCast(V, NewTy);
}
-/// \brief Test whether the given partition use can be promoted to a vector.
+/// \brief Test whether the given slice use can be promoted to a vector.
///
/// This function is called to test each entry in a partition which is slated
-/// for a single partition.
-static bool isVectorPromotionViableForPartitioning(
- const DataLayout &TD, AllocaPartitioning &P,
- uint64_t PartitionBeginOffset, uint64_t PartitionEndOffset, VectorType *Ty,
- uint64_t ElementSize, AllocaPartitioning::const_iterator I) {
- // First validate the partitioning offsets.
+/// for a single slice.
+static bool
+isVectorPromotionViableForSlice(const DataLayout &DL, uint64_t SliceBeginOffset,
+ uint64_t SliceEndOffset, VectorType *Ty,
+ uint64_t ElementSize, const Slice &S) {
+ // First validate the slice offsets.
uint64_t BeginOffset =
- std::max(I->beginOffset(), PartitionBeginOffset) - PartitionBeginOffset;
+ std::max(S.beginOffset(), SliceBeginOffset) - SliceBeginOffset;
uint64_t BeginIndex = BeginOffset / ElementSize;
if (BeginIndex * ElementSize != BeginOffset ||
BeginIndex >= Ty->getNumElements())
return false;
uint64_t EndOffset =
- std::min(I->endOffset(), PartitionEndOffset) - PartitionBeginOffset;
+ std::min(S.endOffset(), SliceEndOffset) - SliceBeginOffset;
uint64_t EndIndex = EndOffset / ElementSize;
if (EndIndex * ElementSize != EndOffset || EndIndex > Ty->getNumElements())
return false;
assert(EndIndex > BeginIndex && "Empty vector!");
uint64_t NumElements = EndIndex - BeginIndex;
- Type *PartitionTy =
- (NumElements == 1) ? Ty->getElementType()
- : VectorType::get(Ty->getElementType(), NumElements);
+ Type *SliceTy = (NumElements == 1)
+ ? Ty->getElementType()
+ : VectorType::get(Ty->getElementType(), NumElements);
Type *SplitIntTy =
Type::getIntNTy(Ty->getContext(), NumElements * ElementSize * 8);
- Use *U = I->getUse();
+ Use *U = S.getUse();
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
if (MI->isVolatile())
return false;
- if (!I->isSplittable())
+ if (!S.isSplittable())
return false; // Skip any unsplittable intrinsics.
+ } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
+ if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
+ II->getIntrinsicID() != Intrinsic::lifetime_end)
+ return false;
} else if (U->get()->getType()->getPointerElementType()->isStructTy()) {
// Disable vector promotion when there are loads or stores of an FCA.
return false;
if (LI->isVolatile())
return false;
Type *LTy = LI->getType();
- if (PartitionBeginOffset > I->beginOffset() ||
- PartitionEndOffset < I->endOffset()) {
+ if (SliceBeginOffset > S.beginOffset() || SliceEndOffset < S.endOffset()) {
assert(LTy->isIntegerTy());
LTy = SplitIntTy;
}
- if (!canConvertValue(TD, PartitionTy, LTy))
+ if (!canConvertValue(DL, SliceTy, LTy))
return false;
} else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
if (SI->isVolatile())
return false;
Type *STy = SI->getValueOperand()->getType();
- if (PartitionBeginOffset > I->beginOffset() ||
- PartitionEndOffset < I->endOffset()) {
+ if (SliceBeginOffset > S.beginOffset() || SliceEndOffset < S.endOffset()) {
assert(STy->isIntegerTy());
STy = SplitIntTy;
}
- if (!canConvertValue(TD, STy, PartitionTy))
+ if (!canConvertValue(DL, STy, SliceTy))
return false;
+ } else {
+ return false;
}
return true;
}
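// A worked example of the index math above (hypothetical numbers): with
// Ty = <4 x i32> and ElementSize = 4 bytes, a slice covering bytes [4, 12) of
// the partition gives BeginIndex = 1 and EndIndex = 3, so SliceTy = <2 x i32>
// and SplitIntTy = i64. A slice covering [5, 12) fails the BeginIndex check
// because 5 is not a multiple of the element size.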
-/// \brief Test whether the given alloca partition can be promoted to a vector.
+/// \brief Test whether the given alloca partitioning and range of slices can be
+/// promoted to a vector.
///
/// This is a quick test to check whether we can rewrite a particular alloca
/// partition (and its newly formed alloca) into a vector alloca with only
/// SSA value. We can only ensure this for a limited set of operations, and we
/// don't want to do the rewrites unless we are confident that the result will
/// be promotable, so we have an early test here.
-static bool isVectorPromotionViable(
- const DataLayout &TD, Type *AllocaTy, AllocaPartitioning &P,
- uint64_t PartitionBeginOffset, uint64_t PartitionEndOffset,
- AllocaPartitioning::const_iterator I, AllocaPartitioning::const_iterator E,
- ArrayRef<AllocaPartitioning::iterator> SplitUses) {
- VectorType *Ty = dyn_cast<VectorType>(AllocaTy);
- if (!Ty)
- return false;
+static VectorType *
+isVectorPromotionViable(const DataLayout &DL, Type *AllocaTy,
+ uint64_t SliceBeginOffset, uint64_t SliceEndOffset,
+ AllocaSlices::const_range Slices,
+ ArrayRef<AllocaSlices::iterator> SplitUses) {
+ // Collect the candidate types for vector-based promotion. Also track whether
+ // we have different element types.
+ SmallVector<VectorType *, 4> CandidateTys;
+ Type *CommonEltTy = nullptr;
+ bool HaveCommonEltTy = true;
+ auto CheckCandidateType = [&](Type *Ty) {
+ if (auto *VTy = dyn_cast<VectorType>(Ty)) {
+ CandidateTys.push_back(VTy);
+ if (!CommonEltTy)
+ CommonEltTy = VTy->getElementType();
+ else if (CommonEltTy != VTy->getElementType())
+ HaveCommonEltTy = false;
+ }
+ };
+ CheckCandidateType(AllocaTy);
+ // Consider any loads or stores that are the exact size of the slice.
+ for (const auto &S : Slices)
+ if (S.beginOffset() == SliceBeginOffset &&
+ S.endOffset() == SliceEndOffset) {
+ if (auto *LI = dyn_cast<LoadInst>(S.getUse()->getUser()))
+ CheckCandidateType(LI->getType());
+ else if (auto *SI = dyn_cast<StoreInst>(S.getUse()->getUser()))
+ CheckCandidateType(SI->getValueOperand()->getType());
+ }
- uint64_t ElementSize = TD.getTypeSizeInBits(Ty->getScalarType());
+ // If we didn't find a vector type, nothing to do here.
+ if (CandidateTys.empty())
+ return nullptr;
+
+ // Remove non-integer vector types if we had multiple common element types.
+ // FIXME: It'd be nice to replace them with integer vector types, but we can't
+ // do that until all the backends are known to produce good code for all
+ // integer vector types.
+ if (!HaveCommonEltTy) {
+ CandidateTys.erase(std::remove_if(CandidateTys.begin(), CandidateTys.end(),
+ [](VectorType *VTy) {
+ return !VTy->getElementType()->isIntegerTy();
+ }),
+ CandidateTys.end());
+
+ // If there were no integer vector types, give up.
+ if (CandidateTys.empty())
+ return nullptr;
+
+ // Rank the remaining candidate vector types. This is easy because we know
+ // they're all integer vectors. We sort by ascending number of elements.
+ auto RankVectorTypes = [&DL](VectorType *RHSTy, VectorType *LHSTy) {
+ assert(DL.getTypeSizeInBits(RHSTy) == DL.getTypeSizeInBits(LHSTy) &&
+ "Cannot have vector types of different sizes!");
+ assert(RHSTy->getElementType()->isIntegerTy() &&
+ "All non-integer types eliminated!");
+ assert(LHSTy->getElementType()->isIntegerTy() &&
+ "All non-integer types eliminated!");
+ return RHSTy->getNumElements() < LHSTy->getNumElements();
+ };
+ std::sort(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes);
+ CandidateTys.erase(
+ std::unique(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes),
+ CandidateTys.end());
+ } else {
+// The only way to have the same element type in every vector type is to
+// have the same vector type. Check that and remove all but one.
+#ifndef NDEBUG
+ for (VectorType *VTy : CandidateTys) {
+ assert(VTy->getElementType() == CommonEltTy &&
+ "Unaccounted for element type!");
+ assert(VTy == CandidateTys[0] &&
+ "Different vector types with the same element type!");
+ }
+#endif
+ CandidateTys.resize(1);
+ }
- // While the definition of LLVM vectors is bitpacked, we don't support sizes
- // that aren't byte sized.
- if (ElementSize % 8)
- return false;
- assert((TD.getTypeSizeInBits(Ty) % 8) == 0 &&
- "vector size not a multiple of element size?");
- ElementSize /= 8;
-
- for (; I != E; ++I)
- if (!isVectorPromotionViableForPartitioning(
- TD, P, PartitionBeginOffset, PartitionEndOffset, Ty, ElementSize,
- I))
- return false;
+ // Try each vector type, and return the one which works.
+ auto CheckVectorTypeForPromotion = [&](VectorType *VTy) {
+ uint64_t ElementSize = DL.getTypeSizeInBits(VTy->getElementType());
- for (ArrayRef<AllocaPartitioning::iterator>::const_iterator
- SUI = SplitUses.begin(),
- SUE = SplitUses.end();
- SUI != SUE; ++SUI)
- if (!isVectorPromotionViableForPartitioning(
- TD, P, PartitionBeginOffset, PartitionEndOffset, Ty, ElementSize,
- *SUI))
+ // While the definition of LLVM vectors is bitpacked, we don't support sizes
+ // that aren't byte sized.
+ if (ElementSize % 8)
return false;
+ assert((DL.getTypeSizeInBits(VTy) % 8) == 0 &&
+ "vector size not a multiple of element size?");
+ ElementSize /= 8;
- return true;
+ for (const auto &S : Slices)
+ if (!isVectorPromotionViableForSlice(DL, SliceBeginOffset, SliceEndOffset,
+ VTy, ElementSize, S))
+ return false;
+
+ for (const auto &SI : SplitUses)
+ if (!isVectorPromotionViableForSlice(DL, SliceBeginOffset, SliceEndOffset,
+ VTy, ElementSize, *SI))
+ return false;
+
+ return true;
+ };
+ for (VectorType *VTy : CandidateTys)
+ if (CheckVectorTypeForPromotion(VTy))
+ return VTy;
+
+ return nullptr;
}
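// A worked example of the candidate collection above (hypothetical types): an
// alloca of type <4 x float> with a whole-partition load of type <2 x i64>
// yields CandidateTys = {<4 x float>, <2 x i64>}. The element types differ, so
// the non-integer candidate <4 x float> is erased and only <2 x i64> is handed
// to CheckVectorTypeForPromotion.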
-/// \brief Test whether a partitioning slice of an alloca is a valid set of
-/// operations for integer widening.
+/// \brief Test whether a slice of an alloca is valid for integer widening.
///
/// This implements the necessary checking for the \c isIntegerWideningViable
-/// test below on a single partitioning slice of the alloca.
-static bool isIntegerWideningViableForPartitioning(
- const DataLayout &TD, Type *AllocaTy, uint64_t AllocBeginOffset,
- uint64_t Size, AllocaPartitioning &P, AllocaPartitioning::const_iterator I,
- bool &WholeAllocaOp) {
- uint64_t RelBegin = I->beginOffset() - AllocBeginOffset;
- uint64_t RelEnd = I->endOffset() - AllocBeginOffset;
+/// test below on a single slice of the alloca.
+static bool isIntegerWideningViableForSlice(const DataLayout &DL,
+ Type *AllocaTy,
+ uint64_t AllocBeginOffset,
+ uint64_t Size,
+ const Slice &S,
+ bool &WholeAllocaOp) {
+ uint64_t RelBegin = S.beginOffset() - AllocBeginOffset;
+ uint64_t RelEnd = S.endOffset() - AllocBeginOffset;
// We can't reasonably handle cases where the load or store extends past
// the end of the alloca's type and into its padding.
if (RelEnd > Size)
return false;
- Use *U = I->getUse();
+ Use *U = S.getUse();
if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
if (LI->isVolatile())
return false;
- if (RelBegin == 0 && RelEnd == Size)
+ // Note that we don't count vector loads or stores as whole-alloca
+ // operations which enable integer widening because we would prefer to use
+ // vector widening instead.
+ if (!isa<VectorType>(LI->getType()) && RelBegin == 0 && RelEnd == Size)
WholeAllocaOp = true;
if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) {
- if (ITy->getBitWidth() < TD.getTypeStoreSizeInBits(ITy))
+ if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy))
return false;
} else if (RelBegin != 0 || RelEnd != Size ||
- !canConvertValue(TD, AllocaTy, LI->getType())) {
+ !canConvertValue(DL, AllocaTy, LI->getType())) {
// Non-integer loads need to be convertible from the alloca type so that
// they are promotable.
return false;
Type *ValueTy = SI->getValueOperand()->getType();
if (SI->isVolatile())
return false;
- if (RelBegin == 0 && RelEnd == Size)
+ // Note that we don't count vector loads or stores as whole-alloca
+ // operations which enable integer widening because we would prefer to use
+ // vector widening instead.
+ if (!isa<VectorType>(ValueTy) && RelBegin == 0 && RelEnd == Size)
WholeAllocaOp = true;
if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) {
- if (ITy->getBitWidth() < TD.getTypeStoreSizeInBits(ITy))
+ if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy))
return false;
} else if (RelBegin != 0 || RelEnd != Size ||
- !canConvertValue(TD, ValueTy, AllocaTy)) {
+ !canConvertValue(DL, ValueTy, AllocaTy)) {
// Non-integer stores need to be convertible to the alloca type so that
// they are promotable.
return false;
} else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
if (MI->isVolatile() || !isa<Constant>(MI->getLength()))
return false;
- if (!I->isSplittable())
+ if (!S.isSplittable())
return false; // Skip any unsplittable intrinsics.
} else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
/// stores to a particular alloca into wider loads and stores and be able to
/// promote the resulting alloca.
static bool
-isIntegerWideningViable(const DataLayout &TD, Type *AllocaTy,
- uint64_t AllocBeginOffset, AllocaPartitioning &P,
- AllocaPartitioning::const_iterator I,
- AllocaPartitioning::const_iterator E,
- ArrayRef<AllocaPartitioning::iterator> SplitUses) {
- uint64_t SizeInBits = TD.getTypeSizeInBits(AllocaTy);
+isIntegerWideningViable(const DataLayout &DL, Type *AllocaTy,
+ uint64_t AllocBeginOffset,
+ AllocaSlices::const_range Slices,
+ ArrayRef<AllocaSlices::iterator> SplitUses) {
+ uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy);
// Don't create integer types larger than the maximum bitwidth.
if (SizeInBits > IntegerType::MAX_INT_BITS)
return false;
// Don't try to handle allocas with bit-padding.
- if (SizeInBits != TD.getTypeStoreSizeInBits(AllocaTy))
+ if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy))
return false;
// We need to ensure that an integer type with the appropriate bitwidth can
// be converted to the alloca type, whatever that is. We don't want to force
// the alloca itself to have an integer type if there is a more suitable one.
Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits);
- if (!canConvertValue(TD, AllocaTy, IntTy) ||
- !canConvertValue(TD, IntTy, AllocaTy))
+ if (!canConvertValue(DL, AllocaTy, IntTy) ||
+ !canConvertValue(DL, IntTy, AllocaTy))
return false;
- uint64_t Size = TD.getTypeStoreSize(AllocaTy);
+ uint64_t Size = DL.getTypeStoreSize(AllocaTy);
// While examining uses, we ensure that the alloca has a covering load or
// store. We don't want to widen the integer operations only to fail to
// promote due to some other unsplittable entry (which we may make splittable
// later). However, if there are only splittable uses, go ahead and assume
// that we cover the alloca.
- bool WholeAllocaOp = (I != E) ? false : TD.isLegalInteger(SizeInBits);
+ bool WholeAllocaOp =
+ Slices.begin() != Slices.end() ? false : DL.isLegalInteger(SizeInBits);
- for (; I != E; ++I)
- if (!isIntegerWideningViableForPartitioning(TD, AllocaTy, AllocBeginOffset,
- Size, P, I, WholeAllocaOp))
+ for (const auto &S : Slices)
+ if (!isIntegerWideningViableForSlice(DL, AllocaTy, AllocBeginOffset, Size,
+ S, WholeAllocaOp))
return false;
- for (ArrayRef<AllocaPartitioning::iterator>::const_iterator
- SUI = SplitUses.begin(),
- SUE = SplitUses.end();
- SUI != SUE; ++SUI)
- if (!isIntegerWideningViableForPartitioning(TD, AllocaTy, AllocBeginOffset,
- Size, P, *SUI, WholeAllocaOp))
+ for (const auto &SI : SplitUses)
+ if (!isIntegerWideningViableForSlice(DL, AllocaTy, AllocBeginOffset, Size,
+ *SI, WholeAllocaOp))
return false;
return WholeAllocaOp;
}
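// A worked example (hypothetical, assuming i64 is a legal integer type): an
// alloca of type double read by a whole-alloca load of i64 and partially
// overwritten by an i32 store at byte offset 4 passes these checks: the i64
// load sets WholeAllocaOp, and the i32 store is accepted because its bit width
// equals its store size. Such accesses are later rewritten as integer
// extracts/inserts over a single i64 value.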
namespace {
-/// \brief Visitor to rewrite instructions using a partition of an alloca to
-/// use a new alloca.
+/// \brief Visitor to rewrite instructions using a particular slice of an alloca
+/// to use a new alloca.
///
/// Also implements the rewriting to vector-based accesses when the partition
/// passes the isVectorPromotionViable predicate. Most of the rewriting logic
/// lives here.
-class AllocaPartitionRewriter : public InstVisitor<AllocaPartitionRewriter,
- bool> {
+class AllocaSliceRewriter : public InstVisitor<AllocaSliceRewriter, bool> {
// Befriend the base class so it can delegate to private visit methods.
- friend class llvm::InstVisitor<AllocaPartitionRewriter, bool>;
- typedef llvm::InstVisitor<AllocaPartitionRewriter, bool> Base;
+ friend class llvm::InstVisitor<AllocaSliceRewriter, bool>;
+ typedef llvm::InstVisitor<AllocaSliceRewriter, bool> Base;
- const DataLayout &TD;
- AllocaPartitioning &P;
+ const DataLayout &DL;
+ AllocaSlices &AS;
SROA &Pass;
AllocaInst &OldAI, &NewAI;
const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset;
Type *NewAllocaTy;
+ // This is a convenience and flag variable that will be null unless the new
+ // alloca's integer operations should be widened to this integer type due to
+ // passing isIntegerWideningViable above. If it is non-null, the desired
+ // integer type will be stored here for easy access during rewriting.
+ IntegerType *IntTy;
+
// If we are rewriting an alloca partition which can be written as pure
// vector operations, we stash extra information here. When VecTy is
// non-null, we have some strict guarantees about the rewritten alloca:
Type *ElementTy;
uint64_t ElementSize;
- // This is a convenience and flag variable that will be null unless the new
- // alloca's integer operations should be widened to this integer type due to
- // passing isIntegerWideningViable above. If it is non-null, the desired
- // integer type will be stored here for easy access during rewriting.
- IntegerType *IntTy;
-
- // The offset of the partition user currently being rewritten.
+ // The original offset of the slice currently being rewritten relative to
+ // the original alloca.
uint64_t BeginOffset, EndOffset;
+ // The new offsets of the slice currently being rewritten relative to the
+ // original alloca.
+ uint64_t NewBeginOffset, NewEndOffset;
+
+ uint64_t SliceSize;
bool IsSplittable;
bool IsSplit;
Use *OldUse;
Instruction *OldPtr;
+ // Track post-rewrite users which are PHI nodes and Selects.
+ SmallPtrSetImpl<PHINode *> &PHIUsers;
+ SmallPtrSetImpl<SelectInst *> &SelectUsers;
+
// Utility IR builder, whose name prefix is set up for each visited use, and
// the insertion point is set to point to the user.
IRBuilderTy IRB;
public:
- AllocaPartitionRewriter(const DataLayout &TD, AllocaPartitioning &P,
- SROA &Pass, AllocaInst &OldAI, AllocaInst &NewAI,
- uint64_t NewBeginOffset, uint64_t NewEndOffset,
- bool IsVectorPromotable = false,
- bool IsIntegerPromotable = false)
- : TD(TD), P(P), Pass(Pass), OldAI(OldAI), NewAI(NewAI),
- NewAllocaBeginOffset(NewBeginOffset), NewAllocaEndOffset(NewEndOffset),
+ AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &AS, SROA &Pass,
+ AllocaInst &OldAI, AllocaInst &NewAI,
+ uint64_t NewAllocaBeginOffset,
+ uint64_t NewAllocaEndOffset, bool IsIntegerPromotable,
+ VectorType *PromotableVecTy,
+ SmallPtrSetImpl<PHINode *> &PHIUsers,
+ SmallPtrSetImpl<SelectInst *> &SelectUsers)
+ : DL(DL), AS(AS), Pass(Pass), OldAI(OldAI), NewAI(NewAI),
+ NewAllocaBeginOffset(NewAllocaBeginOffset),
+ NewAllocaEndOffset(NewAllocaEndOffset),
NewAllocaTy(NewAI.getAllocatedType()),
- VecTy(IsVectorPromotable ? cast<VectorType>(NewAllocaTy) : 0),
- ElementTy(VecTy ? VecTy->getElementType() : 0),
- ElementSize(VecTy ? TD.getTypeSizeInBits(ElementTy) / 8 : 0),
IntTy(IsIntegerPromotable
? Type::getIntNTy(
NewAI.getContext(),
- TD.getTypeSizeInBits(NewAI.getAllocatedType()))
- : 0),
+ DL.getTypeSizeInBits(NewAI.getAllocatedType()))
+ : nullptr),
+ VecTy(PromotableVecTy),
+ ElementTy(VecTy ? VecTy->getElementType() : nullptr),
+ ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy) / 8 : 0),
BeginOffset(), EndOffset(), IsSplittable(), IsSplit(), OldUse(),
- OldPtr(), IRB(NewAI.getContext(), ConstantFolder()) {
+ OldPtr(), PHIUsers(PHIUsers), SelectUsers(SelectUsers),
+ IRB(NewAI.getContext(), ConstantFolder()) {
if (VecTy) {
- assert((TD.getTypeSizeInBits(ElementTy) % 8) == 0 &&
+ assert((DL.getTypeSizeInBits(ElementTy) % 8) == 0 &&
"Only multiple-of-8 sized vector elements are viable");
++NumVectorized;
}
- assert((!IsVectorPromotable && !IsIntegerPromotable) ||
- IsVectorPromotable != IsIntegerPromotable);
+ assert((!IntTy && !VecTy) || (IntTy && !VecTy) || (!IntTy && VecTy));
}
- bool visit(AllocaPartitioning::const_iterator I) {
+ bool visit(AllocaSlices::const_iterator I) {
bool CanSROA = true;
BeginOffset = I->beginOffset();
EndOffset = I->endOffset();
IsSplit =
BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset;
+ // Compute the intersecting offset range.
+ assert(BeginOffset < NewAllocaEndOffset);
+ assert(EndOffset > NewAllocaBeginOffset);
+ NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
+ NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
+
+ SliceSize = NewEndOffset - NewBeginOffset;
+
OldUse = I->getUse();
OldPtr = cast<Instruction>(OldUse->get());
llvm_unreachable("No rewrite rule for this instruction!");
}
- Value *getAdjustedAllocaPtr(IRBuilderTy &IRB, uint64_t Offset,
- Type *PointerTy) {
- assert(Offset >= NewAllocaBeginOffset);
- return getAdjustedPtr(IRB, TD, &NewAI, APInt(TD.getPointerSizeInBits(),
- Offset - NewAllocaBeginOffset),
- PointerTy);
- }
+ Value *getNewAllocaSlicePtr(IRBuilderTy &IRB, Type *PointerTy) {
+ // Note that the offset computation can use BeginOffset or NewBeginOffset
+ // interchangeably for unsplit slices.
+ assert(IsSplit || BeginOffset == NewBeginOffset);
+ uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
- /// \brief Compute suitable alignment to access an offset into the new alloca.
- unsigned getOffsetAlign(uint64_t Offset) {
- unsigned NewAIAlign = NewAI.getAlignment();
- if (!NewAIAlign)
- NewAIAlign = TD.getABITypeAlignment(NewAI.getAllocatedType());
- return MinAlign(NewAIAlign, Offset);
+#ifndef NDEBUG
+ StringRef OldName = OldPtr->getName();
+ // Skip through the last '.sroa.' component of the name.
+ size_t LastSROAPrefix = OldName.rfind(".sroa.");
+ if (LastSROAPrefix != StringRef::npos) {
+ OldName = OldName.substr(LastSROAPrefix + strlen(".sroa."));
+ // Look for an SROA slice index.
+ size_t IndexEnd = OldName.find_first_not_of("0123456789");
+ if (IndexEnd != StringRef::npos && OldName[IndexEnd] == '.') {
+ // Strip the index and look for the offset.
+ OldName = OldName.substr(IndexEnd + 1);
+ size_t OffsetEnd = OldName.find_first_not_of("0123456789");
+ if (OffsetEnd != StringRef::npos && OldName[OffsetEnd] == '.')
+ // Strip the offset.
+ OldName = OldName.substr(OffsetEnd + 1);
+ }
+ }
+ // Strip any SROA suffixes as well.
+ OldName = OldName.substr(0, OldName.find(".sroa_"));
+#endif
+
+ return getAdjustedPtr(IRB, DL, &NewAI,
+ APInt(DL.getPointerSizeInBits(), Offset), PointerTy,
+#ifndef NDEBUG
+ Twine(OldName) + "."
+#else
+ Twine()
+#endif
+ );
}
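// An example of the name stripping above (hypothetical name): an old pointer
// named "a.sroa.3.16.copyload" is reduced to "copyload": the search drops
// "a.sroa.", the index "3." and offset "16." are stripped in turn, and no
// ".sroa_" suffix remains, so the rebuilt pointer chain is named with the
// prefix "copyload.".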
- /// \brief Compute suitable alignment to access a type at an offset of the
- /// new alloca.
+ /// \brief Compute suitable alignment to access this slice of the *new* alloca.
///
- /// \returns zero if the type's ABI alignment is a suitable alignment,
- /// otherwise returns the maximal suitable alignment.
- unsigned getOffsetTypeAlign(Type *Ty, uint64_t Offset) {
- unsigned Align = getOffsetAlign(Offset);
- return Align == TD.getABITypeAlignment(Ty) ? 0 : Align;
+ /// You can optionally pass a type to this routine and if that type's ABI
+ /// alignment is itself suitable, this will return zero.
+ unsigned getSliceAlign(Type *Ty = nullptr) {
+ unsigned NewAIAlign = NewAI.getAlignment();
+ if (!NewAIAlign)
+ NewAIAlign = DL.getABITypeAlignment(NewAI.getAllocatedType());
+ unsigned Align = MinAlign(NewAIAlign, NewBeginOffset - NewAllocaBeginOffset);
+ return (Ty && Align == DL.getABITypeAlignment(Ty)) ? 0 : Align;
}
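// A worked example (hypothetical numbers): if the new alloca is 8-byte aligned
// and the current slice begins 4 bytes into it, MinAlign(8, 4) yields 4.
// Querying with a type such as i32, whose ABI alignment is also 4, then
// returns 0, meaning the access can rely on the type's natural alignment.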
unsigned getIndex(uint64_t Offset) {
Pass.DeadInsts.insert(I);
}
- Value *rewriteVectorizedLoadInst(uint64_t NewBeginOffset,
- uint64_t NewEndOffset) {
+ Value *rewriteVectorizedLoadInst() {
unsigned BeginIndex = getIndex(NewBeginOffset);
unsigned EndIndex = getIndex(NewEndOffset);
assert(EndIndex > BeginIndex && "Empty vector!");
return extractVector(IRB, V, BeginIndex, EndIndex, "vec");
}
- Value *rewriteIntegerLoad(LoadInst &LI, uint64_t NewBeginOffset,
- uint64_t NewEndOffset) {
+ Value *rewriteIntegerLoad(LoadInst &LI) {
assert(IntTy && "We cannot insert an integer to the alloca");
assert(!LI.isVolatile());
Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
"load");
- V = convertValue(TD, IRB, V, IntTy);
+ V = convertValue(DL, IRB, V, IntTy);
assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
if (Offset > 0 || NewEndOffset < NewAllocaEndOffset)
- V = extractInteger(TD, IRB, V, cast<IntegerType>(LI.getType()), Offset,
+ V = extractInteger(DL, IRB, V, cast<IntegerType>(LI.getType()), Offset,
"extract");
return V;
}
Value *OldOp = LI.getOperand(0);
assert(OldOp == OldPtr);
- // Compute the intersecting offset range.
- assert(BeginOffset < NewAllocaEndOffset);
- assert(EndOffset > NewAllocaBeginOffset);
- uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
- uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
-
- uint64_t Size = NewEndOffset - NewBeginOffset;
-
- Type *TargetTy = IsSplit ? Type::getIntNTy(LI.getContext(), Size * 8)
+ Type *TargetTy = IsSplit ? Type::getIntNTy(LI.getContext(), SliceSize * 8)
: LI.getType();
bool IsPtrAdjusted = false;
Value *V;
if (VecTy) {
- V = rewriteVectorizedLoadInst(NewBeginOffset, NewEndOffset);
+ V = rewriteVectorizedLoadInst();
} else if (IntTy && LI.getType()->isIntegerTy()) {
- V = rewriteIntegerLoad(LI, NewBeginOffset, NewEndOffset);
+ V = rewriteIntegerLoad(LI);
} else if (NewBeginOffset == NewAllocaBeginOffset &&
- canConvertValue(TD, NewAllocaTy, LI.getType())) {
+ canConvertValue(DL, NewAllocaTy, LI.getType())) {
V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
- LI.isVolatile(), "load");
+ LI.isVolatile(), LI.getName());
} else {
Type *LTy = TargetTy->getPointerTo();
- V = IRB.CreateAlignedLoad(
- getAdjustedAllocaPtr(IRB, NewBeginOffset, LTy),
- getOffsetTypeAlign(TargetTy, NewBeginOffset - NewAllocaBeginOffset),
- LI.isVolatile(), "load");
+ V = IRB.CreateAlignedLoad(getNewAllocaSlicePtr(IRB, LTy),
+ getSliceAlign(TargetTy), LI.isVolatile(),
+ LI.getName());
IsPtrAdjusted = true;
}
- V = convertValue(TD, IRB, V, TargetTy);
+ V = convertValue(DL, IRB, V, TargetTy);
if (IsSplit) {
assert(!LI.isVolatile());
assert(LI.getType()->isIntegerTy() &&
"Only integer type loads and stores are split");
- assert(Size < TD.getTypeStoreSize(LI.getType()) &&
+ assert(SliceSize < DL.getTypeStoreSize(LI.getType()) &&
"Split load isn't smaller than original load");
assert(LI.getType()->getIntegerBitWidth() ==
- TD.getTypeStoreSizeInBits(LI.getType()) &&
+ DL.getTypeStoreSizeInBits(LI.getType()) &&
"Non-byte-multiple bit width");
// Move the insertion point just past the load so that we can refer to it.
- IRB.SetInsertPoint(llvm::next(BasicBlock::iterator(&LI)));
+ IRB.SetInsertPoint(std::next(BasicBlock::iterator(&LI)));
// Create a placeholder value with the same type as LI to use as the
// basis for the new value. This allows us to replace the uses of LI with
// the computed value, and then replace the placeholder with LI, leaving
// LI only used for this computation.
Value *Placeholder
= new LoadInst(UndefValue::get(LI.getType()->getPointerTo()));
- V = insertInteger(TD, IRB, Placeholder, V, NewBeginOffset,
+ V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset,
"insert");
LI.replaceAllUsesWith(V);
Placeholder->replaceAllUsesWith(&LI);
return !LI.isVolatile() && !IsPtrAdjusted;
}
- bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp,
- uint64_t NewBeginOffset,
- uint64_t NewEndOffset) {
+ bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp) {
if (V->getType() != VecTy) {
unsigned BeginIndex = getIndex(NewBeginOffset);
unsigned EndIndex = getIndex(NewEndOffset);
assert(EndIndex > BeginIndex && "Empty vector!");
unsigned NumElements = EndIndex - BeginIndex;
assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
- Type *PartitionTy
- = (NumElements == 1) ? ElementTy
- : VectorType::get(ElementTy, NumElements);
- if (V->getType() != PartitionTy)
- V = convertValue(TD, IRB, V, PartitionTy);
+ Type *SliceTy =
+ (NumElements == 1) ? ElementTy
+ : VectorType::get(ElementTy, NumElements);
+ if (V->getType() != SliceTy)
+ V = convertValue(DL, IRB, V, SliceTy);
// Mix in the existing elements.
Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
return true;
}
- bool rewriteIntegerStore(Value *V, StoreInst &SI,
- uint64_t NewBeginOffset, uint64_t NewEndOffset) {
+ bool rewriteIntegerStore(Value *V, StoreInst &SI) {
assert(IntTy && "We cannot extract an integer from the alloca");
assert(!SI.isVolatile());
- if (TD.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) {
+ if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) {
Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
"oldload");
- Old = convertValue(TD, IRB, Old, IntTy);
+ Old = convertValue(DL, IRB, Old, IntTy);
assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
- V = insertInteger(TD, IRB, Old, SI.getValueOperand(), Offset,
+ V = insertInteger(DL, IRB, Old, SI.getValueOperand(), Offset,
"insert");
}
- V = convertValue(TD, IRB, V, NewAllocaTy);
+ V = convertValue(DL, IRB, V, NewAllocaTy);
StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
Pass.DeadInsts.insert(&SI);
(void)Store;
if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets()))
Pass.PostPromotionWorklist.insert(AI);
- // Compute the intersecting offset range.
- assert(BeginOffset < NewAllocaEndOffset);
- assert(EndOffset > NewAllocaBeginOffset);
- uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
- uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
-
- uint64_t Size = NewEndOffset - NewBeginOffset;
- if (Size < TD.getTypeStoreSize(V->getType())) {
+ if (SliceSize < DL.getTypeStoreSize(V->getType())) {
assert(!SI.isVolatile());
assert(V->getType()->isIntegerTy() &&
"Only integer type loads and stores are split");
assert(V->getType()->getIntegerBitWidth() ==
- TD.getTypeStoreSizeInBits(V->getType()) &&
+ DL.getTypeStoreSizeInBits(V->getType()) &&
"Non-byte-multiple bit width");
- IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), Size * 8);
- V = extractInteger(TD, IRB, V, NarrowTy, NewBeginOffset,
+ IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), SliceSize * 8);
+ V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset,
"extract");
}
if (VecTy)
- return rewriteVectorizedStoreInst(V, SI, OldOp, NewBeginOffset,
- NewEndOffset);
+ return rewriteVectorizedStoreInst(V, SI, OldOp);
if (IntTy && V->getType()->isIntegerTy())
- return rewriteIntegerStore(V, SI, NewBeginOffset, NewEndOffset);
+ return rewriteIntegerStore(V, SI);
StoreInst *NewSI;
if (NewBeginOffset == NewAllocaBeginOffset &&
NewEndOffset == NewAllocaEndOffset &&
- canConvertValue(TD, V->getType(), NewAllocaTy)) {
- V = convertValue(TD, IRB, V, NewAllocaTy);
+ canConvertValue(DL, V->getType(), NewAllocaTy)) {
+ V = convertValue(DL, IRB, V, NewAllocaTy);
NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
SI.isVolatile());
} else {
- Value *NewPtr = getAdjustedAllocaPtr(IRB, NewBeginOffset,
- V->getType()->getPointerTo());
- NewSI = IRB.CreateAlignedStore(
- V, NewPtr, getOffsetTypeAlign(
- V->getType(), NewBeginOffset - NewAllocaBeginOffset),
- SI.isVolatile());
+ Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo());
+ NewSI = IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(V->getType()),
+ SI.isVolatile());
}
(void)NewSI;
Pass.DeadInsts.insert(&SI);
// pointer to the new alloca.
if (!isa<Constant>(II.getLength())) {
assert(!IsSplit);
- assert(BeginOffset >= NewAllocaBeginOffset);
- II.setDest(
- getAdjustedAllocaPtr(IRB, BeginOffset, II.getRawDest()->getType()));
+ assert(NewBeginOffset == BeginOffset);
+ II.setDest(getNewAllocaSlicePtr(IRB, OldPtr->getType()));
Type *CstTy = II.getAlignmentCst()->getType();
- II.setAlignment(ConstantInt::get(CstTy, getOffsetAlign(BeginOffset)));
+ II.setAlignment(ConstantInt::get(CstTy, getSliceAlign()));
deleteIfTriviallyDead(OldPtr);
return false;
Type *AllocaTy = NewAI.getAllocatedType();
Type *ScalarTy = AllocaTy->getScalarType();
- // Compute the intersecting offset range.
- assert(BeginOffset < NewAllocaEndOffset);
- assert(EndOffset > NewAllocaBeginOffset);
- uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
- uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
- uint64_t PartitionOffset = NewBeginOffset - NewAllocaBeginOffset;
-
// If this doesn't map cleanly onto the alloca type, and that type isn't
// a single value type, just emit a memset.
if (!VecTy && !IntTy &&
(BeginOffset > NewAllocaBeginOffset ||
EndOffset < NewAllocaEndOffset ||
+ SliceSize != DL.getTypeStoreSize(AllocaTy) ||
!AllocaTy->isSingleValueType() ||
- !TD.isLegalInteger(TD.getTypeSizeInBits(ScalarTy)) ||
- TD.getTypeSizeInBits(ScalarTy)%8 != 0)) {
+ !DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy)) ||
+ DL.getTypeSizeInBits(ScalarTy)%8 != 0)) {
Type *SizeTy = II.getLength()->getType();
Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
CallInst *New = IRB.CreateMemSet(
- getAdjustedAllocaPtr(IRB, NewBeginOffset, II.getRawDest()->getType()),
- II.getValue(), Size, getOffsetAlign(PartitionOffset),
- II.isVolatile());
+ getNewAllocaSlicePtr(IRB, OldPtr->getType()), II.getValue(), Size,
+ getSliceAlign(), II.isVolatile());
(void)New;
DEBUG(dbgs() << " to: " << *New << "\n");
return false;
assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
Value *Splat =
- getIntegerSplat(II.getValue(), TD.getTypeSizeInBits(ElementTy) / 8);
- Splat = convertValue(TD, IRB, Splat, ElementTy);
+ getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ElementTy) / 8);
+ Splat = convertValue(DL, IRB, Splat, ElementTy);
if (NumElements > 1)
Splat = getVectorSplat(Splat, NumElements);
EndOffset != NewAllocaBeginOffset)) {
Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
"oldload");
- Old = convertValue(TD, IRB, Old, IntTy);
+ Old = convertValue(DL, IRB, Old, IntTy);
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
- V = insertInteger(TD, IRB, Old, V, Offset, "insert");
+ V = insertInteger(DL, IRB, Old, V, Offset, "insert");
} else {
assert(V->getType() == IntTy &&
"Wrong type for an alloca wide integer!");
}
- V = convertValue(TD, IRB, V, AllocaTy);
+ V = convertValue(DL, IRB, V, AllocaTy);
} else {
// Established these invariants above.
assert(NewBeginOffset == NewAllocaBeginOffset);
assert(NewEndOffset == NewAllocaEndOffset);
- V = getIntegerSplat(II.getValue(), TD.getTypeSizeInBits(ScalarTy) / 8);
+ V = getIntegerSplat(II.getValue(), DL.getTypeSizeInBits(ScalarTy) / 8);
if (VectorType *AllocaVecTy = dyn_cast<VectorType>(AllocaTy))
V = getVectorSplat(V, AllocaVecTy->getNumElements());
- V = convertValue(TD, IRB, V, AllocaTy);
+ V = convertValue(DL, IRB, V, AllocaTy);
}
Value *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
DEBUG(dbgs() << " original: " << II << "\n");
- // Compute the intersecting offset range.
- assert(BeginOffset < NewAllocaEndOffset);
- assert(EndOffset > NewAllocaBeginOffset);
- uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
- uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
-
- assert(II.getRawSource() == OldPtr || II.getRawDest() == OldPtr);
- bool IsDest = II.getRawDest() == OldPtr;
-
- // Compute the relative offset within the transfer.
- unsigned IntPtrWidth = TD.getPointerSizeInBits();
- APInt RelOffset(IntPtrWidth, NewBeginOffset - BeginOffset);
+ bool IsDest = &II.getRawDestUse() == OldUse;
+ assert((IsDest && II.getRawDest() == OldPtr) ||
+ (!IsDest && II.getRawSource() == OldPtr));
- unsigned Align = II.getAlignment();
- uint64_t PartitionOffset = NewBeginOffset - NewAllocaBeginOffset;
- if (Align > 1)
- Align = MinAlign(
- RelOffset.zextOrTrunc(64).getZExtValue(),
- MinAlign(II.getAlignment(), getOffsetAlign(PartitionOffset)));
+ unsigned SliceAlign = getSliceAlign();
// For unsplit intrinsics, we simply modify the source and destination
// pointers in place. This isn't just an optimization, it is a matter of
// memcpy, and so simply updating the pointers is necessary for us to
// update both source and dest of a single call.
if (!IsSplittable) {
- Value *OldOp = IsDest ? II.getRawDest() : II.getRawSource();
+ Value *AdjustedPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
if (IsDest)
- II.setDest(
- getAdjustedAllocaPtr(IRB, BeginOffset, II.getRawDest()->getType()));
+ II.setDest(AdjustedPtr);
else
- II.setSource(getAdjustedAllocaPtr(IRB, BeginOffset,
- II.getRawSource()->getType()));
+ II.setSource(AdjustedPtr);
- Type *CstTy = II.getAlignmentCst()->getType();
- II.setAlignment(ConstantInt::get(CstTy, Align));
+ if (II.getAlignment() > SliceAlign) {
+ Type *CstTy = II.getAlignmentCst()->getType();
+ II.setAlignment(
+ ConstantInt::get(CstTy, MinAlign(II.getAlignment(), SliceAlign)));
+ }
DEBUG(dbgs() << " to: " << II << "\n");
- deleteIfTriviallyDead(OldOp);
+ deleteIfTriviallyDead(OldPtr);
return false;
}
// For split transfer intrinsics we have an incredibly useful assurance:
// If this doesn't map cleanly onto the alloca type, and that type isn't
// a single value type, just emit a memcpy.
- bool EmitMemCpy
- = !VecTy && !IntTy && (BeginOffset > NewAllocaBeginOffset ||
- EndOffset < NewAllocaEndOffset ||
- !NewAI.getAllocatedType()->isSingleValueType());
+ bool EmitMemCpy =
+ !VecTy && !IntTy &&
+ (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset ||
+ SliceSize != DL.getTypeStoreSize(NewAI.getAllocatedType()) ||
+ !NewAI.getAllocatedType()->isSingleValueType());
// If we're just going to emit a memcpy, the alloca hasn't changed, and the
// size hasn't been shrunk based on analysis of the viable range, this is
// alloca that should be re-examined after rewriting this instruction.
Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest();
if (AllocaInst *AI
- = dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets()))
+ = dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets())) {
+ assert(AI != &OldAI && AI != &NewAI &&
+ "Splittable transfers cannot reach the same alloca on both ends.");
Pass.Worklist.insert(AI);
+ }
- if (EmitMemCpy) {
- Type *OtherPtrTy = IsDest ? II.getRawSource()->getType()
- : II.getRawDest()->getType();
+ Type *OtherPtrTy = OtherPtr->getType();
+ unsigned OtherAS = OtherPtrTy->getPointerAddressSpace();
+
+ // Compute the relative offset for the other pointer within the transfer.
+ unsigned IntPtrWidth = DL.getPointerSizeInBits(OtherAS);
+ APInt OtherOffset(IntPtrWidth, NewBeginOffset - BeginOffset);
+ unsigned OtherAlign = MinAlign(II.getAlignment() ? II.getAlignment() : 1,
+ OtherOffset.zextOrTrunc(64).getZExtValue());
+ if (EmitMemCpy) {
// Compute the other pointer, folding as much as possible to produce
// a single, simple GEP in most cases.
- OtherPtr = getAdjustedPtr(IRB, TD, OtherPtr, RelOffset, OtherPtrTy);
+ OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
+ OtherPtr->getName() + ".");
- Value *OurPtr = getAdjustedAllocaPtr(
- IRB, NewBeginOffset,
- IsDest ? II.getRawDest()->getType() : II.getRawSource()->getType());
+ Value *OurPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
Type *SizeTy = II.getLength()->getType();
Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
- CallInst *New = IRB.CreateMemCpy(IsDest ? OurPtr : OtherPtr,
- IsDest ? OtherPtr : OurPtr,
- Size, Align, II.isVolatile());
+ CallInst *New = IRB.CreateMemCpy(
+ IsDest ? OurPtr : OtherPtr, IsDest ? OtherPtr : OurPtr, Size,
+ MinAlign(SliceAlign, OtherAlign), II.isVolatile());
(void)New;
DEBUG(dbgs() << " to: " << *New << "\n");
return false;
}
- // Note that we clamp the alignment to 1 here as a 0 alignment for a memcpy
- // is equivalent to 1, but that isn't true if we end up rewriting this as
- // a load or store.
- if (!Align)
- Align = 1;
-
bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset &&
NewEndOffset == NewAllocaEndOffset;
uint64_t Size = NewEndOffset - NewBeginOffset;
unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0;
unsigned NumElements = EndIndex - BeginIndex;
IntegerType *SubIntTy
- = IntTy ? Type::getIntNTy(IntTy->getContext(), Size*8) : 0;
+ = IntTy ? Type::getIntNTy(IntTy->getContext(), Size*8) : nullptr;
- Type *OtherPtrTy = NewAI.getType();
+ // Reset the other pointer type to match the register type we're going to
+ // use, but using the address space of the original other pointer.
if (VecTy && !IsWholeAlloca) {
if (NumElements == 1)
OtherPtrTy = VecTy->getElementType();
else
OtherPtrTy = VectorType::get(VecTy->getElementType(), NumElements);
- OtherPtrTy = OtherPtrTy->getPointerTo();
+ OtherPtrTy = OtherPtrTy->getPointerTo(OtherAS);
} else if (IntTy && !IsWholeAlloca) {
- OtherPtrTy = SubIntTy->getPointerTo();
+ OtherPtrTy = SubIntTy->getPointerTo(OtherAS);
+ } else {
+ OtherPtrTy = NewAllocaTy->getPointerTo(OtherAS);
}
- Value *SrcPtr = getAdjustedPtr(IRB, TD, OtherPtr, RelOffset, OtherPtrTy);
+ Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
+ OtherPtr->getName() + ".");
+ unsigned SrcAlign = OtherAlign;
Value *DstPtr = &NewAI;
- if (!IsDest)
+ unsigned DstAlign = SliceAlign;
+ if (!IsDest) {
std::swap(SrcPtr, DstPtr);
+ std::swap(SrcAlign, DstAlign);
+ }
Value *Src;
if (VecTy && !IsWholeAlloca && !IsDest) {
} else if (IntTy && !IsWholeAlloca && !IsDest) {
Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
"load");
- Src = convertValue(TD, IRB, Src, IntTy);
+ Src = convertValue(DL, IRB, Src, IntTy);
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
- Src = extractInteger(TD, IRB, Src, SubIntTy, Offset, "extract");
+ Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract");
} else {
- Src = IRB.CreateAlignedLoad(SrcPtr, Align, II.isVolatile(),
+ Src = IRB.CreateAlignedLoad(SrcPtr, SrcAlign, II.isVolatile(),
"copyload");
}
} else if (IntTy && !IsWholeAlloca && IsDest) {
Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
"oldload");
- Old = convertValue(TD, IRB, Old, IntTy);
+ Old = convertValue(DL, IRB, Old, IntTy);
uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
- Src = insertInteger(TD, IRB, Old, Src, Offset, "insert");
- Src = convertValue(TD, IRB, Src, NewAllocaTy);
+ Src = insertInteger(DL, IRB, Old, Src, Offset, "insert");
+ Src = convertValue(DL, IRB, Src, NewAllocaTy);
}
StoreInst *Store = cast<StoreInst>(
- IRB.CreateAlignedStore(Src, DstPtr, Align, II.isVolatile()));
+ IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile()));
(void)Store;
DEBUG(dbgs() << " to: " << *Store << "\n");
return !II.isVolatile();
DEBUG(dbgs() << " original: " << II << "\n");
assert(II.getArgOperand(1) == OldPtr);
- // Compute the intersecting offset range.
- assert(BeginOffset < NewAllocaEndOffset);
- assert(EndOffset > NewAllocaBeginOffset);
- uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
- uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
-
// Record this instruction for deletion.
Pass.DeadInsts.insert(&II);
ConstantInt *Size
= ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()),
NewEndOffset - NewBeginOffset);
- Value *Ptr =
- getAdjustedAllocaPtr(IRB, NewBeginOffset, II.getArgOperand(1)->getType());
+ Value *Ptr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
Value *New;
if (II.getIntrinsicID() == Intrinsic::lifetime_start)
New = IRB.CreateLifetimeStart(Ptr, Size);
// as local as possible to the PHI. To do that, we re-use the location of
// the old pointer, which necessarily must be in the right position to
// dominate the PHI.
- IRBuilderTy PtrBuilder(cast<Instruction>(OldPtr));
- PtrBuilder.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) +
- ".");
+ IRBuilderTy PtrBuilder(IRB);
+ if (isa<PHINode>(OldPtr))
+ PtrBuilder.SetInsertPoint(OldPtr->getParent()->getFirstInsertionPt());
+ else
+ PtrBuilder.SetInsertPoint(OldPtr);
+ PtrBuilder.SetCurrentDebugLocation(OldPtr->getDebugLoc());
- Value *NewPtr =
- getAdjustedAllocaPtr(PtrBuilder, BeginOffset, OldPtr->getType());
+ Value *NewPtr = getNewAllocaSlicePtr(PtrBuilder, OldPtr->getType());
// Replace the operands which were using the old pointer.
std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr);
DEBUG(dbgs() << " to: " << PN << "\n");
deleteIfTriviallyDead(OldPtr);
- // Check whether we can speculate this PHI node, and if so remember that
- // fact and return that this alloca remains viable for promotion to an SSA
- // value.
- if (isSafePHIToSpeculate(PN, &TD)) {
- Pass.SpeculatablePHIs.insert(&PN);
- return true;
- }
-
- return false; // PHIs can't be promoted on their own.
+ // PHIs can't be promoted on their own, but often can be speculated. We
+ // check the speculation outside of the rewriter so that we see the
+ // fully-rewritten alloca.
+ PHIUsers.insert(&PN);
+ return true;
}
bool visitSelectInst(SelectInst &SI) {
assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable");
assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable");
- Value *NewPtr = getAdjustedAllocaPtr(IRB, BeginOffset, OldPtr->getType());
+ Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
// Replace the operands which were using the old pointer.
if (SI.getOperand(1) == OldPtr)
SI.setOperand(1, NewPtr);
DEBUG(dbgs() << " to: " << SI << "\n");
deleteIfTriviallyDead(OldPtr);
- // Check whether we can speculate this select instruction, and if so
- // remember that fact and return that this alloca remains viable for
- // promotion to an SSA value.
- if (isSafeSelectToSpeculate(SI, &TD)) {
- Pass.SpeculatableSelects.insert(&SI);
- return true;
- }
-
- return false; // Selects can't be promoted on their own.
+ // Selects can't be promoted on their own, but often can be speculated. We
+ // check the speculation outside of the rewriter so that we see the
+ // fully-rewritten alloca.
+ SelectUsers.insert(&SI);
+ return true;
}
};
// Befriend the base class so it can delegate to private visit methods.
friend class llvm::InstVisitor<AggLoadStoreRewriter, bool>;
- const DataLayout &TD;
+ const DataLayout &DL;
/// Queue of pointer uses to analyze and potentially rewrite.
SmallVector<Use *, 8> Queue;
Use *U;
public:
- AggLoadStoreRewriter(const DataLayout &TD) : TD(TD) {}
+ AggLoadStoreRewriter(const DataLayout &DL) : DL(DL) {}
/// Rewrite loads and stores through a pointer and all pointers derived from
/// it.
/// Enqueue all the users of the given instruction for further processing.
/// This uses a set to de-duplicate users.
void enqueueUsers(Instruction &I) {
- for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE;
- ++UI)
- if (Visited.insert(*UI))
- Queue.push_back(&UI.getUse());
+ for (Use &U : I.uses())
+ if (Visited.insert(U.getUser()))
+ Queue.push_back(&U);
}
// Conservative default is to not rewrite anything.
/// when the size or offset cause either end of a type-based partition to be off.
/// Also, this is a best-effort routine. It is reasonable to give up and not
/// return a type if necessary.
-static Type *getTypePartition(const DataLayout &TD, Type *Ty,
+static Type *getTypePartition(const DataLayout &DL, Type *Ty,
uint64_t Offset, uint64_t Size) {
- if (Offset == 0 && TD.getTypeAllocSize(Ty) == Size)
- return stripAggregateTypeWrapping(TD, Ty);
- if (Offset > TD.getTypeAllocSize(Ty) ||
- (TD.getTypeAllocSize(Ty) - Offset) < Size)
- return 0;
+ if (Offset == 0 && DL.getTypeAllocSize(Ty) == Size)
+ return stripAggregateTypeWrapping(DL, Ty);
+ if (Offset > DL.getTypeAllocSize(Ty) ||
+ (DL.getTypeAllocSize(Ty) - Offset) < Size)
+ return nullptr;
if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) {
// We can't partition pointers...
if (SeqTy->isPointerTy())
- return 0;
+ return nullptr;
Type *ElementTy = SeqTy->getElementType();
- uint64_t ElementSize = TD.getTypeAllocSize(ElementTy);
+ uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
uint64_t NumSkippedElements = Offset / ElementSize;
if (ArrayType *ArrTy = dyn_cast<ArrayType>(SeqTy)) {
if (NumSkippedElements >= ArrTy->getNumElements())
- return 0;
+ return nullptr;
} else if (VectorType *VecTy = dyn_cast<VectorType>(SeqTy)) {
if (NumSkippedElements >= VecTy->getNumElements())
- return 0;
+ return nullptr;
}
Offset -= NumSkippedElements * ElementSize;
if (Offset > 0 || Size < ElementSize) {
// Bail if the partition ends in a different array element.
if ((Offset + Size) > ElementSize)
- return 0;
+ return nullptr;
// Recurse through the element type trying to peel off offset bytes.
- return getTypePartition(TD, ElementTy, Offset, Size);
+ return getTypePartition(DL, ElementTy, Offset, Size);
}
assert(Offset == 0);
if (Size == ElementSize)
- return stripAggregateTypeWrapping(TD, ElementTy);
+ return stripAggregateTypeWrapping(DL, ElementTy);
assert(Size > ElementSize);
uint64_t NumElements = Size / ElementSize;
if (NumElements * ElementSize != Size)
- return 0;
+ return nullptr;
return ArrayType::get(ElementTy, NumElements);
}
StructType *STy = dyn_cast<StructType>(Ty);
if (!STy)
- return 0;
+ return nullptr;
- const StructLayout *SL = TD.getStructLayout(STy);
+ const StructLayout *SL = DL.getStructLayout(STy);
if (Offset >= SL->getSizeInBytes())
- return 0;
+ return nullptr;
uint64_t EndOffset = Offset + Size;
if (EndOffset > SL->getSizeInBytes())
- return 0;
+ return nullptr;
unsigned Index = SL->getElementContainingOffset(Offset);
Offset -= SL->getElementOffset(Index);
Type *ElementTy = STy->getElementType(Index);
- uint64_t ElementSize = TD.getTypeAllocSize(ElementTy);
+ uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
if (Offset >= ElementSize)
- return 0; // The offset points into alignment padding.
+ return nullptr; // The offset points into alignment padding.
// See if any partition must be contained by the element.
if (Offset > 0 || Size < ElementSize) {
if ((Offset + Size) > ElementSize)
- return 0;
- return getTypePartition(TD, ElementTy, Offset, Size);
+ return nullptr;
+ return getTypePartition(DL, ElementTy, Offset, Size);
}
assert(Offset == 0);
if (Size == ElementSize)
- return stripAggregateTypeWrapping(TD, ElementTy);
+ return stripAggregateTypeWrapping(DL, ElementTy);
StructType::element_iterator EI = STy->element_begin() + Index,
EE = STy->element_end();
if (EndOffset < SL->getSizeInBytes()) {
unsigned EndIndex = SL->getElementContainingOffset(EndOffset);
if (Index == EndIndex)
- return 0; // Within a single element and its padding.
+ return nullptr; // Within a single element and its padding.
// Don't try to form "natural" types if the elements don't line up with the
// expected size.
// FIXME: We could potentially recurse down through the last element in the
// sub-struct to find a natural end point.
if (SL->getElementOffset(EndIndex) != EndOffset)
- return 0;
+ return nullptr;
assert(Index < EndIndex);
EE = STy->element_begin() + EndIndex;
// Try to build up a sub-structure.
StructType *SubTy = StructType::get(STy->getContext(), makeArrayRef(EI, EE),
STy->isPacked());
- const StructLayout *SubSL = TD.getStructLayout(SubTy);
+ const StructLayout *SubSL = DL.getStructLayout(SubTy);
if (Size != SubSL->getSizeInBytes())
- return 0; // The sub-struct doesn't have quite the size needed.
+ return nullptr; // The sub-struct doesn't have quite the size needed.
return SubTy;
}
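// A worked example (hypothetical, assuming a typical 64-bit DataLayout): for
// Ty = { i32, i32, i64 } with field offsets 0, 4 and 8, a request for
// Offset = 4, Size = 4 lands exactly on the second field and returns i32. A
// request for Offset = 4, Size = 12 tries to form the sub-struct { i32, i64 },
// but padding makes that sub-struct 16 bytes rather than 12, so the routine
// returns nullptr instead.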
/// appropriate new offsets. It also evaluates how successful the rewrite was
/// at enabling promotion and if it was successful queues the alloca to be
/// promoted.
-bool SROA::rewritePartitions(AllocaInst &AI, AllocaPartitioning &P,
- AllocaPartitioning::iterator B,
- AllocaPartitioning::iterator E,
- int64_t BeginOffset, int64_t EndOffset,
- ArrayRef<AllocaPartitioning::iterator> SplitUses) {
+bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
+ AllocaSlices::iterator B, AllocaSlices::iterator E,
+ int64_t BeginOffset, int64_t EndOffset,
+ ArrayRef<AllocaSlices::iterator> SplitUses) {
assert(BeginOffset < EndOffset);
- uint64_t PartitionSize = EndOffset - BeginOffset;
+ uint64_t SliceSize = EndOffset - BeginOffset;
// Try to compute a friendly type for this partition of the alloca. This
// won't always succeed, in which case we fall back to a legal integer type
// or an i8 array of an appropriate size.
- Type *PartitionTy = 0;
+ Type *SliceTy = nullptr;
if (Type *CommonUseTy = findCommonType(B, E, EndOffset))
- if (TD->getTypeAllocSize(CommonUseTy) >= PartitionSize)
- PartitionTy = CommonUseTy;
- if (!PartitionTy)
- if (Type *TypePartitionTy = getTypePartition(*TD, AI.getAllocatedType(),
- BeginOffset, PartitionSize))
- PartitionTy = TypePartitionTy;
- if ((!PartitionTy || (PartitionTy->isArrayTy() &&
- PartitionTy->getArrayElementType()->isIntegerTy())) &&
- TD->isLegalInteger(PartitionSize * 8))
- PartitionTy = Type::getIntNTy(*C, PartitionSize * 8);
- if (!PartitionTy)
- PartitionTy = ArrayType::get(Type::getInt8Ty(*C), PartitionSize);
- assert(TD->getTypeAllocSize(PartitionTy) >= PartitionSize);
-
- bool IsVectorPromotable = isVectorPromotionViable(
- *TD, PartitionTy, P, BeginOffset, EndOffset, B, E, SplitUses);
-
- bool IsIntegerPromotable =
- !IsVectorPromotable &&
- isIntegerWideningViable(*TD, PartitionTy, BeginOffset, P, B, E,
- SplitUses);
+ if (DL->getTypeAllocSize(CommonUseTy) >= SliceSize)
+ SliceTy = CommonUseTy;
+ if (!SliceTy)
+ if (Type *TypePartitionTy = getTypePartition(*DL, AI.getAllocatedType(),
+ BeginOffset, SliceSize))
+ SliceTy = TypePartitionTy;
+ if ((!SliceTy || (SliceTy->isArrayTy() &&
+ SliceTy->getArrayElementType()->isIntegerTy())) &&
+ DL->isLegalInteger(SliceSize * 8))
+ SliceTy = Type::getIntNTy(*C, SliceSize * 8);
+ if (!SliceTy)
+ SliceTy = ArrayType::get(Type::getInt8Ty(*C), SliceSize);
+ assert(DL->getTypeAllocSize(SliceTy) >= SliceSize);
+
+ bool IsIntegerPromotable = isIntegerWideningViable(
+ *DL, SliceTy, BeginOffset, AllocaSlices::const_range(B, E), SplitUses);
+
+ VectorType *VecTy =
+ IsIntegerPromotable
+ ? nullptr
+ : isVectorPromotionViable(*DL, SliceTy, BeginOffset, EndOffset,
+ AllocaSlices::const_range(B, E), SplitUses);
+ if (VecTy)
+ SliceTy = VecTy;
// Check for the case where we're going to rewrite to a new alloca of the
// exact same type as the original, and with the same access offsets. In that
// case, re-use the existing alloca, but still run through the rewriter to
// perform phi and select speculation.
AllocaInst *NewAI;
- if (PartitionTy == AI.getAllocatedType()) {
+ if (SliceTy == AI.getAllocatedType()) {
assert(BeginOffset == 0 &&
"Non-zero begin offset but same alloca type");
NewAI = &AI;
// The minimum alignment which users can rely on when the explicit
// alignment is omitted or zero is that required by the ABI for this
// type.
- Alignment = TD->getABITypeAlignment(AI.getAllocatedType());
+ Alignment = DL->getABITypeAlignment(AI.getAllocatedType());
}
Alignment = MinAlign(Alignment, BeginOffset);
// If we will get at least this much alignment from the type alone, leave
// the alloca's alignment unconstrained.
- if (Alignment <= TD->getABITypeAlignment(PartitionTy))
+ if (Alignment <= DL->getABITypeAlignment(SliceTy))
Alignment = 0;
- NewAI = new AllocaInst(PartitionTy, 0, Alignment,
- AI.getName() + ".sroa." + Twine(B - P.begin()), &AI);
+ NewAI =
+ new AllocaInst(SliceTy, nullptr, Alignment,
+ AI.getName() + ".sroa." + Twine(B - AS.begin()), &AI);
++NumNewAllocas;
}
<< "[" << BeginOffset << "," << EndOffset << ") to: " << *NewAI
<< "\n");
- // Track the high watermark on several worklists that are only relevant for
+ // Track the high watermark on the worklist as it is only relevant for
// promoted allocas. We will reset it to this point if the alloca is not in
// fact scheduled for promotion.
unsigned PPWOldSize = PostPromotionWorklist.size();
- unsigned SPOldSize = SpeculatablePHIs.size();
- unsigned SSOldSize = SpeculatableSelects.size();
+ unsigned NumUses = 0;
+ SmallPtrSet<PHINode *, 8> PHIUsers;
+ SmallPtrSet<SelectInst *, 8> SelectUsers;
- AllocaPartitionRewriter Rewriter(*TD, P, *this, AI, *NewAI, BeginOffset,
- EndOffset, IsVectorPromotable,
- IsIntegerPromotable);
+ AllocaSliceRewriter Rewriter(*DL, AS, *this, AI, *NewAI, BeginOffset,
+ EndOffset, IsIntegerPromotable, VecTy, PHIUsers,
+ SelectUsers);
bool Promotable = true;
- for (ArrayRef<AllocaPartitioning::iterator>::const_iterator
- SUI = SplitUses.begin(),
- SUE = SplitUses.end();
- SUI != SUE; ++SUI) {
+ for (auto &SplitUse : SplitUses) {
DEBUG(dbgs() << " rewriting split ");
- DEBUG(P.printPartition(dbgs(), *SUI, ""));
- Promotable &= Rewriter.visit(*SUI);
+ DEBUG(AS.printSlice(dbgs(), SplitUse, ""));
+ Promotable &= Rewriter.visit(SplitUse);
+ ++NumUses;
}
- for (AllocaPartitioning::iterator I = B; I != E; ++I) {
+ for (AllocaSlices::iterator I = B; I != E; ++I) {
DEBUG(dbgs() << " rewriting ");
- DEBUG(P.printPartition(dbgs(), I, ""));
+ DEBUG(AS.printSlice(dbgs(), I, ""));
Promotable &= Rewriter.visit(I);
+ ++NumUses;
}
- if (Promotable && (SpeculatablePHIs.size() > SPOldSize ||
- SpeculatableSelects.size() > SSOldSize)) {
- // If we have a promotable alloca except for some unspeculated loads below
- // PHIs or Selects, iterate once. We will speculate the loads and on the
- // next iteration rewrite them into a promotable form.
- Worklist.insert(NewAI);
- } else if (Promotable) {
- DEBUG(dbgs() << " and queuing for promotion\n");
- PromotableAllocas.push_back(NewAI);
- } else if (NewAI != &AI) {
+ NumAllocaPartitionUses += NumUses;
+ MaxUsesPerAllocaPartition =
+ std::max<unsigned>(NumUses, MaxUsesPerAllocaPartition);
+
+ // Now that we've processed all the slices in the new partition, check if any
+ // PHIs or Selects would block promotion.
+ for (SmallPtrSetImpl<PHINode *>::iterator I = PHIUsers.begin(),
+ E = PHIUsers.end();
+ I != E; ++I)
+ if (!isSafePHIToSpeculate(**I, DL)) {
+ Promotable = false;
+ PHIUsers.clear();
+ SelectUsers.clear();
+ break;
+ }
+ for (SmallPtrSetImpl<SelectInst *>::iterator I = SelectUsers.begin(),
+ E = SelectUsers.end();
+ I != E; ++I)
+ if (!isSafeSelectToSpeculate(**I, DL)) {
+ Promotable = false;
+ PHIUsers.clear();
+ SelectUsers.clear();
+ break;
+ }
+
+ if (Promotable) {
+ if (PHIUsers.empty() && SelectUsers.empty()) {
+ // Promote the alloca.
+ PromotableAllocas.push_back(NewAI);
+ } else {
+ // If we have either PHIs or Selects to speculate, add them to those
+      // worklists and re-queue the new alloca so that we promote it on the
+ // next iteration.
+ for (PHINode *PHIUser : PHIUsers)
+ SpeculatablePHIs.insert(PHIUser);
+ for (SelectInst *SelectUser : SelectUsers)
+ SpeculatableSelects.insert(SelectUser);
+ Worklist.insert(NewAI);
+ }
+ } else {
// If we can't promote the alloca, iterate on it to check for new
// refinements exposed by splitting the current alloca. Don't iterate on an
// alloca which didn't actually change and didn't get promoted.
- // FIXME: We should actually track whether the rewriter changed anything.
- Worklist.insert(NewAI);
- }
+ if (NewAI != &AI)
+ Worklist.insert(NewAI);
- // Drop any post-promotion work items if promotion didn't happen.
- if (!Promotable) {
+ // Drop any post-promotion work items if promotion didn't happen.
while (PostPromotionWorklist.size() > PPWOldSize)
PostPromotionWorklist.pop_back();
- while (SpeculatablePHIs.size() > SPOldSize)
- SpeculatablePHIs.pop_back();
- while (SpeculatableSelects.size() > SSOldSize)
- SpeculatableSelects.pop_back();
}
return true;
}
-namespace {
- struct IsPartitionEndLessOrEqualTo {
- uint64_t UpperBound;
-
- IsPartitionEndLessOrEqualTo(uint64_t UpperBound) : UpperBound(UpperBound) {}
-
- bool operator()(const AllocaPartitioning::iterator &I) {
- return I->endOffset() <= UpperBound;
- }
- };
-}
-
-static void removeFinishedSplitUses(
- SmallVectorImpl<AllocaPartitioning::iterator> &SplitUses,
- uint64_t &MaxSplitUseEndOffset, uint64_t Offset) {
+static void
+removeFinishedSplitUses(SmallVectorImpl<AllocaSlices::iterator> &SplitUses,
+ uint64_t &MaxSplitUseEndOffset, uint64_t Offset) {
if (Offset >= MaxSplitUseEndOffset) {
SplitUses.clear();
MaxSplitUseEndOffset = 0;
size_t SplitUsesOldSize = SplitUses.size();
SplitUses.erase(std::remove_if(SplitUses.begin(), SplitUses.end(),
- IsPartitionEndLessOrEqualTo(Offset)),
+ [Offset](const AllocaSlices::iterator &I) {
+ return I->endOffset() <= Offset;
+ }),
SplitUses.end());
if (SplitUsesOldSize == SplitUses.size())
return;
// Recompute the max. While this is linear, so is remove_if.
MaxSplitUseEndOffset = 0;
- for (SmallVectorImpl<AllocaPartitioning::iterator>::iterator
- SUI = SplitUses.begin(),
- SUE = SplitUses.end();
- SUI != SUE; ++SUI)
- MaxSplitUseEndOffset = std::max((*SUI)->endOffset(), MaxSplitUseEndOffset);
+ for (AllocaSlices::iterator SplitUse : SplitUses)
+ MaxSplitUseEndOffset =
+ std::max(SplitUse->endOffset(), MaxSplitUseEndOffset);
}
-/// \brief Walks the partitioning of an alloca rewriting uses of each partition.
-bool SROA::splitAlloca(AllocaInst &AI, AllocaPartitioning &P) {
- if (P.begin() == P.end())
+/// \brief Walks the slices of an alloca and forms partitions based on them,
+/// rewriting each of their uses.
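+///
+/// For example, a 16-byte alloca covered by a splittable memset slice over
+/// [0,16) and two unsplittable i64 load slices over [0,8) and [8,16) forms
+/// two partitions, [0,8) and [8,16), with the memset carried into each of
+/// them as a split use.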
+bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
+ if (AS.begin() == AS.end())
return false;
+ unsigned NumPartitions = 0;
bool Changed = false;
- SmallVector<AllocaPartitioning::iterator, 4> SplitUses;
+ SmallVector<AllocaSlices::iterator, 4> SplitUses;
uint64_t MaxSplitUseEndOffset = 0;
- uint64_t BeginOffset = P.begin()->beginOffset();
+ uint64_t BeginOffset = AS.begin()->beginOffset();
- for (AllocaPartitioning::iterator PI = P.begin(), PJ = llvm::next(PI),
- PE = P.end();
- PI != PE; PI = PJ) {
- uint64_t MaxEndOffset = PI->endOffset();
+ for (AllocaSlices::iterator SI = AS.begin(), SJ = std::next(SI),
+ SE = AS.end();
+ SI != SE; SI = SJ) {
+ uint64_t MaxEndOffset = SI->endOffset();
- if (!PI->isSplittable()) {
- // When we're forming an unsplittable region, it must always start at he
- // first partitioning use and will extend through its end.
- assert(BeginOffset == PI->beginOffset());
+ if (!SI->isSplittable()) {
+ // When we're forming an unsplittable region, it must always start at the
+ // first slice and will extend through its end.
+ assert(BeginOffset == SI->beginOffset());
- // Rewrite a partition including all of the overlapping uses with this
- // unsplittable partition.
- while (PJ != PE && PJ->beginOffset() < MaxEndOffset) {
- if (!PJ->isSplittable())
- MaxEndOffset = std::max(MaxEndOffset, PJ->endOffset());
- ++PJ;
+ // Form a partition including all of the overlapping slices with this
+ // unsplittable slice.
+ while (SJ != SE && SJ->beginOffset() < MaxEndOffset) {
+ if (!SJ->isSplittable())
+ MaxEndOffset = std::max(MaxEndOffset, SJ->endOffset());
+ ++SJ;
}
} else {
- assert(PI->isSplittable()); // Established above.
+ assert(SI->isSplittable()); // Established above.
- // Collect all of the overlapping splittable partitions.
- while (PJ != PE && PJ->beginOffset() < MaxEndOffset &&
- PJ->isSplittable()) {
- MaxEndOffset = std::max(MaxEndOffset, PJ->endOffset());
- ++PJ;
+ // Collect all of the overlapping splittable slices.
+ while (SJ != SE && SJ->beginOffset() < MaxEndOffset &&
+ SJ->isSplittable()) {
+ MaxEndOffset = std::max(MaxEndOffset, SJ->endOffset());
+ ++SJ;
}
- // Back up MaxEndOffset and PJ if we ended the span early when
- // encountering an unsplittable partition.
- if (PJ != PE && PJ->beginOffset() < MaxEndOffset) {
- assert(!PJ->isSplittable());
- MaxEndOffset = PJ->beginOffset();
+ // Back up MaxEndOffset and SJ if we ended the span early when
+ // encountering an unsplittable slice.
+ if (SJ != SE && SJ->beginOffset() < MaxEndOffset) {
+ assert(!SJ->isSplittable());
+ MaxEndOffset = SJ->beginOffset();
}
}
// Check if we have managed to move the end offset forward yet. If so,
// we'll have to rewrite uses and erase old split uses.
if (BeginOffset < MaxEndOffset) {
- // Rewrite a sequence of overlapping partition uses.
- Changed |= rewritePartitions(AI, P, PI, PJ, BeginOffset,
- MaxEndOffset, SplitUses);
+ // Rewrite a sequence of overlapping slices.
+ Changed |= rewritePartition(AI, AS, SI, SJ, BeginOffset, MaxEndOffset,
+ SplitUses);
+ ++NumPartitions;
removeFinishedSplitUses(SplitUses, MaxSplitUseEndOffset, MaxEndOffset);
}
- // Accumulate all the splittable partitions from the [PI,PJ) region which
+ // Accumulate all the splittable slices from the [SI,SJ) region which
// overlap going forward.
- for (AllocaPartitioning::iterator PII = PI, PIE = PJ; PII != PIE; ++PII)
- if (PII->isSplittable() && PII->endOffset() > MaxEndOffset) {
- SplitUses.push_back(PII);
- MaxSplitUseEndOffset = std::max(PII->endOffset(), MaxSplitUseEndOffset);
+ for (AllocaSlices::iterator SK = SI; SK != SJ; ++SK)
+ if (SK->isSplittable() && SK->endOffset() > MaxEndOffset) {
+ SplitUses.push_back(SK);
+ MaxSplitUseEndOffset = std::max(SK->endOffset(), MaxSplitUseEndOffset);
}
// If we're already at the end and we have no split uses, we're done.
- if (PJ == PE && SplitUses.empty())
+ if (SJ == SE && SplitUses.empty())
break;
// If we have no split uses or no gap in offsets, we're ready to move to
- // the next partitioning use.
- if (SplitUses.empty() || (PJ != PE && MaxEndOffset == PJ->beginOffset())) {
- BeginOffset = PJ->beginOffset();
+ // the next slice.
+ if (SplitUses.empty() || (SJ != SE && MaxEndOffset == SJ->beginOffset())) {
+ BeginOffset = SJ->beginOffset();
continue;
}
- // Even if we have split uses, if the next partitioning use is splittable
- // and the split uses reach it, we can simply set up the beginning offset
- // to bridge between them.
- if (PJ != PE && PJ->isSplittable() && MaxSplitUseEndOffset > PJ->beginOffset()) {
+ // Even if we have split slices, if the next slice is splittable and the
+ // split slices reach it, we can simply set up the beginning offset of the
+ // next iteration to bridge between them.
+ if (SJ != SE && SJ->isSplittable() &&
+ MaxSplitUseEndOffset > SJ->beginOffset()) {
BeginOffset = MaxEndOffset;
continue;
}
- // Otherwise, we have a tail of split uses. Rewrite them with an empty
- // range of partitioning uses.
+ // Otherwise, we have a tail of split slices. Rewrite them with an empty
+ // range of slices.
uint64_t PostSplitEndOffset =
- PJ == PE ? MaxSplitUseEndOffset : PJ->beginOffset();
+ SJ == SE ? MaxSplitUseEndOffset : SJ->beginOffset();
- Changed |= rewritePartitions(AI, P, PJ, PJ, MaxEndOffset,
- PostSplitEndOffset, SplitUses);
- if (PJ == PE)
+ Changed |= rewritePartition(AI, AS, SJ, SJ, MaxEndOffset,
+ PostSplitEndOffset, SplitUses);
+ ++NumPartitions;
+
+ if (SJ == SE)
break; // Skip the rest, we don't need to do any cleanup.
removeFinishedSplitUses(SplitUses, MaxSplitUseEndOffset,
PostSplitEndOffset);
// Now just reset the begin offset for the next iteration.
- BeginOffset = PJ->beginOffset();
+ BeginOffset = SJ->beginOffset();
}
+ NumAllocaPartitions += NumPartitions;
+ MaxPartitionsPerAlloca =
+ std::max<unsigned>(NumPartitions, MaxPartitionsPerAlloca);
+
return Changed;
}
+/// \brief Clobber a use with undef, deleting the used value if it becomes dead.
+void SROA::clobberUse(Use &U) {
+ Value *OldV = U;
+ // Replace the use with an undef value.
+ U = UndefValue::get(OldV->getType());
+
+ // Check for this making an instruction dead. We have to garbage collect
+ // all the dead instructions to ensure the uses of any alloca end up being
+ // minimal.
+ if (Instruction *OldI = dyn_cast<Instruction>(OldV))
+ if (isInstructionTriviallyDead(OldI)) {
+ DeadInsts.insert(OldI);
+ }
+}
+
/// \brief Analyze an alloca for SROA.
///
/// This analyzes the alloca to ensure we can reason about it, builds
-/// a partitioning of the alloca, and then hands it off to be split and
+/// the slices of the alloca, and then hands it off to be split and
/// rewritten as needed.
bool SROA::runOnAlloca(AllocaInst &AI) {
DEBUG(dbgs() << "SROA alloca: " << AI << "\n");
// Skip alloca forms that this analysis can't handle.
if (AI.isArrayAllocation() || !AI.getAllocatedType()->isSized() ||
- TD->getTypeAllocSize(AI.getAllocatedType()) == 0)
+ DL->getTypeAllocSize(AI.getAllocatedType()) == 0)
return false;
bool Changed = false;
// First, split any FCA loads and stores touching this alloca to promote
// better splitting and promotion opportunities.
- AggLoadStoreRewriter AggRewriter(*TD);
+ AggLoadStoreRewriter AggRewriter(*DL);
Changed |= AggRewriter.rewrite(AI);
- // Build the partition set using a recursive instruction-visiting builder.
- AllocaPartitioning P(*TD, AI);
- DEBUG(P.print(dbgs()));
- if (P.isEscaped())
+ // Build the slices using a recursive instruction-visiting builder.
+ AllocaSlices AS(*DL, AI);
+ DEBUG(AS.print(dbgs()));
+ if (AS.isEscaped())
return Changed;
// Delete all the dead users of this alloca before splitting and rewriting it.
- for (AllocaPartitioning::dead_user_iterator DI = P.dead_user_begin(),
- DE = P.dead_user_end();
- DI != DE; ++DI) {
+ for (Instruction *DeadUser : AS.getDeadUsers()) {
+ // Free up everything used by this instruction.
+ for (Use &DeadOp : DeadUser->operands())
+ clobberUse(DeadOp);
+
+ // Now replace the uses of this instruction.
+ DeadUser->replaceAllUsesWith(UndefValue::get(DeadUser->getType()));
+
+ // And mark it for deletion.
+ DeadInsts.insert(DeadUser);
+ Changed = true;
+ }
+ for (Use *DeadOp : AS.getDeadOperands()) {
+ clobberUse(*DeadOp);
Changed = true;
- (*DI)->replaceAllUsesWith(UndefValue::get((*DI)->getType()));
- DeadInsts.insert(*DI);
- }
- for (AllocaPartitioning::dead_op_iterator DO = P.dead_op_begin(),
- DE = P.dead_op_end();
- DO != DE; ++DO) {
- Value *OldV = **DO;
- // Clobber the use with an undef value.
- **DO = UndefValue::get(OldV->getType());
- if (Instruction *OldI = dyn_cast<Instruction>(OldV))
- if (isInstructionTriviallyDead(OldI)) {
- Changed = true;
- DeadInsts.insert(OldI);
- }
}
- // No partitions to split. Leave the dead alloca for a later pass to clean up.
- if (P.begin() == P.end())
+ // No slices to split. Leave the dead alloca for a later pass to clean up.
+ if (AS.begin() == AS.end())
return Changed;
- Changed |= splitAlloca(AI, P);
+ Changed |= splitAlloca(AI, AS);
DEBUG(dbgs() << " Speculating PHIs\n");
while (!SpeculatablePHIs.empty())
///
/// We also record the alloca instructions deleted here so that they aren't
/// subsequently handed to mem2reg to promote.
-void SROA::deleteDeadInstructions(SmallPtrSet<AllocaInst*, 4> &DeletedAllocas) {
+void SROA::deleteDeadInstructions(SmallPtrSetImpl<AllocaInst*> &DeletedAllocas) {
while (!DeadInsts.empty()) {
Instruction *I = DeadInsts.pop_back_val();
DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");
I->replaceAllUsesWith(UndefValue::get(I->getType()));
- for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
- if (Instruction *U = dyn_cast<Instruction>(*OI)) {
+ for (Use &Operand : I->operands())
+ if (Instruction *U = dyn_cast<Instruction>(Operand)) {
// Zero out the operand and see if it becomes trivially dead.
- *OI = 0;
+ Operand = nullptr;
if (isInstructionTriviallyDead(U))
DeadInsts.insert(U);
}
}
}
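+/// \brief Enqueue all the users of the given instruction onto the worklist,
+/// skipping any that have already been visited.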
+static void enqueueUsersInWorklist(Instruction &I,
+ SmallVectorImpl<Instruction *> &Worklist,
+ SmallPtrSetImpl<Instruction *> &Visited) {
+ for (User *U : I.users())
+ if (Visited.insert(cast<Instruction>(U)))
+ Worklist.push_back(cast<Instruction>(U));
+}
+
/// \brief Promote the allocas, using the best available technique.
///
/// This attempts to promote whatever allocas have been identified as viable in
if (DT && !ForceSSAUpdater) {
DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
- PromoteMemToReg(PromotableAllocas, *DT);
+ PromoteMemToReg(PromotableAllocas, *DT, nullptr, AT);
PromotableAllocas.clear();
return true;
}
DEBUG(dbgs() << "Promoting allocas with SSAUpdater...\n");
SSAUpdater SSA;
DIBuilder DIB(*F.getParent());
- SmallVector<Instruction*, 64> Insts;
+ SmallVector<Instruction *, 64> Insts;
+
+ // We need a worklist to walk the uses of each alloca.
+ SmallVector<Instruction *, 8> Worklist;
+ SmallPtrSet<Instruction *, 8> Visited;
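+ // No-op bitcasts and GEPs found while walking the uses are collected here
+ // and erased once the alloca's loads and stores have been rewritten.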
+ SmallVector<Instruction *, 32> DeadInsts;
for (unsigned Idx = 0, Size = PromotableAllocas.size(); Idx != Size; ++Idx) {
AllocaInst *AI = PromotableAllocas[Idx];
- for (Value::use_iterator UI = AI->use_begin(), UE = AI->use_end();
- UI != UE;) {
- Instruction *I = cast<Instruction>(*UI++);
+ Insts.clear();
+ Worklist.clear();
+ Visited.clear();
+
+ enqueueUsersInWorklist(*AI, Worklist, Visited);
+
+ while (!Worklist.empty()) {
+ Instruction *I = Worklist.pop_back_val();
+
// FIXME: Currently the SSAUpdater infrastructure doesn't reason about
// lifetime intrinsics and so we strip them (and the bitcasts+GEPs
// leading to them) here. Eventually it should use them to optimize the
// scalar values produced.
- if (isa<BitCastInst>(I) || isa<GetElementPtrInst>(I)) {
- assert(onlyUsedByLifetimeMarkers(I) &&
- "Found a bitcast used outside of a lifetime marker.");
- while (!I->use_empty())
- cast<Instruction>(*I->use_begin())->eraseFromParent();
- I->eraseFromParent();
- continue;
- }
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
assert(II->getIntrinsicID() == Intrinsic::lifetime_start ||
II->getIntrinsicID() == Intrinsic::lifetime_end);
continue;
}
- Insts.push_back(I);
+ // Push the loads and stores we find onto the list. SROA will already
+ // have validated that all loads and stores are viable candidates for
+ // promotion.
+ if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+ assert(LI->getType() == AI->getAllocatedType());
+ Insts.push_back(LI);
+ continue;
+ }
+ if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
+ assert(SI->getValueOperand()->getType() == AI->getAllocatedType());
+ Insts.push_back(SI);
+ continue;
+ }
+
+ // For everything else, we know that only no-op bitcasts and GEPs will
+      // make it this far; just recurse through them and record them for later
+ // removal.
+ DeadInsts.push_back(I);
+ enqueueUsersInWorklist(*I, Worklist, Visited);
}
AllocaPromoter(Insts, SSA, *AI, DIB).run(Insts);
- Insts.clear();
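+ // Now that the loads and stores are rewritten to SSA values, erase the
+ // recorded no-op casts and GEPs and then the alloca itself.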
+ while (!DeadInsts.empty())
+ DeadInsts.pop_back_val()->eraseFromParent();
+ AI->eraseFromParent();
}
PromotableAllocas.clear();
return true;
}
-namespace {
- /// \brief A predicate to test whether an alloca belongs to a set.
- class IsAllocaInSet {
- typedef SmallPtrSet<AllocaInst *, 4> SetType;
- const SetType &Set;
-
- public:
- typedef AllocaInst *argument_type;
-
- IsAllocaInSet(const SetType &Set) : Set(Set) {}
- bool operator()(AllocaInst *AI) const { return Set.count(AI); }
- };
-}
-
bool SROA::runOnFunction(Function &F) {
+ if (skipOptnoneFunction(F))
+ return false;
+
DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
C = &F.getContext();
- TD = getAnalysisIfAvailable<DataLayout>();
- if (!TD) {
+ DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+ if (!DLP) {
DEBUG(dbgs() << " Skipping SROA -- no target data!\n");
return false;
}
- DT = getAnalysisIfAvailable<DominatorTree>();
+ DL = &DLP->getDataLayout();
+ DominatorTreeWrapperPass *DTWP =
+ getAnalysisIfAvailable<DominatorTreeWrapperPass>();
+ DT = DTWP ? &DTWP->getDomTree() : nullptr;
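+ // The assumption tracker is handed through to mem2reg when the allocas are
+ // promoted.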
+ AT = &getAnalysis<AssumptionTracker>();
BasicBlock &EntryBB = F.getEntryBlock();
- for (BasicBlock::iterator I = EntryBB.begin(), E = llvm::prior(EntryBB.end());
+ for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end());
I != E; ++I)
if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
Worklist.insert(AI);
// Remove the deleted allocas from various lists so that we don't try to
// continue processing them.
if (!DeletedAllocas.empty()) {
- Worklist.remove_if(IsAllocaInSet(DeletedAllocas));
- PostPromotionWorklist.remove_if(IsAllocaInSet(DeletedAllocas));
+ auto IsInSet = [&](AllocaInst *AI) {
+ return DeletedAllocas.count(AI);
+ };
+ Worklist.remove_if(IsInSet);
+ PostPromotionWorklist.remove_if(IsInSet);
PromotableAllocas.erase(std::remove_if(PromotableAllocas.begin(),
PromotableAllocas.end(),
- IsAllocaInSet(DeletedAllocas)),
+ IsInSet),
PromotableAllocas.end());
DeletedAllocas.clear();
}
}
void SROA::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<AssumptionTracker>();
if (RequiresDomTree)
- AU.addRequired<DominatorTree>();
+ AU.addRequired<DominatorTreeWrapperPass>();
AU.setPreservesCFG();
}