1 //===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 /// This transformation implements the well known scalar replacement of
11 /// aggregates transformation. It tries to identify promotable elements of an
12 /// aggregate alloca, and promote them to registers. It will also try to
13 /// convert uses of an element (or set of elements) of an alloca into a vector
14 /// or bitfield-style integer scalar if appropriate.
16 /// It works to do this with minimal slicing of the alloca so that regions
17 /// which are merely transferred in and out of external memory remain unchanged
18 /// and are not decomposed to scalar code.
20 /// Because this also performs alloca promotion, it can be thought of as also
21 /// serving the purpose of SSA formation. The algorithm iterates on the
22 /// function until all opportunities for promotion have been realized.
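///
/// For example (an illustrative sketch only; the IR names here are invented),
/// a promotable alloca such as:
///   %a = alloca i32
///   store i32 %x, i32* %a
///   %v = load i32* %a
/// is rewritten so that %v simply becomes %x, and the alloca, store, and load
/// are deleted entirely.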
24 //===----------------------------------------------------------------------===//
26 #define DEBUG_TYPE "sroa"
27 #include "llvm/Transforms/Scalar.h"
28 #include "llvm/Constants.h"
29 #include "llvm/DIBuilder.h"
30 #include "llvm/DebugInfo.h"
31 #include "llvm/DerivedTypes.h"
32 #include "llvm/Function.h"
33 #include "llvm/IRBuilder.h"
34 #include "llvm/Instructions.h"
35 #include "llvm/IntrinsicInst.h"
36 #include "llvm/LLVMContext.h"
37 #include "llvm/Module.h"
38 #include "llvm/Operator.h"
39 #include "llvm/Pass.h"
40 #include "llvm/ADT/SetVector.h"
41 #include "llvm/ADT/SmallVector.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
44 #include "llvm/Analysis/Dominators.h"
45 #include "llvm/Analysis/Loads.h"
46 #include "llvm/Analysis/ValueTracking.h"
47 #include "llvm/Support/CommandLine.h"
48 #include "llvm/Support/Debug.h"
49 #include "llvm/Support/ErrorHandling.h"
50 #include "llvm/Support/GetElementPtrTypeIterator.h"
51 #include "llvm/Support/InstVisitor.h"
52 #include "llvm/Support/MathExtras.h"
53 #include "llvm/Support/raw_ostream.h"
54 #include "llvm/DataLayout.h"
55 #include "llvm/Transforms/Utils/Local.h"
56 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
57 #include "llvm/Transforms/Utils/SSAUpdater.h"
60 STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement");
61 STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced");
62 STATISTIC(NumPromoted, "Number of allocas promoted to SSA values");
63 STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion");
64 STATISTIC(NumDeleted, "Number of instructions deleted");
65 STATISTIC(NumVectorized, "Number of vectorized aggregates");
67 /// Hidden option to force the pass to not use DomTree and mem2reg, instead
68 /// forming SSA values through the SSAUpdater infrastructure.
static cl::opt<bool>
ForceSSAUpdater("force-ssa-updater", cl::init(false), cl::Hidden);
73 /// \brief Alloca partitioning representation.
75 /// This class represents a partitioning of an alloca into slices, and
76 /// information about the nature of uses of each slice of the alloca. The goal
77 /// is that this information is sufficient to decide if and how to split the
78 /// alloca apart and replace slices with scalars. It is also intended that this
79 /// structure can capture the relevant information needed both to decide about
80 /// and to enact these transformations.
81 class AllocaPartitioning {
/// \brief A common base class for representing a half-open byte range.
struct ByteRange {
/// \brief The beginning offset of the range.
uint64_t BeginOffset;
/// \brief The ending offset, not included in the range.
uint64_t EndOffset;
91 ByteRange() : BeginOffset(), EndOffset() {}
92 ByteRange(uint64_t BeginOffset, uint64_t EndOffset)
93 : BeginOffset(BeginOffset), EndOffset(EndOffset) {}
95 /// \brief Support for ordering ranges.
97 /// This provides an ordering over ranges such that start offsets are
98 /// always increasing, and within equal start offsets, the end offsets are
99 /// decreasing. Thus the spanning range comes first in a cluster with the
100 /// same start position.
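///
/// For example (illustrative values only), ranges are ordered as
/// [0,16) < [0,8) < [4,12) < [4,8), so the spanning range [0,16) precedes the
/// ranges it contains.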
101 bool operator<(const ByteRange &RHS) const {
102 if (BeginOffset < RHS.BeginOffset) return true;
103 if (BeginOffset > RHS.BeginOffset) return false;
104 if (EndOffset > RHS.EndOffset) return true;
108 /// \brief Support comparison with a single offset to allow binary searches.
109 friend bool operator<(const ByteRange &LHS, uint64_t RHSOffset) {
110 return LHS.BeginOffset < RHSOffset;
113 friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset,
114 const ByteRange &RHS) {
115 return LHSOffset < RHS.BeginOffset;
118 bool operator==(const ByteRange &RHS) const {
119 return BeginOffset == RHS.BeginOffset && EndOffset == RHS.EndOffset;
121 bool operator!=(const ByteRange &RHS) const { return !operator==(RHS); }
124 /// \brief A partition of an alloca.
126 /// This structure represents a contiguous partition of the alloca. These are
127 /// formed by examining the uses of the alloca. During formation, they may
128 /// overlap but once an AllocaPartitioning is built, the Partitions within it
129 /// are all disjoint.
130 struct Partition : public ByteRange {
131 /// \brief Whether this partition is splittable into smaller partitions.
133 /// We flag partitions as splittable when they are formed entirely due to
134 /// accesses by trivially splittable operations such as memset and memcpy.
136 /// FIXME: At some point we should consider loads and stores of FCAs to be
/// splittable and eagerly split them into scalar values.
bool IsSplittable;
140 /// \brief Test whether a partition has been marked as dead.
141 bool isDead() const {
142 if (BeginOffset == UINT64_MAX) {
143 assert(EndOffset == UINT64_MAX);
149 /// \brief Kill a partition.
150 /// This is accomplished by setting both its beginning and end offset to
151 /// the maximum possible value.
void kill() {
assert(!isDead() && "He's Dead, Jim!");
154 BeginOffset = EndOffset = UINT64_MAX;
157 Partition() : ByteRange(), IsSplittable() {}
158 Partition(uint64_t BeginOffset, uint64_t EndOffset, bool IsSplittable)
159 : ByteRange(BeginOffset, EndOffset), IsSplittable(IsSplittable) {}
162 /// \brief A particular use of a partition of the alloca.
/// This structure is used to associate uses of a partition with it. The uses
/// mark the range of bytes which are referenced by a particular instruction,
/// and include a handle to the user itself and the pointer value in use.
/// The bounds of these uses are determined by intersecting the bounds of the
/// memory use itself with a particular partition. As a consequence there is
/// intentional overlap between various uses of the same partition.
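///
/// For example (illustrative offsets only), a single 8-byte store covering
/// [0,8) of an alloca that has been partitioned into [0,4) and [4,8) is
/// recorded as two PartitionUses, one clamped to each partition, both
/// referring to the same store instruction.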
170 struct PartitionUse : public ByteRange {
171 /// \brief The use in question. Provides access to both user and used value.
173 /// Note that this may be null if the partition use is *dead*, that is, it
/// should be ignored.
Use *U;
177 PartitionUse() : ByteRange(), U() {}
178 PartitionUse(uint64_t BeginOffset, uint64_t EndOffset, Use *U)
179 : ByteRange(BeginOffset, EndOffset), U(U) {}
182 /// \brief Construct a partitioning of a particular alloca.
184 /// Construction does most of the work for partitioning the alloca. This
185 /// performs the necessary walks of users and builds a partitioning from it.
186 AllocaPartitioning(const DataLayout &TD, AllocaInst &AI);
188 /// \brief Test whether a pointer to the allocation escapes our analysis.
/// If this is true, the partitioning is never fully built and should be
/// ignored.
192 bool isEscaped() const { return PointerEscapingInstr; }
194 /// \brief Support for iterating over the partitions.
196 typedef SmallVectorImpl<Partition>::iterator iterator;
197 iterator begin() { return Partitions.begin(); }
198 iterator end() { return Partitions.end(); }
200 typedef SmallVectorImpl<Partition>::const_iterator const_iterator;
201 const_iterator begin() const { return Partitions.begin(); }
202 const_iterator end() const { return Partitions.end(); }
205 /// \brief Support for iterating over and manipulating a particular
206 /// partition's uses.
208 /// The iteration support provided for uses is more limited, but also
209 /// includes some manipulation routines to support rewriting the uses of
210 /// partitions during SROA.
212 typedef SmallVectorImpl<PartitionUse>::iterator use_iterator;
213 use_iterator use_begin(unsigned Idx) { return Uses[Idx].begin(); }
214 use_iterator use_begin(const_iterator I) { return Uses[I - begin()].begin(); }
215 use_iterator use_end(unsigned Idx) { return Uses[Idx].end(); }
216 use_iterator use_end(const_iterator I) { return Uses[I - begin()].end(); }
218 typedef SmallVectorImpl<PartitionUse>::const_iterator const_use_iterator;
219 const_use_iterator use_begin(unsigned Idx) const { return Uses[Idx].begin(); }
220 const_use_iterator use_begin(const_iterator I) const {
221 return Uses[I - begin()].begin();
223 const_use_iterator use_end(unsigned Idx) const { return Uses[Idx].end(); }
224 const_use_iterator use_end(const_iterator I) const {
225 return Uses[I - begin()].end();
228 unsigned use_size(unsigned Idx) const { return Uses[Idx].size(); }
229 unsigned use_size(const_iterator I) const { return Uses[I - begin()].size(); }
230 const PartitionUse &getUse(unsigned PIdx, unsigned UIdx) const {
231 return Uses[PIdx][UIdx];
233 const PartitionUse &getUse(const_iterator I, unsigned UIdx) const {
234 return Uses[I - begin()][UIdx];
237 void use_push_back(unsigned Idx, const PartitionUse &PU) {
238 Uses[Idx].push_back(PU);
240 void use_push_back(const_iterator I, const PartitionUse &PU) {
241 Uses[I - begin()].push_back(PU);
245 /// \brief Allow iterating the dead users for this alloca.
247 /// These are instructions which will never actually use the alloca as they
/// are outside the allocated range. They are safe to replace with undef and
/// delete.
251 typedef SmallVectorImpl<Instruction *>::const_iterator dead_user_iterator;
252 dead_user_iterator dead_user_begin() const { return DeadUsers.begin(); }
253 dead_user_iterator dead_user_end() const { return DeadUsers.end(); }
256 /// \brief Allow iterating the dead expressions referring to this alloca.
/// These are operands which cannot actually be used to refer to the
259 /// alloca as they are outside its range and the user doesn't correct for
260 /// that. These mostly consist of PHI node inputs and the like which we just
261 /// need to replace with undef.
263 typedef SmallVectorImpl<Use *>::const_iterator dead_op_iterator;
264 dead_op_iterator dead_op_begin() const { return DeadOperands.begin(); }
265 dead_op_iterator dead_op_end() const { return DeadOperands.end(); }
268 /// \brief MemTransferInst auxiliary data.
269 /// This struct provides some auxiliary data about memory transfer
270 /// intrinsics such as memcpy and memmove. These intrinsics can use two
271 /// different ranges within the same alloca, and provide other challenges to
272 /// correctly represent. We stash extra data to help us untangle this
273 /// after the partitioning is complete.
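///
/// For example (an illustrative case), a memcpy whose destination and source
/// pointers are both GEPs into this same alloca records a DestBegin/DestEnd
/// range and a SourceBegin/SourceEnd range; those ranges later let us detect
/// whole-alloca identity copies and forbid splitting overlapping transfers.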
274 struct MemTransferOffsets {
275 /// The destination begin and end offsets when the destination is within
/// this alloca. If the end offset is zero the destination is not within
/// this alloca.
278 uint64_t DestBegin, DestEnd;
280 /// The source begin and end offsets when the source is within this alloca.
281 /// If the end offset is zero, the source is not within this alloca.
282 uint64_t SourceBegin, SourceEnd;
/// Flag for whether this transfer is splittable.
bool IsSplittable;
287 MemTransferOffsets getMemTransferOffsets(MemTransferInst &II) const {
288 return MemTransferInstData.lookup(&II);
291 /// \brief Map from a PHI or select operand back to a partition.
293 /// When manipulating PHI nodes or selects, they can use more than one
294 /// partition of an alloca. We store a special mapping to allow finding the
295 /// partition referenced by each of these operands, if any.
296 iterator findPartitionForPHIOrSelectOperand(Use *U) {
297 SmallDenseMap<Use *, std::pair<unsigned, unsigned> >::const_iterator MapIt
298 = PHIOrSelectOpMap.find(U);
if (MapIt == PHIOrSelectOpMap.end())
return end();
302 return begin() + MapIt->second.first;
/// \brief Map from a PHI or select operand back to the specific use of
/// a partition.
308 /// Similar to mapping these operands back to the partitions, this maps
309 /// directly to the use structure of that partition.
310 use_iterator findPartitionUseForPHIOrSelectOperand(Use *U) {
311 SmallDenseMap<Use *, std::pair<unsigned, unsigned> >::const_iterator MapIt
312 = PHIOrSelectOpMap.find(U);
313 assert(MapIt != PHIOrSelectOpMap.end());
314 return Uses[MapIt->second.first].begin() + MapIt->second.second;
317 /// \brief Compute a common type among the uses of a particular partition.
/// This routine walks all of the uses of a particular partition and tries
320 /// to find a common type between them. Untyped operations such as memset and
321 /// memcpy are ignored.
322 Type *getCommonType(iterator I) const;
324 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
325 void print(raw_ostream &OS, const_iterator I, StringRef Indent = " ") const;
326 void printUsers(raw_ostream &OS, const_iterator I,
327 StringRef Indent = " ") const;
328 void print(raw_ostream &OS) const;
329 void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump(const_iterator I) const;
330 void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump() const;
334 template <typename DerivedT, typename RetT = void> class BuilderBase;
335 class PartitionBuilder;
friend class AllocaPartitioning::PartitionBuilder;
class UseBuilder;
friend class AllocaPartitioning::UseBuilder;
/// \brief Handle to alloca instruction to simplify method interfaces.
AllocaInst &AI;
345 /// \brief The instruction responsible for this alloca having no partitioning.
347 /// When an instruction (potentially) escapes the pointer to the alloca, we
348 /// store a pointer to that here and abort trying to partition the alloca.
349 /// This will be null if the alloca is partitioned successfully.
350 Instruction *PointerEscapingInstr;
352 /// \brief The partitions of the alloca.
354 /// We store a vector of the partitions over the alloca here. This vector is
355 /// sorted by increasing begin offset, and then by decreasing end offset. See
356 /// the Partition inner class for more details. Initially (during
357 /// construction) there are overlaps, but we form a disjoint sequence of
358 /// partitions while finishing construction and a fully constructed object is
359 /// expected to always have this as a disjoint space.
360 SmallVector<Partition, 8> Partitions;
362 /// \brief The uses of the partitions.
364 /// This is essentially a mapping from each partition to a list of uses of
365 /// that partition. The mapping is done with a Uses vector that has the exact
366 /// same number of entries as the partition vector. Each entry is itself
367 /// a vector of the uses.
368 SmallVector<SmallVector<PartitionUse, 2>, 8> Uses;
370 /// \brief Instructions which will become dead if we rewrite the alloca.
372 /// Note that these are not separated by partition. This is because we expect
373 /// a partitioned alloca to be completely rewritten or not rewritten at all.
374 /// If rewritten, all these instructions can simply be removed and replaced
375 /// with undef as they come from outside of the allocated space.
376 SmallVector<Instruction *, 8> DeadUsers;
378 /// \brief Operands which will become dead if we rewrite the alloca.
380 /// These are operands that in their particular use can be replaced with
381 /// undef when we rewrite the alloca. These show up in out-of-bounds inputs
382 /// to PHI nodes and the like. They aren't entirely dead (there might be
/// a GEP back into the bounds using it elsewhere), nor is the PHI, but we
/// want to swap this particular input for undef to simplify the use lists of
/// the alloca.
386 SmallVector<Use *, 8> DeadOperands;
/// \brief The underlying storage for auxiliary memcpy and memmove info.
389 SmallDenseMap<MemTransferInst *, MemTransferOffsets, 4> MemTransferInstData;
/// \brief A side data structure used when building up the partitions and uses.
/// This mapping is only really used during the initial building of the
/// partitioning so that we can retain information about PHI and select nodes
/// processed.
396 SmallDenseMap<Instruction *, std::pair<uint64_t, bool> > PHIOrSelectSizes;
398 /// \brief Auxiliary information for particular PHI or select operands.
399 SmallDenseMap<Use *, std::pair<unsigned, unsigned>, 4> PHIOrSelectOpMap;
401 /// \brief A utility routine called from the constructor.
/// This does what it says on the tin. It is the key to the alloca partition
404 /// splitting and merging. After it is called we have the desired disjoint
405 /// collection of partitions.
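///
/// For example (illustrative offsets only), a splittable partition [0,16)
/// from a memset that overlaps an unsplittable partition [4,8) from a load is
/// rewritten into the disjoint sequence [0,4), [4,8), [8,16), with the
/// splittable pieces carved around the unsplittable one.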
406 void splitAndMergePartitions();
410 template <typename DerivedT, typename RetT>
411 class AllocaPartitioning::BuilderBase
412 : public InstVisitor<DerivedT, RetT> {
414 BuilderBase(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P)
416 AllocSize(TD.getTypeAllocSize(AI.getAllocatedType())),
422 const DataLayout &TD;
423 const uint64_t AllocSize;
424 AllocaPartitioning &P;
426 SmallPtrSet<Use *, 8> VisitedUses;
struct OffsetUse {
Use *U;
int64_t Offset;
};
SmallVector<OffsetUse, 8> Queue;
// The active offset and use while visiting.
Use *U;
int64_t Offset;
438 void enqueueUsers(Instruction &I, int64_t UserOffset) {
for (Value::use_iterator UI = I.use_begin(), UE = I.use_end();
UI != UE; ++UI)
if (VisitedUses.insert(&UI.getUse())) {
OffsetUse OU = { &UI.getUse(), UserOffset };
Queue.push_back(OU);
}
448 bool computeConstantGEPOffset(GetElementPtrInst &GEPI, int64_t &GEPOffset) {
450 for (gep_type_iterator GTI = gep_type_begin(GEPI), GTE = gep_type_end(GEPI);
452 ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
458 // Handle a struct index, which adds its field offset to the pointer.
459 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
460 unsigned ElementIdx = OpC->getZExtValue();
461 const StructLayout *SL = TD.getStructLayout(STy);
462 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
463 // Check that we can continue to model this GEP in a signed 64-bit offset.
if (ElementOffset > INT64_MAX ||
(GEPOffset >= 0 &&
((uint64_t)GEPOffset + ElementOffset) > INT64_MAX)) {
467 DEBUG(dbgs() << "WARNING: Encountered a cumulative offset exceeding "
468 << "what can be represented in an int64_t!\n"
469 << " alloca: " << P.AI << "\n");
473 GEPOffset = ElementOffset + (uint64_t)-GEPOffset;
475 GEPOffset += ElementOffset;
479 APInt Index = OpC->getValue().sextOrTrunc(TD.getPointerSizeInBits());
480 Index *= APInt(Index.getBitWidth(),
481 TD.getTypeAllocSize(GTI.getIndexedType()));
Index += APInt(Index.getBitWidth(), (uint64_t)GEPOffset,
/*isSigned*/true);
484 // Check if the result can be stored in our int64_t offset.
485 if (!Index.isSignedIntN(sizeof(GEPOffset) * 8)) {
486 DEBUG(dbgs() << "WARNING: Encountered a cumulative offset exceeding "
487 << "what can be represented in an int64_t!\n"
488 << " alloca: " << P.AI << "\n");
492 GEPOffset = Index.getSExtValue();
497 Value *foldSelectInst(SelectInst &SI) {
498 // If the condition being selected on is a constant or the same value is
// being selected between, fold the select. Yes this does (rarely) happen
// for real code.
501 if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition()))
502 return SI.getOperand(1+CI->isZero());
503 if (SI.getOperand(1) == SI.getOperand(2)) {
504 assert(*U == SI.getOperand(1));
505 return SI.getOperand(1);
511 /// \brief Builder for the alloca partitioning.
513 /// This class builds an alloca partitioning by recursively visiting the uses
/// of an alloca and splitting the partitions for each load and store at each
/// offset.
516 class AllocaPartitioning::PartitionBuilder
517 : public BuilderBase<PartitionBuilder, bool> {
518 friend class InstVisitor<PartitionBuilder, bool>;
520 SmallDenseMap<Instruction *, unsigned> MemTransferPartitionMap;
523 PartitionBuilder(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P)
524 : BuilderBase<PartitionBuilder, bool>(TD, AI, P) {}
526 /// \brief Run the builder over the allocation.
528 // Note that we have to re-evaluate size on each trip through the loop as
529 // the queue grows at the tail.
530 for (unsigned Idx = 0; Idx < Queue.size(); ++Idx) {
U = Queue[Idx].U;
Offset = Queue[Idx].Offset;
533 if (!visit(cast<Instruction>(U->getUser())))
540 bool markAsEscaping(Instruction &I) {
P.PointerEscapingInstr = &I;
return false;
545 void insertUse(Instruction &I, int64_t Offset, uint64_t Size,
546 bool IsSplittable = false) {
// Completely skip uses which have a zero size or don't overlap the
// allocation.
if (Size == 0 ||
(Offset >= 0 && (uint64_t)Offset >= AllocSize) ||
(Offset < 0 && (uint64_t)-Offset >= Size)) {
552 DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset
553 << " which starts past the end of the " << AllocSize
555 << " alloca: " << P.AI << "\n"
556 << " use: " << I << "\n");
// Clamp the start to the beginning of the allocation.
if (Offset < 0) {
DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset
<< " to start at the beginning of the alloca:\n"
<< " alloca: " << P.AI << "\n"
<< " use: " << I << "\n");
Size -= (uint64_t)-Offset;
Offset = 0;
}
570 uint64_t BeginOffset = Offset, EndOffset = BeginOffset + Size;
572 // Clamp the end offset to the end of the allocation. Note that this is
573 // formulated to handle even the case where "BeginOffset + Size" overflows.
574 assert(AllocSize >= BeginOffset); // Established above.
575 if (Size > AllocSize - BeginOffset) {
576 DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset
577 << " to remain within the " << AllocSize << " byte alloca:\n"
578 << " alloca: " << P.AI << "\n"
579 << " use: " << I << "\n");
580 EndOffset = AllocSize;
583 Partition New(BeginOffset, EndOffset, IsSplittable);
584 P.Partitions.push_back(New);
587 bool handleLoadOrStore(Type *Ty, Instruction &I, int64_t Offset) {
588 uint64_t Size = TD.getTypeStoreSize(Ty);
590 // If this memory access can be shown to *statically* extend outside the
// bounds of the allocation, its behavior is undefined, so simply
592 // ignore it. Note that this is more strict than the generic clamping
// behavior of insertUse. We also try to handle cases which might run the
// risk of overflow.
595 // FIXME: We should instead consider the pointer to have escaped if this
596 // function is being instrumented for addressing bugs or race conditions.
597 if (Offset < 0 || (uint64_t)Offset >= AllocSize ||
598 Size > (AllocSize - (uint64_t)Offset)) {
599 DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte "
600 << (isa<LoadInst>(I) ? "load" : "store") << " @" << Offset
601 << " which extends past the end of the " << AllocSize
603 << " alloca: " << P.AI << "\n"
604 << " use: " << I << "\n");
608 insertUse(I, Offset, Size);
612 bool visitBitCastInst(BitCastInst &BC) {
613 enqueueUsers(BC, Offset);
617 bool visitGetElementPtrInst(GetElementPtrInst &GEPI) {
int64_t GEPOffset;
if (!computeConstantGEPOffset(GEPI, GEPOffset))
620 return markAsEscaping(GEPI);
622 enqueueUsers(GEPI, GEPOffset);
626 bool visitLoadInst(LoadInst &LI) {
627 assert((!LI.isSimple() || LI.getType()->isSingleValueType()) &&
628 "All simple FCA loads should have been pre-split");
629 return handleLoadOrStore(LI.getType(), LI, Offset);
632 bool visitStoreInst(StoreInst &SI) {
633 Value *ValOp = SI.getValueOperand();
if (ValOp == *U)
return markAsEscaping(SI);
637 assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
638 "All simple FCA stores should have been pre-split");
639 return handleLoadOrStore(ValOp->getType(), SI, Offset);
643 bool visitMemSetInst(MemSetInst &II) {
644 assert(II.getRawDest() == *U && "Pointer use is not the destination?");
645 ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
646 uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset;
647 insertUse(II, Offset, Size, Length);
651 bool visitMemTransferInst(MemTransferInst &II) {
652 ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
653 uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset;
655 // Zero-length mem transfer intrinsics can be ignored entirely.
658 MemTransferOffsets &Offsets = P.MemTransferInstData[&II];
660 // Only intrinsics with a constant length can be split.
661 Offsets.IsSplittable = Length;
663 if (*U == II.getRawDest()) {
664 Offsets.DestBegin = Offset;
665 Offsets.DestEnd = Offset + Size;
667 if (*U == II.getRawSource()) {
668 Offsets.SourceBegin = Offset;
669 Offsets.SourceEnd = Offset + Size;
672 // If we have set up end offsets for both the source and the destination,
673 // we have found both sides of this transfer pointing at the same alloca.
674 bool SeenBothEnds = Offsets.SourceEnd && Offsets.DestEnd;
675 if (SeenBothEnds && II.getRawDest() != II.getRawSource()) {
676 unsigned PrevIdx = MemTransferPartitionMap[&II];
678 // Check if the begin offsets match and this is a non-volatile transfer.
679 // In that case, we can completely elide the transfer.
680 if (!II.isVolatile() && Offsets.SourceBegin == Offsets.DestBegin) {
681 P.Partitions[PrevIdx].kill();
// Otherwise we have an offset transfer within the same alloca. We can't
// split those.
687 P.Partitions[PrevIdx].IsSplittable = Offsets.IsSplittable = false;
688 } else if (SeenBothEnds) {
// Handle the case where this exact use provides both ends of the
// transfer.
691 assert(II.getRawDest() == II.getRawSource());
693 // For non-volatile transfers this is a no-op.
if (!II.isVolatile())
return true;
697 // Otherwise just suppress splitting.
698 Offsets.IsSplittable = false;
702 // Insert the use now that we've fixed up the splittable nature.
703 insertUse(II, Offset, Size, Offsets.IsSplittable);
// Set up the mapping from intrinsic to partition if we've not seen both
706 // ends of this transfer.
unsigned NewIdx = P.Partitions.size() - 1;
bool Inserted
= MemTransferPartitionMap.insert(std::make_pair(&II, NewIdx)).second;
assert(Inserted &&
"Already have intrinsic in map but haven't seen both ends");
// Disable SROA for all intrinsics except the lifetime markers.
// FIXME: What about debug intrinsics? This matches old behavior, but
721 // doesn't make sense.
722 bool visitIntrinsicInst(IntrinsicInst &II) {
723 if (II.getIntrinsicID() == Intrinsic::lifetime_start ||
724 II.getIntrinsicID() == Intrinsic::lifetime_end) {
725 ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
726 uint64_t Size = std::min(AllocSize - Offset, Length->getLimitedValue());
727 insertUse(II, Offset, Size, true);
731 return markAsEscaping(II);
734 Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) {
735 // We consider any PHI or select that results in a direct load or store of
736 // the same offset to be a viable use for partitioning purposes. These uses
// are considered unsplittable and the size is the maximum loaded or stored
// size.
739 SmallPtrSet<Instruction *, 4> Visited;
740 SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses;
741 Visited.insert(Root);
742 Uses.push_back(std::make_pair(cast<Instruction>(*U), Root));
743 // If there are no loads or stores, the access is dead. We mark that as
744 // a size zero access.
Size = 0;
do {
Instruction *I, *UsedI;
748 llvm::tie(UsedI, I) = Uses.pop_back_val();
750 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
751 Size = std::max(Size, TD.getTypeStoreSize(LI->getType()));
754 if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
Value *Op = SI->getOperand(0);
if (Op == UsedI)
return SI;
Size = std::max(Size, TD.getTypeStoreSize(Op->getType()));
762 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
if (!GEP->hasAllZeroIndices())
return GEP;
765 } else if (!isa<BitCastInst>(I) && !isa<PHINode>(I) &&
!isa<SelectInst>(I)) {
return I;
}
770 for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE;
772 if (Visited.insert(cast<Instruction>(*UI)))
773 Uses.push_back(std::make_pair(I, cast<Instruction>(*UI)));
774 } while (!Uses.empty());
779 bool visitPHINode(PHINode &PN) {
// See if we have already computed info on this node.
std::pair<uint64_t, bool> &PHIInfo = P.PHIOrSelectSizes[&PN];
if (PHIInfo.first) {
PHIInfo.second = true;
784 insertUse(PN, Offset, PHIInfo.first);
788 // Check for an unsafe use of the PHI node.
789 if (Instruction *EscapingI = hasUnsafePHIOrSelectUse(&PN, PHIInfo.first))
790 return markAsEscaping(*EscapingI);
792 insertUse(PN, Offset, PHIInfo.first);
796 bool visitSelectInst(SelectInst &SI) {
if (Value *Result = foldSelectInst(SI)) {
if (Result == *U)
// If the result of the constant fold will be the pointer, recurse
// through the select as if we had RAUW'ed it.
801 enqueueUsers(SI, Offset);
// See if we have already computed info on this node.
807 std::pair<uint64_t, bool> &SelectInfo = P.PHIOrSelectSizes[&SI];
808 if (SelectInfo.first) {
809 SelectInfo.second = true;
810 insertUse(SI, Offset, SelectInfo.first);
// Check for an unsafe use of the select.
815 if (Instruction *EscapingI = hasUnsafePHIOrSelectUse(&SI, SelectInfo.first))
816 return markAsEscaping(*EscapingI);
818 insertUse(SI, Offset, SelectInfo.first);
822 /// \brief Disable SROA entirely if there are unhandled users of the alloca.
823 bool visitInstruction(Instruction &I) { return markAsEscaping(I); }
827 /// \brief Use adder for the alloca partitioning.
829 /// This class adds the uses of an alloca to all of the partitions which they
830 /// use. For splittable partitions, this can end up doing essentially a linear
831 /// walk of the partitions, but the number of steps remains bounded by the
832 /// total result instruction size:
/// - The number of partitions is a result of the number of unsplittable
834 /// instructions using the alloca.
835 /// - The number of users of each partition is at worst the total number of
836 /// splittable instructions using the alloca.
/// Thus we will produce N * M instructions in the end, where N is the number
/// of unsplittable uses and M is the number of splittable ones. This visitor does
839 /// the exact same number of updates to the partitioning.
841 /// In the more common case, this visitor will leverage the fact that the
842 /// partition space is pre-sorted, and do a logarithmic search for the
843 /// partition needed, making the total visit a classical ((N + M) * log(N))
844 /// complexity operation.
845 class AllocaPartitioning::UseBuilder : public BuilderBase<UseBuilder> {
846 friend class InstVisitor<UseBuilder>;
848 /// \brief Set to de-duplicate dead instructions found in the use walk.
849 SmallPtrSet<Instruction *, 4> VisitedDeadInsts;
852 UseBuilder(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P)
853 : BuilderBase<UseBuilder>(TD, AI, P) {}
855 /// \brief Run the builder over the allocation.
857 // Note that we have to re-evaluate size on each trip through the loop as
858 // the queue grows at the tail.
859 for (unsigned Idx = 0; Idx < Queue.size(); ++Idx) {
U = Queue[Idx].U;
Offset = Queue[Idx].Offset;
862 this->visit(cast<Instruction>(U->getUser()));
867 void markAsDead(Instruction &I) {
868 if (VisitedDeadInsts.insert(&I))
869 P.DeadUsers.push_back(&I);
872 void insertUse(Instruction &User, int64_t Offset, uint64_t Size) {
873 // If the use has a zero size or extends outside of the allocation, record
874 // it as a dead use for elimination later.
875 if (Size == 0 || (uint64_t)Offset >= AllocSize ||
876 (Offset < 0 && (uint64_t)-Offset >= Size))
877 return markAsDead(User);
// Clamp the start to the beginning of the allocation.
if (Offset < 0) {
Size -= (uint64_t)-Offset;
Offset = 0;
}
885 uint64_t BeginOffset = Offset, EndOffset = BeginOffset + Size;
887 // Clamp the end offset to the end of the allocation. Note that this is
888 // formulated to handle even the case where "BeginOffset + Size" overflows.
889 assert(AllocSize >= BeginOffset); // Established above.
890 if (Size > AllocSize - BeginOffset)
891 EndOffset = AllocSize;
893 // NB: This only works if we have zero overlapping partitions.
894 iterator B = std::lower_bound(P.begin(), P.end(), BeginOffset);
if (B != P.begin() && llvm::prior(B)->EndOffset > BeginOffset)
B = llvm::prior(B);
897 for (iterator I = B, E = P.end(); I != E && I->BeginOffset < EndOffset;
899 PartitionUse NewPU(std::max(I->BeginOffset, BeginOffset),
900 std::min(I->EndOffset, EndOffset), U);
901 P.use_push_back(I, NewPU);
902 if (isa<PHINode>(U->getUser()) || isa<SelectInst>(U->getUser()))
903 P.PHIOrSelectOpMap[U]
904 = std::make_pair(I - P.begin(), P.Uses[I - P.begin()].size() - 1);
908 void handleLoadOrStore(Type *Ty, Instruction &I, int64_t Offset) {
909 uint64_t Size = TD.getTypeStoreSize(Ty);
911 // If this memory access can be shown to *statically* extend outside the
// bounds of the allocation, its behavior is undefined, so simply
913 // ignore it. Note that this is more strict than the generic clamping
914 // behavior of insertUse.
915 if (Offset < 0 || (uint64_t)Offset >= AllocSize ||
916 Size > (AllocSize - (uint64_t)Offset))
917 return markAsDead(I);
919 insertUse(I, Offset, Size);
void visitBitCastInst(BitCastInst &BC) {
if (BC.use_empty())
return markAsDead(BC);
926 enqueueUsers(BC, Offset);
929 void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
930 if (GEPI.use_empty())
931 return markAsDead(GEPI);
934 if (!computeConstantGEPOffset(GEPI, GEPOffset))
935 llvm_unreachable("Unable to compute constant offset for use");
937 enqueueUsers(GEPI, GEPOffset);
940 void visitLoadInst(LoadInst &LI) {
941 handleLoadOrStore(LI.getType(), LI, Offset);
944 void visitStoreInst(StoreInst &SI) {
945 handleLoadOrStore(SI.getOperand(0)->getType(), SI, Offset);
948 void visitMemSetInst(MemSetInst &II) {
949 ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
950 uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset;
951 insertUse(II, Offset, Size);
954 void visitMemTransferInst(MemTransferInst &II) {
955 ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset;
if (!Size)
return markAsDead(II);
960 MemTransferOffsets &Offsets = P.MemTransferInstData[&II];
961 if (!II.isVolatile() && Offsets.DestEnd && Offsets.SourceEnd &&
962 Offsets.DestBegin == Offsets.SourceBegin)
963 return markAsDead(II); // Skip identity transfers without side-effects.
965 insertUse(II, Offset, Size);
968 void visitIntrinsicInst(IntrinsicInst &II) {
969 assert(II.getIntrinsicID() == Intrinsic::lifetime_start ||
970 II.getIntrinsicID() == Intrinsic::lifetime_end);
972 ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
973 insertUse(II, Offset,
974 std::min(AllocSize - Offset, Length->getLimitedValue()));
977 void insertPHIOrSelect(Instruction &User, uint64_t Offset) {
978 uint64_t Size = P.PHIOrSelectSizes.lookup(&User).first;
980 // For PHI and select operands outside the alloca, we can't nuke the entire
981 // phi or select -- the other side might still be relevant, so we special
982 // case them here and use a separate structure to track the operands
983 // themselves which should be replaced with undef.
984 if (Offset >= AllocSize) {
P.DeadOperands.push_back(U);
return;
}
989 insertUse(User, Offset, Size);
void visitPHINode(PHINode &PN) {
if (PN.use_empty())
return markAsDead(PN);
995 insertPHIOrSelect(PN, Offset);
void visitSelectInst(SelectInst &SI) {
if (SI.use_empty())
return markAsDead(SI);
if (Value *Result = foldSelectInst(SI)) {
if (Result == *U)
// If the result of the constant fold will be the pointer, recurse
// through the select as if we had RAUW'ed it.
1005 enqueueUsers(SI, Offset);
else
// Otherwise the operand to the select is dead, and we can replace it
// with undef.
P.DeadOperands.push_back(U);
1014 insertPHIOrSelect(SI, Offset);
1017 /// \brief Unreachable, we've already visited the alloca once.
1018 void visitInstruction(Instruction &I) {
1019 llvm_unreachable("Unhandled instruction in use builder.");
1023 void AllocaPartitioning::splitAndMergePartitions() {
1024 size_t NumDeadPartitions = 0;
1026 // Track the range of splittable partitions that we pass when accumulating
1027 // overlapping unsplittable partitions.
1028 uint64_t SplitEndOffset = 0ull;
1030 Partition New(0ull, 0ull, false);
1032 for (unsigned i = 0, j = i, e = Partitions.size(); i != e; i = j) {
1035 if (!Partitions[i].IsSplittable || New.BeginOffset == New.EndOffset) {
1036 assert(New.BeginOffset == New.EndOffset);
1037 New = Partitions[i];
1039 assert(New.IsSplittable);
1040 New.EndOffset = std::max(New.EndOffset, Partitions[i].EndOffset);
1042 assert(New.BeginOffset != New.EndOffset);
1044 // Scan the overlapping partitions.
1045 while (j != e && New.EndOffset > Partitions[j].BeginOffset) {
1046 // If the new partition we are forming is splittable, stop at the first
1047 // unsplittable partition.
if (New.IsSplittable && !Partitions[j].IsSplittable)
break;
1051 // Grow the new partition to include any equally splittable range. 'j' is
1052 // always equally splittable when New is splittable, but when New is not
// splittable, we may subsume some (or part of some) splittable partition
1054 // without growing the new one.
1055 if (New.IsSplittable == Partitions[j].IsSplittable) {
1056 New.EndOffset = std::max(New.EndOffset, Partitions[j].EndOffset);
1058 assert(!New.IsSplittable);
1059 assert(Partitions[j].IsSplittable);
1060 SplitEndOffset = std::max(SplitEndOffset, Partitions[j].EndOffset);
1063 Partitions[j].kill();
1064 ++NumDeadPartitions;
1068 // If the new partition is splittable, chop off the end as soon as the
1069 // unsplittable subsequent partition starts and ensure we eventually cover
1070 // the splittable area.
1071 if (j != e && New.IsSplittable) {
1072 SplitEndOffset = std::max(SplitEndOffset, New.EndOffset);
1073 New.EndOffset = std::min(New.EndOffset, Partitions[j].BeginOffset);
1076 // Add the new partition if it differs from the original one and is
1077 // non-empty. We can end up with an empty partition here if it was
// splittable but there is an unsplittable one that starts at the same
// offset.
1080 if (New != Partitions[i]) {
1081 if (New.BeginOffset != New.EndOffset)
1082 Partitions.push_back(New);
1083 // Mark the old one for removal.
1084 Partitions[i].kill();
1085 ++NumDeadPartitions;
1088 New.BeginOffset = New.EndOffset;
1089 if (!New.IsSplittable) {
1090 New.EndOffset = std::max(New.EndOffset, SplitEndOffset);
1091 if (j != e && !Partitions[j].IsSplittable)
1092 New.EndOffset = std::min(New.EndOffset, Partitions[j].BeginOffset);
1093 New.IsSplittable = true;
// If there is a trailing splittable partition which won't be fused into
// the next splittable partition, go ahead and add it onto the partitions
// list.
1097 if (New.BeginOffset < New.EndOffset &&
1098 (j == e || !Partitions[j].IsSplittable ||
1099 New.EndOffset < Partitions[j].BeginOffset)) {
1100 Partitions.push_back(New);
1101 New.BeginOffset = New.EndOffset = 0ull;
1106 // Re-sort the partitions now that they have been split and merged into
// a disjoint set of partitions. Also remove any of the dead partitions we've
1108 // replaced in the process.
1109 std::sort(Partitions.begin(), Partitions.end());
1110 if (NumDeadPartitions) {
1111 assert(Partitions.back().isDead());
1112 assert((ptrdiff_t)NumDeadPartitions ==
1113 std::count(Partitions.begin(), Partitions.end(), Partitions.back()));
1115 Partitions.erase(Partitions.end() - NumDeadPartitions, Partitions.end());
1118 AllocaPartitioning::AllocaPartitioning(const DataLayout &TD, AllocaInst &AI)
1123 PointerEscapingInstr(0) {
1124 PartitionBuilder PB(TD, AI, *this);
// Sort the partitions. This arranges for the offsets to be in ascending order,
1129 // and the sizes to be in descending order.
1130 std::sort(Partitions.begin(), Partitions.end());
1132 // Remove any partitions from the back which are marked as dead.
1133 while (!Partitions.empty() && Partitions.back().isDead())
1134 Partitions.pop_back();
1136 if (Partitions.size() > 1) {
1137 // Intersect splittability for all partitions with equal offsets and sizes.
1138 // Then remove all but the first so that we have a sequence of non-equal but
1139 // potentially overlapping partitions.
1140 for (iterator I = Partitions.begin(), J = I, E = Partitions.end(); I != E;
1143 while (J != E && *I == *J) {
1144 I->IsSplittable &= J->IsSplittable;
Partitions.erase(std::unique(Partitions.begin(), Partitions.end()),
Partitions.end());
1151 // Split splittable and merge unsplittable partitions into a disjoint set
1152 // of partitions over the used space of the allocation.
1153 splitAndMergePartitions();
1156 // Now build up the user lists for each of these disjoint partitions by
1157 // re-walking the recursive users of the alloca.
1158 Uses.resize(Partitions.size());
1159 UseBuilder UB(TD, AI, *this);
Type *AllocaPartitioning::getCommonType(iterator I) const {
Type *Ty = 0;
for (const_use_iterator UI = use_begin(I), UE = use_end(I); UI != UE; ++UI) {
if (!UI->U)
continue; // Skip dead uses.
if (isa<IntrinsicInst>(*UI->U->getUser()))
continue;
if (UI->BeginOffset != I->BeginOffset || UI->EndOffset != I->EndOffset)
continue;
1174 if (LoadInst *LI = dyn_cast<LoadInst>(UI->U->getUser())) {
1175 UserTy = LI->getType();
1176 } else if (StoreInst *SI = dyn_cast<StoreInst>(UI->U->getUser())) {
1177 UserTy = SI->getValueOperand()->getType();
1180 if (Ty && Ty != UserTy)
1188 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1190 void AllocaPartitioning::print(raw_ostream &OS, const_iterator I,
1191 StringRef Indent) const {
1192 OS << Indent << "partition #" << (I - begin())
1193 << " [" << I->BeginOffset << "," << I->EndOffset << ")"
1194 << (I->IsSplittable ? " (splittable)" : "")
1195 << (Uses[I - begin()].empty() ? " (zero uses)" : "")
1199 void AllocaPartitioning::printUsers(raw_ostream &OS, const_iterator I,
1200 StringRef Indent) const {
for (const_use_iterator UI = use_begin(I), UE = use_end(I);
UI != UE; ++UI) {
if (!UI->U)
continue; // Skip dead uses.
1205 OS << Indent << " [" << UI->BeginOffset << "," << UI->EndOffset << ") "
1206 << "used by: " << *UI->U->getUser() << "\n";
1207 if (MemTransferInst *II = dyn_cast<MemTransferInst>(UI->U->getUser())) {
1208 const MemTransferOffsets &MTO = MemTransferInstData.lookup(II);
1210 if (!MTO.IsSplittable)
1211 IsDest = UI->BeginOffset == MTO.DestBegin;
1213 IsDest = MTO.DestBegin != 0u;
1214 OS << Indent << " (original " << (IsDest ? "dest" : "source") << ": "
1215 << "[" << (IsDest ? MTO.DestBegin : MTO.SourceBegin)
1216 << "," << (IsDest ? MTO.DestEnd : MTO.SourceEnd) << ")\n";
1221 void AllocaPartitioning::print(raw_ostream &OS) const {
1222 if (PointerEscapingInstr) {
1223 OS << "No partitioning for alloca: " << AI << "\n"
1224 << " A pointer to this alloca escaped by:\n"
1225 << " " << *PointerEscapingInstr << "\n";
1229 OS << "Partitioning of alloca: " << AI << "\n";
1231 for (const_iterator I = begin(), E = end(); I != E; ++I, ++Num) {
1237 void AllocaPartitioning::dump(const_iterator I) const { print(dbgs(), I); }
1238 void AllocaPartitioning::dump() const { print(dbgs()); }
1240 #endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1244 /// \brief Implementation of LoadAndStorePromoter for promoting allocas.
1246 /// This subclass of LoadAndStorePromoter adds overrides to handle promoting
1247 /// the loads and stores of an alloca instruction, as well as updating its
1248 /// debug information. This is used when a domtree is unavailable and thus
/// mem2reg in its full form can't be used to handle promotion of allocas to
/// SSA values.
class AllocaPromoter : public LoadAndStorePromoter {
AllocaInst &AI;
DIBuilder &DIB;
1255 SmallVector<DbgDeclareInst *, 4> DDIs;
1256 SmallVector<DbgValueInst *, 4> DVIs;
1259 AllocaPromoter(const SmallVectorImpl<Instruction*> &Insts, SSAUpdater &S,
1260 AllocaInst &AI, DIBuilder &DIB)
1261 : LoadAndStorePromoter(Insts, S), AI(AI), DIB(DIB) {}
1263 void run(const SmallVectorImpl<Instruction*> &Insts) {
1264 // Remember which alloca we're promoting (for isInstInList).
1265 if (MDNode *DebugNode = MDNode::getIfExists(AI.getContext(), &AI)) {
for (Value::use_iterator UI = DebugNode->use_begin(),
UE = DebugNode->use_end();
UI != UE; ++UI)
1269 if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(*UI))
1270 DDIs.push_back(DDI);
1271 else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(*UI))
1272 DVIs.push_back(DVI);
1275 LoadAndStorePromoter::run(Insts);
1276 AI.eraseFromParent();
1277 while (!DDIs.empty())
1278 DDIs.pop_back_val()->eraseFromParent();
1279 while (!DVIs.empty())
1280 DVIs.pop_back_val()->eraseFromParent();
1283 virtual bool isInstInList(Instruction *I,
1284 const SmallVectorImpl<Instruction*> &Insts) const {
1285 if (LoadInst *LI = dyn_cast<LoadInst>(I))
1286 return LI->getOperand(0) == &AI;
1287 return cast<StoreInst>(I)->getPointerOperand() == &AI;
1290 virtual void updateDebugInfo(Instruction *Inst) const {
1291 for (SmallVector<DbgDeclareInst *, 4>::const_iterator I = DDIs.begin(),
1292 E = DDIs.end(); I != E; ++I) {
1293 DbgDeclareInst *DDI = *I;
1294 if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
1295 ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
1296 else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
1297 ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
1299 for (SmallVector<DbgValueInst *, 4>::const_iterator I = DVIs.begin(),
1300 E = DVIs.end(); I != E; ++I) {
DbgValueInst *DVI = *I;
Value *Arg = 0;
1303 if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
1304 // If an argument is zero extended then use argument directly. The ZExt
// may be zapped by an optimization pass in the future.
1306 if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
1307 Arg = dyn_cast<Argument>(ZExt->getOperand(0));
1308 if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
Arg = dyn_cast<Argument>(SExt->getOperand(0));
if (!Arg)
Arg = SI->getOperand(0);
1312 } else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
1313 Arg = LI->getOperand(0);
1317 Instruction *DbgVal =
DIB.insertDbgValueIntrinsic(Arg, 0, DIVariable(DVI->getVariable()),
Inst);
1320 DbgVal->setDebugLoc(DVI->getDebugLoc());
1324 } // end anon namespace
1328 /// \brief An optimization pass providing Scalar Replacement of Aggregates.
1330 /// This pass takes allocations which can be completely analyzed (that is, they
1331 /// don't escape) and tries to turn them into scalar SSA values. There are
1332 /// a few steps to this process.
1334 /// 1) It takes allocations of aggregates and analyzes the ways in which they
1335 /// are used to try to split them into smaller allocations, ideally of
1336 /// a single scalar data type. It will split up memcpy and memset accesses
/// as necessary and try to isolate individual scalar accesses.
1338 /// 2) It will transform accesses into forms which are suitable for SSA value
1339 /// promotion. This can be replacing a memset with a scalar store of an
1340 /// integer value, or it can involve speculating operations on a PHI or
1341 /// select to be a PHI or select of the results.
1342 /// 3) Finally, this will try to detect a pattern of accesses which map cleanly
1343 /// onto insert and extract operations on a vector value, and convert them to
1344 /// this form. By doing so, it will enable promotion of vector aggregates to
1345 /// SSA vector values.
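///
/// As a rough illustration (names invented), an alloca of {i32, i32} whose
/// two fields are only ever loaded and stored individually is split into two
/// i32 allocas, each of which can then be promoted to an independent SSA
/// value.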
1346 class SROA : public FunctionPass {
1347 const bool RequiresDomTree;
LLVMContext *C;
const DataLayout *TD;
DominatorTree *DT;
1353 /// \brief Worklist of alloca instructions to simplify.
1355 /// Each alloca in the function is added to this. Each new alloca formed gets
1356 /// added to it as well to recursively simplify unless that alloca can be
/// directly promoted. Finally, each time we rewrite a use of an alloca other
/// than the one being actively rewritten, we add it back onto the list if not
1359 /// already present to ensure it is re-visited.
1360 SetVector<AllocaInst *, SmallVector<AllocaInst *, 16> > Worklist;
1362 /// \brief A collection of instructions to delete.
/// We try to batch deletions to simplify code and make things a bit more
/// efficient.
1365 SmallVector<Instruction *, 8> DeadInsts;
1367 /// \brief A set to prevent repeatedly marking an instruction split into many
1368 /// uses as dead. Only used to guard insertion into DeadInsts.
1369 SmallPtrSet<Instruction *, 4> DeadSplitInsts;
1371 /// \brief Post-promotion worklist.
1373 /// Sometimes we discover an alloca which has a high probability of becoming
1374 /// viable for SROA after a round of promotion takes place. In those cases,
1375 /// the alloca is enqueued here for re-processing.
1377 /// Note that we have to be very careful to clear allocas out of this list in
1378 /// the event they are deleted.
1379 SetVector<AllocaInst *, SmallVector<AllocaInst *, 16> > PostPromotionWorklist;
1381 /// \brief A collection of alloca instructions we can directly promote.
1382 std::vector<AllocaInst *> PromotableAllocas;
1385 SROA(bool RequiresDomTree = true)
1386 : FunctionPass(ID), RequiresDomTree(RequiresDomTree),
1387 C(0), TD(0), DT(0) {
1388 initializeSROAPass(*PassRegistry::getPassRegistry());
1390 bool runOnFunction(Function &F);
1391 void getAnalysisUsage(AnalysisUsage &AU) const;
1393 const char *getPassName() const { return "SROA"; }
1397 friend class PHIOrSelectSpeculator;
1398 friend class AllocaPartitionRewriter;
1399 friend class AllocaPartitionVectorRewriter;
1401 bool rewriteAllocaPartition(AllocaInst &AI,
1402 AllocaPartitioning &P,
1403 AllocaPartitioning::iterator PI);
1404 bool splitAlloca(AllocaInst &AI, AllocaPartitioning &P);
1405 bool runOnAlloca(AllocaInst &AI);
1406 void deleteDeadInstructions(SmallPtrSet<AllocaInst *, 4> &DeletedAllocas);
1407 bool promoteAllocas(Function &F);
1413 FunctionPass *llvm::createSROAPass(bool RequiresDomTree) {
1414 return new SROA(RequiresDomTree);
INITIALIZE_PASS_BEGIN(SROA, "sroa", "Scalar Replacement Of Aggregates",
false, false)
1419 INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_END(SROA, "sroa", "Scalar Replacement Of Aggregates",
false, false)
1424 /// \brief Visitor to speculate PHIs and Selects where possible.
1425 class PHIOrSelectSpeculator : public InstVisitor<PHIOrSelectSpeculator> {
1426 // Befriend the base class so it can delegate to private visit methods.
1427 friend class llvm::InstVisitor<PHIOrSelectSpeculator>;
1429 const DataLayout &TD;
AllocaPartitioning &P;
SROA &Pass;
1434 PHIOrSelectSpeculator(const DataLayout &TD, AllocaPartitioning &P, SROA &Pass)
1435 : TD(TD), P(P), Pass(Pass) {}
1437 /// \brief Visit the users of an alloca partition and rewrite them.
1438 void visitUsers(AllocaPartitioning::const_iterator PI) {
1439 // Note that we need to use an index here as the underlying vector of uses
1440 // may be grown during speculation. However, we never need to re-visit the
1441 // new uses, and so we can use the initial size bound.
1442 for (unsigned Idx = 0, Size = P.use_size(PI); Idx != Size; ++Idx) {
const AllocaPartitioning::PartitionUse &PU = P.getUse(PI, Idx);
if (!PU.U)
continue; // Skip dead use.
1447 visit(cast<Instruction>(PU.U->getUser()));
1452 // By default, skip this instruction.
1453 void visitInstruction(Instruction &I) {}
1455 /// PHI instructions that use an alloca and are subsequently loaded can be
1456 /// rewritten to load both input pointers in the pred blocks and then PHI the
1457 /// results, allowing the load of the alloca to be promoted.
1459 /// %P2 = phi [i32* %Alloca, i32* %Other]
/// %V = load i32* %P2
/// to:
/// %V1 = load i32* %Alloca -> will be mem2reg'd
1464 /// %V2 = load i32* %Other
1466 /// %V = phi [i32 %V1, i32 %V2]
1468 /// We can do this to a select if its only uses are loads and if the operands
1469 /// to the select can be loaded unconditionally.
1471 /// FIXME: This should be hoisted into a generic utility, likely in
1472 /// Transforms/Util/Local.h
1473 bool isSafePHIToSpeculate(PHINode &PN, SmallVectorImpl<LoadInst *> &Loads) {
1474 // For now, we can only do this promotion if the load is in the same block
1475 // as the PHI, and if there are no stores between the phi and load.
1476 // TODO: Allow recursive phi users.
1477 // TODO: Allow stores.
1478 BasicBlock *BB = PN.getParent();
1479 unsigned MaxAlign = 0;
1480 for (Value::use_iterator UI = PN.use_begin(), UE = PN.use_end();
1482 LoadInst *LI = dyn_cast<LoadInst>(*UI);
1483 if (LI == 0 || !LI->isSimple()) return false;
1485 // For now we only allow loads in the same block as the PHI. This is
// a common case that happens when instcombine merges two loads through
// a PHI.
1488 if (LI->getParent() != BB) return false;
// Ensure that there are no instructions between the PHI and the load that
// could store.
1492 for (BasicBlock::iterator BBI = &PN; &*BBI != LI; ++BBI)
1493 if (BBI->mayWriteToMemory())
1496 MaxAlign = std::max(MaxAlign, LI->getAlignment());
1497 Loads.push_back(LI);
1500 // We can only transform this if it is safe to push the loads into the
1501 // predecessor blocks. The only thing to watch out for is that we can't put
1502 // a possibly trapping load in the predecessor if it is a critical edge.
1503 for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num;
1505 TerminatorInst *TI = PN.getIncomingBlock(Idx)->getTerminator();
1506 Value *InVal = PN.getIncomingValue(Idx);
1508 // If the value is produced by the terminator of the predecessor (an
1509 // invoke) or it has side-effects, there is no valid place to put a load
1510 // in the predecessor.
1511 if (TI == InVal || TI->mayHaveSideEffects())
// If the predecessor has a single successor, then the edge isn't
// critical.
1516 if (TI->getNumSuccessors() == 1)
1519 // If this pointer is always safe to load, or if we can prove that there
// is already a load in the block, then we can move the load to the pred
// block.
1522 if (InVal->isDereferenceablePointer() ||
1523 isSafeToLoadUnconditionally(InVal, TI, MaxAlign, &TD))
1532 void visitPHINode(PHINode &PN) {
1533 DEBUG(dbgs() << " original: " << PN << "\n");
1535 SmallVector<LoadInst *, 4> Loads;
1536 if (!isSafePHIToSpeculate(PN, Loads))
1539 assert(!Loads.empty());
1541 Type *LoadTy = cast<PointerType>(PN.getType())->getElementType();
1542 IRBuilder<> PHIBuilder(&PN);
1543 PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(),
1544 PN.getName() + ".sroa.speculated");
// Get the TBAA tag and alignment to use from one of the loads. It doesn't
// matter which one we pick; any of them is acceptable here even if they differ.
1548 LoadInst *SomeLoad = cast<LoadInst>(Loads.back());
1549 MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa);
1550 unsigned Align = SomeLoad->getAlignment();
1552 // Rewrite all loads of the PN to use the new PHI.
1554 LoadInst *LI = Loads.pop_back_val();
1555 LI->replaceAllUsesWith(NewPN);
1556 Pass.DeadInsts.push_back(LI);
1557 } while (!Loads.empty());
1559 // Inject loads into all of the pred blocks.
1560 for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
1561 BasicBlock *Pred = PN.getIncomingBlock(Idx);
1562 TerminatorInst *TI = Pred->getTerminator();
1563 Use *InUse = &PN.getOperandUse(PN.getOperandNumForIncomingValue(Idx));
1564 Value *InVal = PN.getIncomingValue(Idx);
IRBuilder<> PredBuilder(TI);
LoadInst *Load
= PredBuilder.CreateLoad(InVal, (PN.getName() + ".sroa.speculate.load." +
Pred->getName()));
++NumLoadsSpeculated;
1571 Load->setAlignment(Align);
1573 Load->setMetadata(LLVMContext::MD_tbaa, TBAATag);
1574 NewPN->addIncoming(Load, Pred);
Instruction *Ptr = dyn_cast<Instruction>(InVal);
if (!Ptr)
// No uses to rewrite.
continue;
// Try to look up and rewrite any partition uses corresponding to this phi
// input.
1583 AllocaPartitioning::iterator PI
1584 = P.findPartitionForPHIOrSelectOperand(InUse);
// Replace the Use in the PartitionUse for this operand with the Use
// inside the load.
1590 AllocaPartitioning::use_iterator UI
1591 = P.findPartitionUseForPHIOrSelectOperand(InUse);
1592 assert(isa<PHINode>(*UI->U->getUser()));
1593 UI->U = &Load->getOperandUse(Load->getPointerOperandIndex());
1595 DEBUG(dbgs() << " speculated to: " << *NewPN << "\n");
1598 /// Select instructions that use an alloca and are subsequently loaded can be
1599 /// rewritten to load both input pointers and then select between the result,
1600 /// allowing the load of the alloca to be promoted.
1602 /// %P2 = select i1 %cond, i32* %Alloca, i32* %Other
/// %V = load i32* %P2
/// to:
/// %V1 = load i32* %Alloca -> will be mem2reg'd
1606 /// %V2 = load i32* %Other
1607 /// %V = select i1 %cond, i32 %V1, i32 %V2
1609 /// We can do this to a select if its only uses are loads and if the operand
1610 /// to the select can be loaded unconditionally.
1611 bool isSafeSelectToSpeculate(SelectInst &SI,
1612 SmallVectorImpl<LoadInst *> &Loads) {
1613 Value *TValue = SI.getTrueValue();
1614 Value *FValue = SI.getFalseValue();
1615 bool TDerefable = TValue->isDereferenceablePointer();
1616 bool FDerefable = FValue->isDereferenceablePointer();
1618 for (Value::use_iterator UI = SI.use_begin(), UE = SI.use_end();
1620 LoadInst *LI = dyn_cast<LoadInst>(*UI);
1621 if (LI == 0 || !LI->isSimple()) return false;
// Both operands to the select need to be dereferenceable, either
// absolutely (e.g. allocas) or at this point because we can see other
// accesses to it.
1626 if (!TDerefable && !isSafeToLoadUnconditionally(TValue, LI,
1627 LI->getAlignment(), &TD))
1629 if (!FDerefable && !isSafeToLoadUnconditionally(FValue, LI,
1630 LI->getAlignment(), &TD))
1632 Loads.push_back(LI);
1638 void visitSelectInst(SelectInst &SI) {
1639 DEBUG(dbgs() << " original: " << SI << "\n");
1640 IRBuilder<> IRB(&SI);
1642 // If the select isn't safe to speculate, just use simple logic to emit it.
1643 SmallVector<LoadInst *, 4> Loads;
    if (!isSafeSelectToSpeculate(SI, Loads))
      return;
1647 Use *Ops[2] = { &SI.getOperandUse(1), &SI.getOperandUse(2) };
1648 AllocaPartitioning::iterator PIs[2];
1649 AllocaPartitioning::PartitionUse PUs[2];
1650 for (unsigned i = 0, e = 2; i != e; ++i) {
1651 PIs[i] = P.findPartitionForPHIOrSelectOperand(Ops[i]);
1652 if (PIs[i] != P.end()) {
1653 // If the pointer is within the partitioning, remove the select from
1654 // its uses. We'll add in the new loads below.
1655 AllocaPartitioning::use_iterator UI
1656 = P.findPartitionUseForPHIOrSelectOperand(Ops[i]);
1658 // Clear out the use here so that the offsets into the use list remain
1659 // stable but this use is ignored when rewriting.
1664 Value *TV = SI.getTrueValue();
1665 Value *FV = SI.getFalseValue();
1666 // Replace the loads of the select with a select of two loads.
1667 while (!Loads.empty()) {
1668 LoadInst *LI = Loads.pop_back_val();
1670 IRB.SetInsertPoint(LI);
      LoadInst *TL =
        IRB.CreateLoad(TV, LI->getName() + ".sroa.speculate.load.true");
      LoadInst *FL =
        IRB.CreateLoad(FV, LI->getName() + ".sroa.speculate.load.false");
1675 NumLoadsSpeculated += 2;
1677 // Transfer alignment and TBAA info if present.
1678 TL->setAlignment(LI->getAlignment());
1679 FL->setAlignment(LI->getAlignment());
1680 if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa)) {
1681 TL->setMetadata(LLVMContext::MD_tbaa, Tag);
1682 FL->setMetadata(LLVMContext::MD_tbaa, Tag);
1685 Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL,
1686 LI->getName() + ".sroa.speculated");
1688 LoadInst *Loads[2] = { TL, FL };
1689 for (unsigned i = 0, e = 2; i != e; ++i) {
1690 if (PIs[i] != P.end()) {
1691 Use *LoadUse = &Loads[i]->getOperandUse(0);
1692 assert(PUs[i].U->get() == LoadUse->get());
1694 P.use_push_back(PIs[i], PUs[i]);
1698 DEBUG(dbgs() << " speculated to: " << *V << "\n");
1699 LI->replaceAllUsesWith(V);
1700 Pass.DeadInsts.push_back(LI);
1706 /// \brief Accumulate the constant offsets in a GEP into a single APInt offset.
1708 /// If the provided GEP is all-constant, the total byte offset formed by the
1709 /// GEP is computed and Offset is set to it. If the GEP has any non-constant
1710 /// operands, the function returns false and the value of Offset is unmodified.
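///
/// For illustration (an assumed example, not taken from the original
/// comment): with a 64-bit DataLayout where { i32, i64 } occupies 16 bytes
/// and the i64 field sits at offset 8, a GEP such as
///   getelementptr inbounds { i32, i64 }* %p, i64 1, i32 1
/// accumulates a constant Offset of 16 + 8 = 24 bytes.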
static bool accumulateGEPOffsets(const DataLayout &TD, GEPOperator &GEP,
                                 APInt &Offset) {
1713 APInt GEPOffset(Offset.getBitWidth(), 0);
1714 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1715 GTI != GTE; ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      return false;
    if (OpC->isZero()) continue;
1721 // Handle a struct index, which adds its field offset to the pointer.
1722 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
1723 unsigned ElementIdx = OpC->getZExtValue();
1724 const StructLayout *SL = TD.getStructLayout(STy);
1725 GEPOffset += APInt(Offset.getBitWidth(),
                         SL->getElementOffset(ElementIdx));
      continue;
    }

1730 APInt TypeSize(Offset.getBitWidth(),
1731 TD.getTypeAllocSize(GTI.getIndexedType()));
1732 if (VectorType *VTy = dyn_cast<VectorType>(*GTI)) {
1733 assert((VTy->getScalarSizeInBits() % 8) == 0 &&
1734 "vector element size is not a multiple of 8, cannot GEP over it");
1735 TypeSize = VTy->getScalarSizeInBits() / 8;
1738 GEPOffset += OpC->getValue().sextOrTrunc(Offset.getBitWidth()) * TypeSize;
1744 /// \brief Build a GEP out of a base pointer and indices.
1746 /// This will return the BasePtr if that is valid, or build a new GEP
1747 /// instruction using the IRBuilder if GEP-ing is needed.
1748 static Value *buildGEP(IRBuilder<> &IRB, Value *BasePtr,
1749 SmallVectorImpl<Value *> &Indices,
1750 const Twine &Prefix) {
  if (Indices.empty())
    return BasePtr;

  // A single zero index is a no-op, so check for this and avoid building a GEP
  // in that case.
  if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
    return BasePtr;

1759 return IRB.CreateInBoundsGEP(BasePtr, Indices, Prefix + ".idx");
1762 /// \brief Get a natural GEP off of the BasePtr walking through Ty toward
1763 /// TargetTy without changing the offset of the pointer.
1765 /// This routine assumes we've already established a properly offset GEP with
1766 /// Indices, and arrived at the Ty type. The goal is to continue to GEP with
1767 /// zero-indices down through type layers until we find one the same as
1768 /// TargetTy. If we can't find one with the same type, we at least try to use
1769 /// one with the same size. If none of that works, we just produce the GEP as
1770 /// indicated by Indices to have the correct offset.
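///
/// For illustration (an assumed example): descending from a { { float } }
/// element type toward a float TargetTy appends two zero indices, so the
/// resulting GEP points directly at the innermost float without changing the
/// byte offset.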
1771 static Value *getNaturalGEPWithType(IRBuilder<> &IRB, const DataLayout &TD,
1772 Value *BasePtr, Type *Ty, Type *TargetTy,
1773 SmallVectorImpl<Value *> &Indices,
1774 const Twine &Prefix) {
  if (Ty == TargetTy)
    return buildGEP(IRB, BasePtr, Indices, Prefix);

  // See if we can descend into a struct and locate a field with the correct
  // type.
  unsigned NumLayers = 0;
  Type *ElementTy = Ty;
  do {
    if (ElementTy->isPointerTy())
      break;
1785 if (SequentialType *SeqTy = dyn_cast<SequentialType>(ElementTy)) {
1786 ElementTy = SeqTy->getElementType();
1787 Indices.push_back(IRB.getInt(APInt(TD.getPointerSizeInBits(), 0)));
1788 } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
1789 if (STy->element_begin() == STy->element_end())
1790 break; // Nothing left to descend into.
1791 ElementTy = *STy->element_begin();
1792 Indices.push_back(IRB.getInt32(0));
1797 } while (ElementTy != TargetTy);
1798 if (ElementTy != TargetTy)
1799 Indices.erase(Indices.end() - NumLayers, Indices.end());
1801 return buildGEP(IRB, BasePtr, Indices, Prefix);
1804 /// \brief Recursively compute indices for a natural GEP.
1806 /// This is the recursive step for getNaturalGEPWithOffset that walks down the
1807 /// element types adding appropriate indices for the GEP.
1808 static Value *getNaturalGEPRecursively(IRBuilder<> &IRB, const DataLayout &TD,
                                       Value *Ptr, Type *Ty, APInt &Offset,
                                       Type *TargetTy,
                                       SmallVectorImpl<Value *> &Indices,
                                       const Twine &Prefix) {
  if (Offset == 0)
    return getNaturalGEPWithType(IRB, TD, Ptr, Ty, TargetTy, Indices, Prefix);

  // We can't recurse through pointer types.
  if (Ty->isPointerTy())
    return 0;
1820 // We try to analyze GEPs over vectors here, but note that these GEPs are
1821 // extremely poorly defined currently. The long-term goal is to remove GEPing
1822 // over a vector from the IR completely.
1823 if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
1824 unsigned ElementSizeInBits = VecTy->getScalarSizeInBits();
1825 if (ElementSizeInBits % 8)
1826 return 0; // GEPs over non-multiple of 8 size vector elements are invalid.
1827 APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8);
1828 APInt NumSkippedElements = Offset.udiv(ElementSize);
1829 if (NumSkippedElements.ugt(VecTy->getNumElements()))
1831 Offset -= NumSkippedElements * ElementSize;
1832 Indices.push_back(IRB.getInt(NumSkippedElements));
1833 return getNaturalGEPRecursively(IRB, TD, Ptr, VecTy->getElementType(),
1834 Offset, TargetTy, Indices, Prefix);
1837 if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
1838 Type *ElementTy = ArrTy->getElementType();
1839 APInt ElementSize(Offset.getBitWidth(), TD.getTypeAllocSize(ElementTy));
1840 APInt NumSkippedElements = Offset.udiv(ElementSize);
1841 if (NumSkippedElements.ugt(ArrTy->getNumElements()))
1844 Offset -= NumSkippedElements * ElementSize;
1845 Indices.push_back(IRB.getInt(NumSkippedElements));
    return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy,
                                    Indices, Prefix);
  }

  StructType *STy = dyn_cast<StructType>(Ty);
  if (!STy)
    return 0;
1854 const StructLayout *SL = TD.getStructLayout(STy);
1855 uint64_t StructOffset = Offset.getZExtValue();
1856 if (StructOffset >= SL->getSizeInBytes())
1858 unsigned Index = SL->getElementContainingOffset(StructOffset);
1859 Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index));
1860 Type *ElementTy = STy->getElementType(Index);
1861 if (Offset.uge(TD.getTypeAllocSize(ElementTy)))
1862 return 0; // The offset points into alignment padding.
1864 Indices.push_back(IRB.getInt32(Index));
  return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy,
                                  Indices, Prefix);
}
1869 /// \brief Get a natural GEP from a base pointer to a particular offset and
1870 /// resulting in a particular type.
1872 /// The goal is to produce a "natural" looking GEP that works with the existing
1873 /// composite types to arrive at the appropriate offset and element type for
1874 /// a pointer. TargetTy is the element type the returned GEP should point-to if
1875 /// possible. We recurse by decreasing Offset, adding the appropriate index to
1876 /// Indices, and setting Ty to the result subtype.
1878 /// If no natural GEP can be constructed, this function returns null.
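///
/// For illustration (an assumed example): for a pointer to [8 x i32] with a
/// byte Offset of 12 and an i32 TargetTy, the natural GEP uses indices 0 and
/// 3, selecting the element that begins at byte 12.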
1879 static Value *getNaturalGEPWithOffset(IRBuilder<> &IRB, const DataLayout &TD,
1880 Value *Ptr, APInt Offset, Type *TargetTy,
1881 SmallVectorImpl<Value *> &Indices,
1882 const Twine &Prefix) {
1883 PointerType *Ty = cast<PointerType>(Ptr->getType());
1885 // Don't consider any GEPs through an i8* as natural unless the TargetTy is
1887 if (Ty == IRB.getInt8PtrTy() && TargetTy->isIntegerTy(8))
1890 Type *ElementTy = Ty->getElementType();
1891 if (!ElementTy->isSized())
1892 return 0; // We can't GEP through an unsized element.
1893 APInt ElementSize(Offset.getBitWidth(), TD.getTypeAllocSize(ElementTy));
1894 if (ElementSize == 0)
1895 return 0; // Zero-length arrays can't help us build a natural GEP.
1896 APInt NumSkippedElements = Offset.udiv(ElementSize);
1898 Offset -= NumSkippedElements * ElementSize;
1899 Indices.push_back(IRB.getInt(NumSkippedElements));
  return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy,
                                  Indices, Prefix);
}
1904 /// \brief Compute an adjusted pointer from Ptr by Offset bytes where the
1905 /// resulting pointer has PointerTy.
1907 /// This tries very hard to compute a "natural" GEP which arrives at the offset
1908 /// and produces the pointer type desired. Where it cannot, it will try to use
1909 /// the natural GEP to arrive at the offset and bitcast to the type. Where that
1910 /// fails, it will try to use an existing i8* and GEP to the byte offset and
1911 /// bitcast to the type.
1913 /// The strategy for finding the more natural GEPs is to peel off layers of the
1914 /// pointer, walking back through bit casts and GEPs, searching for a base
1915 /// pointer from which we can compute a natural GEP with the desired
/// properties. The algorithm tries to fold as many constant indices into
1917 /// a single GEP as possible, thus making each GEP more independent of the
1918 /// surrounding code.
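///
/// For illustration (a sketch of the strategy, not a guarantee): if Ptr is a
/// bitcast of a GEP with constant indices into an alloca, the GEP's offset is
/// folded into Offset, the walk resumes from the alloca, and the result is
/// typically a single GEP off the alloca plus at most one bitcast.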
1919 static Value *getAdjustedPtr(IRBuilder<> &IRB, const DataLayout &TD,
1920 Value *Ptr, APInt Offset, Type *PointerTy,
1921 const Twine &Prefix) {
1922 // Even though we don't look through PHI nodes, we could be called on an
1923 // instruction in an unreachable block, which may be on a cycle.
1924 SmallPtrSet<Value *, 4> Visited;
1925 Visited.insert(Ptr);
1926 SmallVector<Value *, 4> Indices;
1928 // We may end up computing an offset pointer that has the wrong type. If we
1929 // never are able to compute one directly that has the correct type, we'll
1930 // fall back to it, so keep it around here.
1931 Value *OffsetPtr = 0;
  // Remember any i8 pointer we come across to re-use if we need to do a raw
  // byte offset.
  Value *Int8Ptr = 0;
  APInt Int8PtrOffset(Offset.getBitWidth(), 0);
1938 Type *TargetTy = PointerTy->getPointerElementType();
1941 // First fold any existing GEPs into the offset.
1942 while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
1943 APInt GEPOffset(Offset.getBitWidth(), 0);
1944 if (!accumulateGEPOffsets(TD, *GEP, GEPOffset))
1946 Offset += GEPOffset;
1947 Ptr = GEP->getPointerOperand();
1948 if (!Visited.insert(Ptr))
1952 // See if we can perform a natural GEP here.
1954 if (Value *P = getNaturalGEPWithOffset(IRB, TD, Ptr, Offset, TargetTy,
1956 if (P->getType() == PointerTy) {
1957 // Zap any offset pointer that we ended up computing in previous rounds.
1958 if (OffsetPtr && OffsetPtr->use_empty())
1959 if (Instruction *I = dyn_cast<Instruction>(OffsetPtr))
1960 I->eraseFromParent();
1968 // Stash this pointer if we've found an i8*.
    if (Ptr->getType()->isIntegerTy(8)) {
      Int8Ptr = Ptr;
      Int8PtrOffset = Offset;
    }
1974 // Peel off a layer of the pointer and update the offset appropriately.
1975 if (Operator::getOpcode(Ptr) == Instruction::BitCast) {
1976 Ptr = cast<Operator>(Ptr)->getOperand(0);
1977 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
1978 if (GA->mayBeOverridden())
1980 Ptr = GA->getAliasee();
1984 assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!");
1985 } while (Visited.insert(Ptr));
  if (!OffsetPtr) {
    if (!Int8Ptr) {
      Int8Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy(),
                                  Prefix + ".raw_cast");
      Int8PtrOffset = Offset;
    }
1994 OffsetPtr = Int8PtrOffset == 0 ? Int8Ptr :
1995 IRB.CreateInBoundsGEP(Int8Ptr, IRB.getInt(Int8PtrOffset),
                            Prefix + ".raw_idx");
  }
  Ptr = OffsetPtr;

2000 // On the off chance we were targeting i8*, guard the bitcast here.
2001 if (Ptr->getType() != PointerTy)
    Ptr = IRB.CreateBitCast(Ptr, PointerTy, Prefix + ".cast");

  return Ptr;
}
2007 /// \brief Test whether we can convert a value from the old to the new type.
2009 /// This predicate should be used to guard calls to convertValue in order to
2010 /// ensure that we only try to convert viable values. The strategy is that we
2011 /// will peel off single element struct and array wrappings to get to an
2012 /// underlying value, and convert that value.
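///
/// For illustration (assumed examples): an i64 and a double are convertible
/// (equal size, a bitcast suffices); with 64-bit pointers an i64 and an i8*
/// are convertible via ptrtoint/inttoptr; an i64 and a <2 x i32> are
/// convertible; types of differing sizes never are.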
static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) {
  if (OldTy == NewTy)
    return true;
  if (DL.getTypeSizeInBits(NewTy) != DL.getTypeSizeInBits(OldTy))
    return false;
  if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType())
    return false;

  if (NewTy->isPointerTy() || OldTy->isPointerTy()) {
    if (NewTy->isPointerTy() && OldTy->isPointerTy())
      return true;
    if (NewTy->isIntegerTy() || OldTy->isIntegerTy())
      return true;
    return false;
  }

  return true;
}
/// \brief Generic routine to convert an SSA value to a value of a different
/// type.
///
2035 /// This will try various different casting techniques, such as bitcasts,
2036 /// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test
2037 /// two types for viability with this routine.
static Value *convertValue(const DataLayout &DL, IRBuilder<> &IRB, Value *V,
                           Type *Ty) {
2040 assert(canConvertValue(DL, V->getType(), Ty) &&
2041 "Value not convertable to type");
  if (V->getType() == Ty)
    return V;
2044 if (V->getType()->isIntegerTy() && Ty->isPointerTy())
2045 return IRB.CreateIntToPtr(V, Ty);
2046 if (V->getType()->isPointerTy() && Ty->isIntegerTy())
2047 return IRB.CreatePtrToInt(V, Ty);
2049 return IRB.CreateBitCast(V, Ty);
2052 /// \brief Test whether the given alloca partition can be promoted to a vector.
2054 /// This is a quick test to check whether we can rewrite a particular alloca
2055 /// partition (and its newly formed alloca) into a vector alloca with only
2056 /// whole-vector loads and stores such that it could be promoted to a vector
2057 /// SSA value. We only can ensure this for a limited set of operations, and we
2058 /// don't want to do the rewrites unless we are confident that the result will
2059 /// be promotable, so we have an early test here.
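///
/// For illustration (an assumed example): a <4 x float> partition whose uses
/// are all whole-vector loads and stores, or loads and stores of exactly one
/// 4-byte element, passes this test; an access spanning part of an element or
/// two elements does not.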
static bool isVectorPromotionViable(const DataLayout &TD,
                                    Type *AllocaTy,
                                    AllocaPartitioning &P,
2063 uint64_t PartitionBeginOffset,
2064 uint64_t PartitionEndOffset,
2065 AllocaPartitioning::const_use_iterator I,
2066 AllocaPartitioning::const_use_iterator E) {
2067 VectorType *Ty = dyn_cast<VectorType>(AllocaTy);
2071 uint64_t VecSize = TD.getTypeSizeInBits(Ty);
2072 uint64_t ElementSize = Ty->getScalarSizeInBits();
2074 // While the definition of LLVM vectors is bitpacked, we don't support sizes
2075 // that aren't byte sized.
2076 if (ElementSize % 8)
2078 assert((VecSize % 8) == 0 && "vector size not a multiple of element size?");
2082 for (; I != E; ++I) {
    if (!I->U)
      continue; // Skip dead use.

2086 uint64_t BeginOffset = I->BeginOffset - PartitionBeginOffset;
2087 uint64_t BeginIndex = BeginOffset / ElementSize;
2088 if (BeginIndex * ElementSize != BeginOffset ||
2089 BeginIndex >= Ty->getNumElements())
2091 uint64_t EndOffset = I->EndOffset - PartitionBeginOffset;
2092 uint64_t EndIndex = EndOffset / ElementSize;
2093 if (EndIndex * ElementSize != EndOffset ||
2094 EndIndex > Ty->getNumElements())
2097 // FIXME: We should build shuffle vector instructions to handle
2098 // non-element-sized accesses.
2099 if ((EndOffset - BeginOffset) != ElementSize &&
2100 (EndOffset - BeginOffset) != VecSize)
2103 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I->U->getUser())) {
2104 if (MI->isVolatile())
2106 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(I->U->getUser())) {
2107 const AllocaPartitioning::MemTransferOffsets &MTO
2108 = P.getMemTransferOffsets(*MTI);
2109 if (!MTO.IsSplittable)
2112 } else if (I->U->get()->getType()->getPointerElementType()->isStructTy()) {
2113 // Disable vector promotion when there are loads or stores of an FCA.
2115 } else if (!isa<LoadInst>(I->U->getUser()) &&
2116 !isa<StoreInst>(I->U->getUser())) {
2123 /// \brief Test whether the given alloca partition can be promoted to an int.
2125 /// This is a quick test to check whether we can rewrite a particular alloca
2126 /// partition (and its newly formed alloca) into an integer alloca suitable for
2127 /// promotion to an SSA value. We only can ensure this for a limited set of
2128 /// operations, and we don't want to do the rewrites unless we are confident
2129 /// that the result will be promotable, so we have an early test here.
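///
/// For illustration (an assumed example): an i64 partition with at least one
/// whole-i64 load or store plus smaller integer loads and stores inside its 8
/// bytes is accepted; any volatile access or non-integer load or store causes
/// the test to fail.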
static bool isIntegerPromotionViable(const DataLayout &TD,
                                     Type *AllocaTy,
2132 uint64_t AllocBeginOffset,
2133 AllocaPartitioning &P,
2134 AllocaPartitioning::const_use_iterator I,
2135 AllocaPartitioning::const_use_iterator E) {
2136 IntegerType *Ty = dyn_cast<IntegerType>(AllocaTy);
  if (!Ty || 8*TD.getTypeStoreSize(Ty) != Ty->getBitWidth())
    return false;

  // Check the uses to ensure the uses are (likely) promotable integer uses.
  // Also ensure that the alloca has a covering load or store. We don't want
  // to promote because of some other unsplittable entry (which we may make
  // splittable later) and lose the ability to promote each element access.
2143 // splittable later) and lose the ability to promote each element access.
2144 bool WholeAllocaOp = false;
2145 for (; I != E; ++I) {
2147 continue; // Skip dead use.
2149 // We can't reasonably handle cases where the load or store extends past
    // the end of the alloca's type and into its padding.
    if ((I->EndOffset - AllocBeginOffset) > TD.getTypeStoreSize(Ty))
      return false;
2154 if (LoadInst *LI = dyn_cast<LoadInst>(I->U->getUser())) {
2155 if (LI->isVolatile() || !LI->getType()->isIntegerTy())
2157 if (LI->getType() == Ty)
2158 WholeAllocaOp = true;
2159 } else if (StoreInst *SI = dyn_cast<StoreInst>(I->U->getUser())) {
2160 if (SI->isVolatile() || !SI->getValueOperand()->getType()->isIntegerTy())
2162 if (SI->getValueOperand()->getType() == Ty)
2163 WholeAllocaOp = true;
2164 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I->U->getUser())) {
2165 if (MI->isVolatile())
2167 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(I->U->getUser())) {
2168 const AllocaPartitioning::MemTransferOffsets &MTO
2169 = P.getMemTransferOffsets(*MTI);
2170 if (!MTO.IsSplittable)
2177 return WholeAllocaOp;
2181 /// \brief Visitor to rewrite instructions using a partition of an alloca to
2182 /// use a new alloca.
2184 /// Also implements the rewriting to vector-based accesses when the partition
2185 /// passes the isVectorPromotionViable predicate. Most of the rewriting logic
class AllocaPartitionRewriter : public InstVisitor<AllocaPartitionRewriter,
                                                   bool> {
2189 // Befriend the base class so it can delegate to private visit methods.
2190 friend class llvm::InstVisitor<AllocaPartitionRewriter, bool>;
2192 const DataLayout &TD;
  AllocaPartitioning &P;
  SROA &Pass;
2195 AllocaInst &OldAI, &NewAI;
  const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset;
  Type *NewAllocaTy;
2199 // If we are rewriting an alloca partition which can be written as pure
2200 // vector operations, we stash extra information here. When VecTy is
2201 // non-null, we have some strict guarantees about the rewriten alloca:
2202 // - The new alloca is exactly the size of the vector type here.
2203 // - The accesses all either map to the entire vector or to a single
2205 // - The set of accessing instructions is only one of those handled above
2206 // in isVectorPromotionViable. Generally these are the same access kinds
  // which are promotable via mem2reg.
  VectorType *VecTy;
  Type *ElementTy;
  uint64_t ElementSize;
2212 // This is a convenience and flag variable that will be null unless the new
2213 // alloca has a promotion-targeted integer type due to passing
  // isIntegerPromotionViable above. If it is non-null, the desired
2215 // integer type will be stored here for easy access during rewriting.
2216 IntegerType *IntPromotionTy;
2218 // The offset of the partition user currently being rewritten.
2219 uint64_t BeginOffset, EndOffset;
2221 Instruction *OldPtr;
2223 // The name prefix to use when rewriting instructions for this alloca.
2224 std::string NamePrefix;
2227 AllocaPartitionRewriter(const DataLayout &TD, AllocaPartitioning &P,
2228 AllocaPartitioning::iterator PI,
2229 SROA &Pass, AllocaInst &OldAI, AllocaInst &NewAI,
2230 uint64_t NewBeginOffset, uint64_t NewEndOffset)
2231 : TD(TD), P(P), Pass(Pass),
2232 OldAI(OldAI), NewAI(NewAI),
2233 NewAllocaBeginOffset(NewBeginOffset),
2234 NewAllocaEndOffset(NewEndOffset),
2235 NewAllocaTy(NewAI.getAllocatedType()),
2236 VecTy(), ElementTy(), ElementSize(), IntPromotionTy(),
2237 BeginOffset(), EndOffset() {
2240 /// \brief Visit the users of the alloca partition and rewrite them.
2241 bool visitUsers(AllocaPartitioning::const_use_iterator I,
2242 AllocaPartitioning::const_use_iterator E) {
2243 if (isVectorPromotionViable(TD, NewAI.getAllocatedType(), P,
                                NewAllocaBeginOffset, NewAllocaEndOffset,
                                I, E)) {
2247 VecTy = cast<VectorType>(NewAI.getAllocatedType());
2248 ElementTy = VecTy->getElementType();
2249 assert((VecTy->getScalarSizeInBits() % 8) == 0 &&
2250 "Only multiple-of-8 sized vector elements are viable");
2251 ElementSize = VecTy->getScalarSizeInBits() / 8;
2252 } else if (isIntegerPromotionViable(TD, NewAI.getAllocatedType(),
2253 NewAllocaBeginOffset, P, I, E)) {
2254 IntPromotionTy = cast<IntegerType>(NewAI.getAllocatedType());
2256 bool CanSROA = true;
2257 for (; I != E; ++I) {
      if (!I->U)
        continue; // Skip dead uses.
2260 BeginOffset = I->BeginOffset;
2261 EndOffset = I->EndOffset;
2263 OldPtr = cast<Instruction>(I->U->get());
2264 NamePrefix = (Twine(NewAI.getName()) + "." + Twine(BeginOffset)).str();
2265 CanSROA &= visit(cast<Instruction>(I->U->getUser()));
2277 // Every instruction which can end up as a user must have a rewrite rule.
2278 bool visitInstruction(Instruction &I) {
2279 DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n");
2280 llvm_unreachable("No rewrite rule for this instruction!");
2283 Twine getName(const Twine &Suffix) {
2284 return NamePrefix + Suffix;
2287 Value *getAdjustedAllocaPtr(IRBuilder<> &IRB, Type *PointerTy) {
2288 assert(BeginOffset >= NewAllocaBeginOffset);
2289 APInt Offset(TD.getPointerSizeInBits(), BeginOffset - NewAllocaBeginOffset);
2290 return getAdjustedPtr(IRB, TD, &NewAI, Offset, PointerTy, getName(""));
2293 /// \brief Compute suitable alignment to access an offset into the new alloca.
2294 unsigned getOffsetAlign(uint64_t Offset) {
2295 unsigned NewAIAlign = NewAI.getAlignment();
2297 NewAIAlign = TD.getABITypeAlignment(NewAI.getAllocatedType());
2298 return MinAlign(NewAIAlign, Offset);
2301 /// \brief Compute suitable alignment to access this partition of the new
2303 unsigned getPartitionAlign() {
2304 return getOffsetAlign(BeginOffset - NewAllocaBeginOffset);
2307 /// \brief Compute suitable alignment to access a type at an offset of the
2310 /// \returns zero if the type's ABI alignment is a suitable alignment,
2311 /// otherwise returns the maximal suitable alignment.
2312 unsigned getOffsetTypeAlign(Type *Ty, uint64_t Offset) {
2313 unsigned Align = getOffsetAlign(Offset);
2314 return Align == TD.getABITypeAlignment(Ty) ? 0 : Align;
2317 /// \brief Compute suitable alignment to access a type at the beginning of
2318 /// this partition of the new alloca.
2320 /// See \c getOffsetTypeAlign for details; this routine delegates to it.
2321 unsigned getPartitionTypeAlign(Type *Ty) {
2322 return getOffsetTypeAlign(Ty, BeginOffset - NewAllocaBeginOffset);
2325 ConstantInt *getIndex(IRBuilder<> &IRB, uint64_t Offset) {
2326 assert(VecTy && "Can only call getIndex when rewriting a vector");
2327 uint64_t RelOffset = Offset - NewAllocaBeginOffset;
2328 assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds");
2329 uint32_t Index = RelOffset / ElementSize;
2330 assert(Index * ElementSize == RelOffset);
2331 return IRB.getInt32(Index);
2334 Value *extractInteger(IRBuilder<> &IRB, IntegerType *TargetTy,
2336 assert(IntPromotionTy && "Alloca is not an integer we can extract from");
2337 Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2339 assert(Offset >= NewAllocaBeginOffset && "Out of bounds offset");
2340 uint64_t RelOffset = Offset - NewAllocaBeginOffset;
2341 assert(TD.getTypeStoreSize(TargetTy) + RelOffset <=
2342 TD.getTypeStoreSize(IntPromotionTy) &&
2343 "Element load outside of alloca store");
2344 uint64_t ShAmt = 8*RelOffset;
2345 if (TD.isBigEndian())
2346 ShAmt = 8*(TD.getTypeStoreSize(IntPromotionTy) -
2347 TD.getTypeStoreSize(TargetTy) - RelOffset);
2349 V = IRB.CreateLShr(V, ShAmt, getName(".shift"));
2350 if (TargetTy != IntPromotionTy) {
2351 assert(TargetTy->getBitWidth() < IntPromotionTy->getBitWidth() &&
2352 "Cannot extract to a larger integer!");
2353 V = IRB.CreateTrunc(V, TargetTy, getName(".trunc"));
2358 StoreInst *insertInteger(IRBuilder<> &IRB, Value *V, uint64_t Offset) {
2359 IntegerType *Ty = cast<IntegerType>(V->getType());
2360 if (Ty == IntPromotionTy)
2361 return IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
2363 assert(Ty->getBitWidth() < IntPromotionTy->getBitWidth() &&
2364 "Cannot insert a larger integer!");
2365 V = IRB.CreateZExt(V, IntPromotionTy, getName(".ext"));
2366 assert(Offset >= NewAllocaBeginOffset && "Out of bounds offset");
2367 uint64_t RelOffset = Offset - NewAllocaBeginOffset;
2368 assert(TD.getTypeStoreSize(Ty) + RelOffset <=
2369 TD.getTypeStoreSize(IntPromotionTy) &&
2370 "Element store outside of alloca store");
2371 uint64_t ShAmt = 8*RelOffset;
2372 if (TD.isBigEndian())
2373 ShAmt = 8*(TD.getTypeStoreSize(IntPromotionTy) - TD.getTypeStoreSize(Ty)
2376 V = IRB.CreateShl(V, ShAmt, getName(".shift"));
2378 APInt Mask = ~Ty->getMask().zext(IntPromotionTy->getBitWidth()).shl(ShAmt);
2379 Value *Old = IRB.CreateAnd(IRB.CreateAlignedLoad(&NewAI,
2380 NewAI.getAlignment(),
2381 getName(".oldload")),
2382 Mask, getName(".mask"));
2383 return IRB.CreateAlignedStore(IRB.CreateOr(Old, V, getName(".insert")),
2384 &NewAI, NewAI.getAlignment());
2387 void deleteIfTriviallyDead(Value *V) {
2388 Instruction *I = cast<Instruction>(V);
2389 if (isInstructionTriviallyDead(I))
2390 Pass.DeadInsts.push_back(I);
2393 bool rewriteVectorizedLoadInst(IRBuilder<> &IRB, LoadInst &LI, Value *OldOp) {
2395 if (LI.getType() == VecTy->getElementType() ||
2396 BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset) {
2397 Result = IRB.CreateExtractElement(
2398 IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), getName(".load")),
2399 getIndex(IRB, BeginOffset), getName(".extract"));
2401 Result = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2404 if (Result->getType() != LI.getType())
2405 Result = convertValue(TD, IRB, Result, LI.getType());
2406 LI.replaceAllUsesWith(Result);
2407 Pass.DeadInsts.push_back(&LI);
2409 DEBUG(dbgs() << " to: " << *Result << "\n");
2413 bool rewriteIntegerLoad(IRBuilder<> &IRB, LoadInst &LI) {
2414 assert(!LI.isVolatile());
2415 Value *Result = extractInteger(IRB, cast<IntegerType>(LI.getType()),
2417 LI.replaceAllUsesWith(Result);
2418 Pass.DeadInsts.push_back(&LI);
2419 DEBUG(dbgs() << " to: " << *Result << "\n");
2423 bool visitLoadInst(LoadInst &LI) {
2424 DEBUG(dbgs() << " original: " << LI << "\n");
2425 Value *OldOp = LI.getOperand(0);
2426 assert(OldOp == OldPtr);
2427 IRBuilder<> IRB(&LI);
    if (VecTy)
      return rewriteVectorizedLoadInst(IRB, LI, OldOp);
    if (IntPromotionTy)
      return rewriteIntegerLoad(IRB, LI);
2434 if (BeginOffset == NewAllocaBeginOffset &&
2435 canConvertValue(TD, NewAllocaTy, LI.getType())) {
2436 Value *NewLI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2437 LI.isVolatile(), getName(".load"));
2438 Value *NewV = convertValue(TD, IRB, NewLI, LI.getType());
2439 LI.replaceAllUsesWith(NewV);
2440 Pass.DeadInsts.push_back(&LI);
2442 DEBUG(dbgs() << " to: " << *NewLI << "\n");
2443 return !LI.isVolatile();
2446 Value *NewPtr = getAdjustedAllocaPtr(IRB,
2447 LI.getPointerOperand()->getType());
2448 LI.setOperand(0, NewPtr);
2449 LI.setAlignment(getPartitionTypeAlign(LI.getType()));
2450 DEBUG(dbgs() << " to: " << LI << "\n");
2452 deleteIfTriviallyDead(OldOp);
2453 return NewPtr == &NewAI && !LI.isVolatile();
  bool rewriteVectorizedStoreInst(IRBuilder<> &IRB, StoreInst &SI,
                                  Value *OldOp) {
2458 Value *V = SI.getValueOperand();
2459 if (V->getType() == ElementTy ||
2460 BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset) {
2461 if (V->getType() != ElementTy)
2462 V = convertValue(TD, IRB, V, ElementTy);
2463 LoadInst *LI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
2465 V = IRB.CreateInsertElement(LI, V, getIndex(IRB, BeginOffset),
2466 getName(".insert"));
2467 } else if (V->getType() != VecTy) {
2468 V = convertValue(TD, IRB, V, VecTy);
2470 StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
2471 Pass.DeadInsts.push_back(&SI);
2474 DEBUG(dbgs() << " to: " << *Store << "\n");
2478 bool rewriteIntegerStore(IRBuilder<> &IRB, StoreInst &SI) {
2479 assert(!SI.isVolatile());
2480 StoreInst *Store = insertInteger(IRB, SI.getValueOperand(), BeginOffset);
2481 Pass.DeadInsts.push_back(&SI);
2483 DEBUG(dbgs() << " to: " << *Store << "\n");
2487 bool visitStoreInst(StoreInst &SI) {
2488 DEBUG(dbgs() << " original: " << SI << "\n");
2489 Value *OldOp = SI.getOperand(1);
2490 assert(OldOp == OldPtr);
2491 IRBuilder<> IRB(&SI);
    if (VecTy)
      return rewriteVectorizedStoreInst(IRB, SI, OldOp);
    if (IntPromotionTy)
      return rewriteIntegerStore(IRB, SI);
2498 Type *ValueTy = SI.getValueOperand()->getType();
2500 // Strip all inbounds GEPs and pointer casts to try to dig out any root
2501 // alloca that should be re-examined after promoting this alloca.
2502 if (ValueTy->isPointerTy())
2503 if (AllocaInst *AI = dyn_cast<AllocaInst>(SI.getValueOperand()
2504 ->stripInBoundsOffsets()))
2505 Pass.PostPromotionWorklist.insert(AI);
2507 if (BeginOffset == NewAllocaBeginOffset &&
2508 canConvertValue(TD, ValueTy, NewAllocaTy)) {
2509 Value *NewV = convertValue(TD, IRB, SI.getValueOperand(), NewAllocaTy);
2510 StoreInst *NewSI = IRB.CreateAlignedStore(NewV, &NewAI, NewAI.getAlignment(),
2513 Pass.DeadInsts.push_back(&SI);
2515 DEBUG(dbgs() << " to: " << *NewSI << "\n");
2516 return !SI.isVolatile();
2519 Value *NewPtr = getAdjustedAllocaPtr(IRB,
2520 SI.getPointerOperand()->getType());
2521 SI.setOperand(1, NewPtr);
2522 SI.setAlignment(getPartitionTypeAlign(SI.getValueOperand()->getType()));
2523 DEBUG(dbgs() << " to: " << SI << "\n");
2525 deleteIfTriviallyDead(OldOp);
2526 return NewPtr == &NewAI && !SI.isVolatile();
2529 bool visitMemSetInst(MemSetInst &II) {
2530 DEBUG(dbgs() << " original: " << II << "\n");
2531 IRBuilder<> IRB(&II);
2532 assert(II.getRawDest() == OldPtr);
2534 // If the memset has a variable size, it cannot be split, just adjust the
2535 // pointer to the new alloca.
2536 if (!isa<Constant>(II.getLength())) {
2537 II.setDest(getAdjustedAllocaPtr(IRB, II.getRawDest()->getType()));
2538 Type *CstTy = II.getAlignmentCst()->getType();
2539 II.setAlignment(ConstantInt::get(CstTy, getPartitionAlign()));
2541 deleteIfTriviallyDead(OldPtr);
2545 // Record this instruction for deletion.
2546 if (Pass.DeadSplitInsts.insert(&II))
2547 Pass.DeadInsts.push_back(&II);
2549 Type *AllocaTy = NewAI.getAllocatedType();
2550 Type *ScalarTy = AllocaTy->getScalarType();
2552 // If this doesn't map cleanly onto the alloca type, and that type isn't
2553 // a single value type, just emit a memset.
2554 if (!VecTy && (BeginOffset != NewAllocaBeginOffset ||
2555 EndOffset != NewAllocaEndOffset ||
2556 !AllocaTy->isSingleValueType() ||
2557 !TD.isLegalInteger(TD.getTypeSizeInBits(ScalarTy)))) {
2558 Type *SizeTy = II.getLength()->getType();
2559 Constant *Size = ConstantInt::get(SizeTy, EndOffset - BeginOffset);
2561 = IRB.CreateMemSet(getAdjustedAllocaPtr(IRB,
2562 II.getRawDest()->getType()),
2563 II.getValue(), Size, getPartitionAlign(),
2566 DEBUG(dbgs() << " to: " << *New << "\n");
2570 // If we can represent this as a simple value, we have to build the actual
2571 // value to store, which requires expanding the byte present in memset to
2572 // a sensible representation for the alloca type. This is essentially
2573 // splatting the byte to a sufficiently wide integer, bitcasting to the
2574 // desired scalar type, and splatting it across any desired vector type.
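    // For illustration (an assumed example, not from the original comment):
    // a memset byte of 0xAB over an i32-typed alloca becomes the constant
    // 0xABABABAB, formed by zero-extending the byte and multiplying by
    // 0x01010101 (the all-ones i32 udiv the zero-extended all-ones i8).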
2575 Value *V = II.getValue();
2576 IntegerType *VTy = cast<IntegerType>(V->getType());
2577 Type *IntTy = Type::getIntNTy(VTy->getContext(),
2578 TD.getTypeSizeInBits(ScalarTy));
2579 if (TD.getTypeSizeInBits(ScalarTy) > VTy->getBitWidth())
2580 V = IRB.CreateMul(IRB.CreateZExt(V, IntTy, getName(".zext")),
2581 ConstantExpr::getUDiv(
2582 Constant::getAllOnesValue(IntTy),
2583 ConstantExpr::getZExt(
2584 Constant::getAllOnesValue(V->getType()),
2586 getName(".isplat"));
2587 if (V->getType() != ScalarTy) {
2588 if (ScalarTy->isPointerTy())
2589 V = IRB.CreateIntToPtr(V, ScalarTy);
2590 else if (ScalarTy->isPrimitiveType() || ScalarTy->isVectorTy())
2591 V = IRB.CreateBitCast(V, ScalarTy);
2592 else if (ScalarTy->isIntegerTy())
2593 llvm_unreachable("Computed different integer types with equal widths");
2595 llvm_unreachable("Invalid scalar type");
2598 // If this is an element-wide memset of a vectorizable alloca, insert it.
2599 if (VecTy && (BeginOffset > NewAllocaBeginOffset ||
2600 EndOffset < NewAllocaEndOffset)) {
2601 StoreInst *Store = IRB.CreateAlignedStore(
2602 IRB.CreateInsertElement(IRB.CreateAlignedLoad(&NewAI,
2603 NewAI.getAlignment(),
2605 V, getIndex(IRB, BeginOffset),
2606 getName(".insert")),
2607 &NewAI, NewAI.getAlignment());
2609 DEBUG(dbgs() << " to: " << *Store << "\n");
2613 // Splat to a vector if needed.
2614 if (VectorType *VecTy = dyn_cast<VectorType>(AllocaTy)) {
2615 VectorType *SplatSourceTy = VectorType::get(V->getType(), 1);
2616 V = IRB.CreateShuffleVector(
2617 IRB.CreateInsertElement(UndefValue::get(SplatSourceTy), V,
2618 IRB.getInt32(0), getName(".vsplat.insert")),
2619 UndefValue::get(SplatSourceTy),
2620 ConstantVector::getSplat(VecTy->getNumElements(), IRB.getInt32(0)),
2621 getName(".vsplat.shuffle"));
2622 assert(V->getType() == VecTy);
2625 Value *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
2628 DEBUG(dbgs() << " to: " << *New << "\n");
2629 return !II.isVolatile();
2632 bool visitMemTransferInst(MemTransferInst &II) {
2633 // Rewriting of memory transfer instructions can be a bit tricky. We break
2634 // them into two categories: split intrinsics and unsplit intrinsics.
2636 DEBUG(dbgs() << " original: " << II << "\n");
2637 IRBuilder<> IRB(&II);
2639 assert(II.getRawSource() == OldPtr || II.getRawDest() == OldPtr);
2640 bool IsDest = II.getRawDest() == OldPtr;
2642 const AllocaPartitioning::MemTransferOffsets &MTO
2643 = P.getMemTransferOffsets(II);
2645 // Compute the relative offset within the transfer.
2646 unsigned IntPtrWidth = TD.getPointerSizeInBits();
2647 APInt RelOffset(IntPtrWidth, BeginOffset - (IsDest ? MTO.DestBegin
2648 : MTO.SourceBegin));
2650 unsigned Align = II.getAlignment();
2652 Align = MinAlign(RelOffset.zextOrTrunc(64).getZExtValue(),
2653 MinAlign(II.getAlignment(), getPartitionAlign()));
2655 // For unsplit intrinsics, we simply modify the source and destination
2656 // pointers in place. This isn't just an optimization, it is a matter of
2657 // correctness. With unsplit intrinsics we may be dealing with transfers
2658 // within a single alloca before SROA ran, or with transfers that have
2659 // a variable length. We may also be dealing with memmove instead of
2660 // memcpy, and so simply updating the pointers is the necessary for us to
2661 // update both source and dest of a single call.
2662 if (!MTO.IsSplittable) {
2663 Value *OldOp = IsDest ? II.getRawDest() : II.getRawSource();
      if (IsDest)
        II.setDest(getAdjustedAllocaPtr(IRB, II.getRawDest()->getType()));
      else
        II.setSource(getAdjustedAllocaPtr(IRB, II.getRawSource()->getType()));
2669 Type *CstTy = II.getAlignmentCst()->getType();
2670 II.setAlignment(ConstantInt::get(CstTy, Align));
2672 DEBUG(dbgs() << " to: " << II << "\n");
2673 deleteIfTriviallyDead(OldOp);
2676 // For split transfer intrinsics we have an incredibly useful assurance:
2677 // the source and destination do not reside within the same alloca, and at
2678 // least one of them does not escape. This means that we can replace
2679 // memmove with memcpy, and we don't need to worry about all manner of
2680 // downsides to splitting and transforming the operations.
2682 // If this doesn't map cleanly onto the alloca type, and that type isn't
2683 // a single value type, just emit a memcpy.
    bool EmitMemCpy
      = !VecTy && (BeginOffset != NewAllocaBeginOffset ||
2686 EndOffset != NewAllocaEndOffset ||
2687 !NewAI.getAllocatedType()->isSingleValueType());
2689 // If we're just going to emit a memcpy, the alloca hasn't changed, and the
2690 // size hasn't been shrunk based on analysis of the viable range, this is
2692 if (EmitMemCpy && &OldAI == &NewAI) {
2693 uint64_t OrigBegin = IsDest ? MTO.DestBegin : MTO.SourceBegin;
2694 uint64_t OrigEnd = IsDest ? MTO.DestEnd : MTO.SourceEnd;
2695 // Ensure the start lines up.
2696 assert(BeginOffset == OrigBegin);
2699 // Rewrite the size as needed.
2700 if (EndOffset != OrigEnd)
2701 II.setLength(ConstantInt::get(II.getLength()->getType(),
2702 EndOffset - BeginOffset));
2705 // Record this instruction for deletion.
2706 if (Pass.DeadSplitInsts.insert(&II))
2707 Pass.DeadInsts.push_back(&II);
2709 bool IsVectorElement = VecTy && (BeginOffset > NewAllocaBeginOffset ||
2710 EndOffset < NewAllocaEndOffset);
2712 Type *OtherPtrTy = IsDest ? II.getRawSource()->getType()
2713 : II.getRawDest()->getType();
2715 OtherPtrTy = IsVectorElement ? VecTy->getElementType()->getPointerTo()
2718 // Compute the other pointer, folding as much as possible to produce
2719 // a single, simple GEP in most cases.
2720 Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest();
2721 OtherPtr = getAdjustedPtr(IRB, TD, OtherPtr, RelOffset, OtherPtrTy,
2722 getName("." + OtherPtr->getName()));
2724 // Strip all inbounds GEPs and pointer casts to try to dig out any root
2725 // alloca that should be re-examined after rewriting this instruction.
    if (AllocaInst *AI
          = dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets()))
2728 Pass.Worklist.insert(AI);
    if (EmitMemCpy) {
      Value *OurPtr
        = getAdjustedAllocaPtr(IRB, IsDest ? II.getRawDest()->getType()
2733 : II.getRawSource()->getType());
2734 Type *SizeTy = II.getLength()->getType();
2735 Constant *Size = ConstantInt::get(SizeTy, EndOffset - BeginOffset);
2737 CallInst *New = IRB.CreateMemCpy(IsDest ? OurPtr : OtherPtr,
2738 IsDest ? OtherPtr : OurPtr,
2739 Size, Align, II.isVolatile());
2741 DEBUG(dbgs() << " to: " << *New << "\n");
2745 // Note that we clamp the alignment to 1 here as a 0 alignment for a memcpy
2746 // is equivalent to 1, but that isn't true if we end up rewriting this as
2751 Value *SrcPtr = OtherPtr;
2752 Value *DstPtr = &NewAI;
    if (!IsDest)
      std::swap(SrcPtr, DstPtr);
    Value *Src;
    if (IsVectorElement && !IsDest) {
2758 // We have to extract rather than load.
2759 Src = IRB.CreateExtractElement(
2760 IRB.CreateAlignedLoad(SrcPtr, Align, getName(".copyload")),
2761 getIndex(IRB, BeginOffset),
2762 getName(".copyextract"));
2764 Src = IRB.CreateAlignedLoad(SrcPtr, Align, II.isVolatile(),
2765 getName(".copyload"));
2768 if (IsVectorElement && IsDest) {
2769 // We have to insert into a loaded copy before storing.
2770 Src = IRB.CreateInsertElement(
2771 IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), getName(".load")),
2772 Src, getIndex(IRB, BeginOffset),
2773 getName(".insert"));
2776 StoreInst *Store = cast<StoreInst>(
2777 IRB.CreateAlignedStore(Src, DstPtr, Align, II.isVolatile()));
2779 DEBUG(dbgs() << " to: " << *Store << "\n");
2780 return !II.isVolatile();
2783 bool visitIntrinsicInst(IntrinsicInst &II) {
2784 assert(II.getIntrinsicID() == Intrinsic::lifetime_start ||
2785 II.getIntrinsicID() == Intrinsic::lifetime_end);
2786 DEBUG(dbgs() << " original: " << II << "\n");
2787 IRBuilder<> IRB(&II);
2788 assert(II.getArgOperand(1) == OldPtr);
2790 // Record this instruction for deletion.
2791 if (Pass.DeadSplitInsts.insert(&II))
2792 Pass.DeadInsts.push_back(&II);
    ConstantInt *Size
      = ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()),
2796 EndOffset - BeginOffset);
2797 Value *Ptr = getAdjustedAllocaPtr(IRB, II.getArgOperand(1)->getType());
    Value *New;
    if (II.getIntrinsicID() == Intrinsic::lifetime_start)
2800 New = IRB.CreateLifetimeStart(Ptr, Size);
2802 New = IRB.CreateLifetimeEnd(Ptr, Size);
2804 DEBUG(dbgs() << " to: " << *New << "\n");
2808 bool visitPHINode(PHINode &PN) {
2809 DEBUG(dbgs() << " original: " << PN << "\n");
2811 // We would like to compute a new pointer in only one place, but have it be
2812 // as local as possible to the PHI. To do that, we re-use the location of
2813 // the old pointer, which necessarily must be in the right position to
2814 // dominate the PHI.
2815 IRBuilder<> PtrBuilder(cast<Instruction>(OldPtr));
2817 Value *NewPtr = getAdjustedAllocaPtr(PtrBuilder, OldPtr->getType());
2818 // Replace the operands which were using the old pointer.
2819 User::op_iterator OI = PN.op_begin(), OE = PN.op_end();
2820 for (; OI != OE; ++OI)
2824 DEBUG(dbgs() << " to: " << PN << "\n");
2825 deleteIfTriviallyDead(OldPtr);
2829 bool visitSelectInst(SelectInst &SI) {
2830 DEBUG(dbgs() << " original: " << SI << "\n");
2831 IRBuilder<> IRB(&SI);
2833 // Find the operand we need to rewrite here.
2834 bool IsTrueVal = SI.getTrueValue() == OldPtr;
    if (IsTrueVal)
      assert(SI.getFalseValue() != OldPtr && "Pointer is both operands!");
    else
      assert(SI.getFalseValue() == OldPtr && "Pointer isn't an operand!");
2840 Value *NewPtr = getAdjustedAllocaPtr(IRB, OldPtr->getType());
2841 SI.setOperand(IsTrueVal ? 1 : 2, NewPtr);
2842 DEBUG(dbgs() << " to: " << SI << "\n");
2843 deleteIfTriviallyDead(OldPtr);
2851 /// \brief Visitor to rewrite aggregate loads and stores as scalar.
2853 /// This pass aggressively rewrites all aggregate loads and stores on
2854 /// a particular pointer (or any pointer derived from it which we can identify)
2855 /// with scalar loads and stores.
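///
/// For illustration (an assumed example, in the spirit of the rewrites this
/// visitor performs):
///   %fca = load { i32, float }* %ptr
/// becomes, roughly,
///   %gep0 = getelementptr inbounds { i32, float }* %ptr, i32 0, i32 0
///   %v0   = load i32* %gep0
///   %tmp  = insertvalue { i32, float } undef, i32 %v0, 0
///   %gep1 = getelementptr inbounds { i32, float }* %ptr, i32 0, i32 1
///   %v1   = load float* %gep1
///   %fca  = insertvalue { i32, float } %tmp, float %v1, 1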
2856 class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> {
2857 // Befriend the base class so it can delegate to private visit methods.
2858 friend class llvm::InstVisitor<AggLoadStoreRewriter, bool>;
2860 const DataLayout &TD;
2862 /// Queue of pointer uses to analyze and potentially rewrite.
2863 SmallVector<Use *, 8> Queue;
2865 /// Set to prevent us from cycling with phi nodes and loops.
2866 SmallPtrSet<User *, 8> Visited;
2868 /// The current pointer use being rewritten. This is used to dig up the used
2869 /// value (as opposed to the user).
2873 AggLoadStoreRewriter(const DataLayout &TD) : TD(TD) {}
2875 /// Rewrite loads and stores through a pointer and all pointers derived from
2877 bool rewrite(Instruction &I) {
    DEBUG(dbgs() << " Rewriting FCA loads and stores...\n");
    enqueueUsers(I);
    bool Changed = false;
2881 while (!Queue.empty()) {
2882 U = Queue.pop_back_val();
2883 Changed |= visit(cast<Instruction>(U->getUser()));
2889 /// Enqueue all the users of the given instruction for further processing.
2890 /// This uses a set to de-duplicate users.
2891 void enqueueUsers(Instruction &I) {
2892 for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE;
         ++UI)
      if (Visited.insert(*UI))
2895 Queue.push_back(&UI.getUse());
2898 // Conservative default is to not rewrite anything.
2899 bool visitInstruction(Instruction &I) { return false; }
2901 /// \brief Generic recursive split emission class.
2902 template <typename Derived>
2905 /// The builder used to form new instructions.
2907 /// The indices which to be used with insert- or extractvalue to select the
2908 /// appropriate value within the aggregate.
2909 SmallVector<unsigned, 4> Indices;
2910 /// The indices to a GEP instruction which will move Ptr to the correct slot
2911 /// within the aggregate.
2912 SmallVector<Value *, 4> GEPIndices;
2913 /// The base pointer of the original op, used as a base for GEPing the
2914 /// split operations.
2917 /// Initialize the splitter with an insertion point, Ptr and start with a
2918 /// single zero GEP index.
2919 OpSplitter(Instruction *InsertionPoint, Value *Ptr)
2920 : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr) {}
2923 /// \brief Generic recursive split emission routine.
2925 /// This method recursively splits an aggregate op (load or store) into
2926 /// scalar or vector ops. It splits recursively until it hits a single value
2927 /// and emits that single value operation via the template argument.
2929 /// The logic of this routine relies on GEPs and insertvalue and
2930 /// extractvalue all operating with the same fundamental index list, merely
2931 /// formatted differently (GEPs need actual values).
2933 /// \param Ty The type being split recursively into smaller ops.
2934 /// \param Agg The aggregate value being built up or stored, depending on
2935 /// whether this is splitting a load or a store respectively.
2936 void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) {
2937 if (Ty->isSingleValueType())
2938 return static_cast<Derived *>(this)->emitFunc(Ty, Agg, Name);
2940 if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
2941 unsigned OldSize = Indices.size();
2943 for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size;
2945 assert(Indices.size() == OldSize && "Did not return to the old size");
2946 Indices.push_back(Idx);
2947 GEPIndices.push_back(IRB.getInt32(Idx));
2948 emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx));
2949 GEPIndices.pop_back();
2955 if (StructType *STy = dyn_cast<StructType>(Ty)) {
2956 unsigned OldSize = Indices.size();
2958 for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size;
2960 assert(Indices.size() == OldSize && "Did not return to the old size");
2961 Indices.push_back(Idx);
2962 GEPIndices.push_back(IRB.getInt32(Idx));
2963 emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx));
2964 GEPIndices.pop_back();
2970 llvm_unreachable("Only arrays and structs are aggregate loadable types");
2974 struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> {
2975 LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr)
2976 : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr) {}
2978 /// Emit a leaf load of a single value. This is called at the leaves of the
2979 /// recursive emission to actually load values.
2980 void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) {
2981 assert(Ty->isSingleValueType());
2982 // Load the single value and insert it using the indices.
2983 Value *Load = IRB.CreateLoad(IRB.CreateInBoundsGEP(Ptr, GEPIndices,
2986 Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
2987 DEBUG(dbgs() << " to: " << *Load << "\n");
2991 bool visitLoadInst(LoadInst &LI) {
2992 assert(LI.getPointerOperand() == *U);
2993 if (!LI.isSimple() || LI.getType()->isSingleValueType())
2996 // We have an aggregate being loaded, split it apart.
2997 DEBUG(dbgs() << " original: " << LI << "\n");
2998 LoadOpSplitter Splitter(&LI, *U);
2999 Value *V = UndefValue::get(LI.getType());
3000 Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca");
3001 LI.replaceAllUsesWith(V);
3002 LI.eraseFromParent();
3006 struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> {
3007 StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr)
3008 : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr) {}
3010 /// Emit a leaf store of a single value. This is called at the leaves of the
3011 /// recursive emission to actually produce stores.
3012 void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) {
3013 assert(Ty->isSingleValueType());
3014 // Extract the single value and store it using the indices.
3015 Value *Store = IRB.CreateStore(
3016 IRB.CreateExtractValue(Agg, Indices, Name + ".extract"),
3017 IRB.CreateInBoundsGEP(Ptr, GEPIndices, Name + ".gep"));
3019 DEBUG(dbgs() << " to: " << *Store << "\n");
3023 bool visitStoreInst(StoreInst &SI) {
3024 if (!SI.isSimple() || SI.getPointerOperand() != *U)
3026 Value *V = SI.getValueOperand();
3027 if (V->getType()->isSingleValueType())
3030 // We have an aggregate being stored, split it apart.
3031 DEBUG(dbgs() << " original: " << SI << "\n");
3032 StoreOpSplitter Splitter(&SI, *U);
3033 Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca");
3034 SI.eraseFromParent();
  bool visitBitCastInst(BitCastInst &BC) {
    enqueueUsers(BC);
    return false;
  }

  bool visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    enqueueUsers(GEPI);
    return false;
  }

  bool visitPHINode(PHINode &PN) {
    enqueueUsers(PN);
    return false;
  }

  bool visitSelectInst(SelectInst &SI) {
    enqueueUsers(SI);
    return false;
  }
};
3060 /// \brief Strip aggregate type wrapping.
3062 /// This removes no-op aggregate types wrapping an underlying type. It will
3063 /// strip as many layers of types as it can without changing either the type
3064 /// size or the allocated size.
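///
/// For illustration (assumed examples): a { [1 x i32] } wrapper strips down to
/// the inner i32 because every layer has the same 4-byte size, while a
/// { i32, i32 } is left untouched since its first element does not span the
/// whole type.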
3065 static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) {
3066 if (Ty->isSingleValueType())
3069 uint64_t AllocSize = DL.getTypeAllocSize(Ty);
3070 uint64_t TypeSize = DL.getTypeSizeInBits(Ty);
  Type *InnerTy;
  if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
3074 InnerTy = ArrTy->getElementType();
3075 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
3076 const StructLayout *SL = DL.getStructLayout(STy);
3077 unsigned Index = SL->getElementContainingOffset(0);
3078 InnerTy = STy->getElementType(Index);
3083 if (AllocSize > DL.getTypeAllocSize(InnerTy) ||
3084 TypeSize > DL.getTypeSizeInBits(InnerTy))
3087 return stripAggregateTypeWrapping(DL, InnerTy);
3090 /// \brief Try to find a partition of the aggregate type passed in for a given
3091 /// offset and size.
3093 /// This recurses through the aggregate type and tries to compute a subtype
3094 /// based on the offset and size. When the offset and size span a sub-section
3095 /// of an array, it will even compute a new array type for that sub-section,
3096 /// and the same for structs.
3098 /// Note that this routine is very strict and tries to find a partition of the
3099 /// type which produces the *exact* right offset and size. It is not forgiving
3100 /// when the size or offset cause either end of type-based partition to be off.
3101 /// Also, this is a best-effort routine. It is reasonable to give up and not
3102 /// return a type if necessary.
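///
/// For illustration (assumed examples): within an [8 x i32], an offset of 8
/// with size 8 yields [2 x i32], offset 8 with size 4 yields i32, and an
/// offset landing in padding or straddling element boundaries yields no type.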
3103 static Type *getTypePartition(const DataLayout &TD, Type *Ty,
3104 uint64_t Offset, uint64_t Size) {
3105 if (Offset == 0 && TD.getTypeAllocSize(Ty) == Size)
3106 return stripAggregateTypeWrapping(TD, Ty);
3108 if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) {
3109 // We can't partition pointers...
3110 if (SeqTy->isPointerTy())
3113 Type *ElementTy = SeqTy->getElementType();
3114 uint64_t ElementSize = TD.getTypeAllocSize(ElementTy);
3115 uint64_t NumSkippedElements = Offset / ElementSize;
3116 if (ArrayType *ArrTy = dyn_cast<ArrayType>(SeqTy))
3117 if (NumSkippedElements >= ArrTy->getNumElements())
3119 if (VectorType *VecTy = dyn_cast<VectorType>(SeqTy))
3120 if (NumSkippedElements >= VecTy->getNumElements())
3122 Offset -= NumSkippedElements * ElementSize;
3124 // First check if we need to recurse.
3125 if (Offset > 0 || Size < ElementSize) {
3126 // Bail if the partition ends in a different array element.
3127 if ((Offset + Size) > ElementSize)
3129 // Recurse through the element type trying to peel off offset bytes.
3130 return getTypePartition(TD, ElementTy, Offset, Size);
3132 assert(Offset == 0);
3134 if (Size == ElementSize)
3135 return stripAggregateTypeWrapping(TD, ElementTy);
3136 assert(Size > ElementSize);
3137 uint64_t NumElements = Size / ElementSize;
3138 if (NumElements * ElementSize != Size)
3140 return ArrayType::get(ElementTy, NumElements);
3143 StructType *STy = dyn_cast<StructType>(Ty);
3147 const StructLayout *SL = TD.getStructLayout(STy);
3148 if (Offset >= SL->getSizeInBytes())
3150 uint64_t EndOffset = Offset + Size;
3151 if (EndOffset > SL->getSizeInBytes())
3154 unsigned Index = SL->getElementContainingOffset(Offset);
3155 Offset -= SL->getElementOffset(Index);
3157 Type *ElementTy = STy->getElementType(Index);
3158 uint64_t ElementSize = TD.getTypeAllocSize(ElementTy);
3159 if (Offset >= ElementSize)
3160 return 0; // The offset points into alignment padding.
3162 // See if any partition must be contained by the element.
3163 if (Offset > 0 || Size < ElementSize) {
3164 if ((Offset + Size) > ElementSize)
3166 return getTypePartition(TD, ElementTy, Offset, Size);
3168 assert(Offset == 0);
3170 if (Size == ElementSize)
3171 return stripAggregateTypeWrapping(TD, ElementTy);
3173 StructType::element_iterator EI = STy->element_begin() + Index,
3174 EE = STy->element_end();
3175 if (EndOffset < SL->getSizeInBytes()) {
3176 unsigned EndIndex = SL->getElementContainingOffset(EndOffset);
3177 if (Index == EndIndex)
3178 return 0; // Within a single element and its padding.
3180 // Don't try to form "natural" types if the elements don't line up with the
3182 // FIXME: We could potentially recurse down through the last element in the
3183 // sub-struct to find a natural end point.
3184 if (SL->getElementOffset(EndIndex) != EndOffset)
3187 assert(Index < EndIndex);
3188 EE = STy->element_begin() + EndIndex;
3191 // Try to build up a sub-structure.
3192 SmallVector<Type *, 4> ElementTys;
3194 ElementTys.push_back(*EI++);
3196 StructType *SubTy = StructType::get(STy->getContext(), ElementTys,
3198 const StructLayout *SubSL = TD.getStructLayout(SubTy);
3199 if (Size != SubSL->getSizeInBytes())
3200 return 0; // The sub-struct doesn't have quite the size needed.
3205 /// \brief Rewrite an alloca partition's users.
3207 /// This routine drives both of the rewriting goals of the SROA pass. It tries
3208 /// to rewrite uses of an alloca partition to be conducive for SSA value
3209 /// promotion. If the partition needs a new, more refined alloca, this will
3210 /// build that new alloca, preserving as much type information as possible, and
3211 /// rewrite the uses of the old alloca to point at the new one and have the
3212 /// appropriate new offsets. It also evaluates how successful the rewrite was
3213 /// at enabling promotion and if it was successful queues the alloca to be
3215 bool SROA::rewriteAllocaPartition(AllocaInst &AI,
3216 AllocaPartitioning &P,
3217 AllocaPartitioning::iterator PI) {
3218 uint64_t AllocaSize = PI->EndOffset - PI->BeginOffset;
3219 bool IsLive = false;
  for (AllocaPartitioning::use_iterator UI = P.use_begin(PI),
                                        UE = P.use_end(PI);
       UI != UE && !IsLive; ++UI)
    if (UI->U)
      IsLive = true;
  if (!IsLive)
    return false; // No live uses left of this partition.
3228 DEBUG(dbgs() << "Speculating PHIs and selects in partition "
3229 << "[" << PI->BeginOffset << "," << PI->EndOffset << ")\n");
3231 PHIOrSelectSpeculator Speculator(*TD, P, *this);
3232 DEBUG(dbgs() << " speculating ");
3233 DEBUG(P.print(dbgs(), PI, ""));
3234 Speculator.visitUsers(PI);
3236 // Try to compute a friendly type for this partition of the alloca. This
3237 // won't always succeed, in which case we fall back to a legal integer type
3238 // or an i8 array of an appropriate size.
  Type *AllocaTy = 0;
  if (Type *PartitionTy = P.getCommonType(PI))
3241 if (TD->getTypeAllocSize(PartitionTy) >= AllocaSize)
3242 AllocaTy = PartitionTy;
3244 if (Type *PartitionTy = getTypePartition(*TD, AI.getAllocatedType(),
3245 PI->BeginOffset, AllocaSize))
3246 AllocaTy = PartitionTy;
  if (!AllocaTy ||
      (AllocaTy->isArrayTy() &&
3249 AllocaTy->getArrayElementType()->isIntegerTy())) &&
3250 TD->isLegalInteger(AllocaSize * 8))
3251 AllocaTy = Type::getIntNTy(*C, AllocaSize * 8);
3253 AllocaTy = ArrayType::get(Type::getInt8Ty(*C), AllocaSize);
3254 assert(TD->getTypeAllocSize(AllocaTy) >= AllocaSize);
3256 // Check for the case where we're going to rewrite to a new alloca of the
3257 // exact same type as the original, and with the same access offsets. In that
3258 // case, re-use the existing alloca, but still run through the rewriter to
  // perform phi and select speculation.
  AllocaInst *NewAI;
  if (AllocaTy == AI.getAllocatedType()) {
3262 assert(PI->BeginOffset == 0 &&
3263 "Non-zero begin offset but same alloca type");
3264 assert(PI == P.begin() && "Begin offset is zero on later partition");
3267 unsigned Alignment = AI.getAlignment();
3269 // The minimum alignment which users can rely on when the explicit
3270 // alignment is omitted or zero is that required by the ABI for this
3272 Alignment = TD->getABITypeAlignment(AI.getAllocatedType());
3274 Alignment = MinAlign(Alignment, PI->BeginOffset);
3275 // If we will get at least this much alignment from the type alone, leave
3276 // the alloca's alignment unconstrained.
3277 if (Alignment <= TD->getABITypeAlignment(AllocaTy))
3279 NewAI = new AllocaInst(AllocaTy, 0, Alignment,
3280 AI.getName() + ".sroa." + Twine(PI - P.begin()),
3285 DEBUG(dbgs() << "Rewriting alloca partition "
3286 << "[" << PI->BeginOffset << "," << PI->EndOffset << ") to: "
3289 // Track the high watermark of the post-promotion worklist. We will reset it
3290 // to this point if the alloca is not in fact scheduled for promotion.
3291 unsigned PPWOldSize = PostPromotionWorklist.size();
3293 AllocaPartitionRewriter Rewriter(*TD, P, PI, *this, AI, *NewAI,
3294 PI->BeginOffset, PI->EndOffset);
3295 DEBUG(dbgs() << " rewriting ");
3296 DEBUG(P.print(dbgs(), PI, ""));
3297 bool Promotable = Rewriter.visitUsers(P.use_begin(PI), P.use_end(PI));
3298 if (Promotable) {
3299 DEBUG(dbgs() << " and queuing for promotion\n");
3300 PromotableAllocas.push_back(NewAI);
3301 } else if (NewAI != &AI) {
3302 // If we can't promote the alloca, iterate on it to check for new
3303 // refinements exposed by splitting the current alloca. Don't iterate on an
3304 // alloca which didn't actually change and didn't get promoted.
3305 Worklist.insert(NewAI);
3306 }
3308 // Drop any post-promotion work items if promotion didn't happen.
3309 if (!Promotable)
3310 while (PostPromotionWorklist.size() > PPWOldSize)
3311 PostPromotionWorklist.pop_back();
3313 return true;
3314 }
3316 /// \brief Walks the partitioning of an alloca rewriting uses of each partition.
3317 bool SROA::splitAlloca(AllocaInst &AI, AllocaPartitioning &P) {
3318 bool Changed = false;
3319 for (AllocaPartitioning::iterator PI = P.begin(), PE = P.end(); PI != PE;
3320 ++PI)
3321 Changed |= rewriteAllocaPartition(AI, P, PI);
3323 return Changed;
3324 }
3326 /// \brief Analyze an alloca for SROA.
3328 /// This analyzes the alloca to ensure we can reason about it, builds
3329 /// a partitioning of the alloca, and then hands it off to be split and
3330 /// rewritten as needed.
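///
/// Note that a single run may both rewrite uses in place and introduce new,
/// smaller allocas; any new alloca that is not immediately promotable is fed
/// back into the worklist by rewriteAllocaPartition for a later iteration.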
3331 bool SROA::runOnAlloca(AllocaInst &AI) {
3332 DEBUG(dbgs() << "SROA alloca: " << AI << "\n");
3333 ++NumAllocasAnalyzed;
3335 // Special case dead allocas, as they're trivial.
3336 if (AI.use_empty()) {
3337 AI.eraseFromParent();
3338 return true;
3339 }
3341 // Skip alloca forms that this analysis can't handle.
3342 if (AI.isArrayAllocation() || !AI.getAllocatedType()->isSized() ||
3343 TD->getTypeAllocSize(AI.getAllocatedType()) == 0)
3344 return false;
3346 bool Changed = false;
3348 // First, split any FCA loads and stores touching this alloca to promote
3349 // better splitting and promotion opportunities.
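// (FCA = first-class aggregate: for example, a single load or store of a
// {i32, i32} struct value is rewritten into per-element operations so that
// each element can be split and promoted independently.)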
3350 AggLoadStoreRewriter AggRewriter(*TD);
3351 Changed |= AggRewriter.rewrite(AI);
3353 // Build the partition set using a recursive instruction-visiting builder.
3354 AllocaPartitioning P(*TD, AI);
3355 DEBUG(P.print(dbgs()));
3356 if (P.isEscaped())
3357 return Changed;
3359 // Delete all the dead users of this alloca before splitting and rewriting it.
3360 for (AllocaPartitioning::dead_user_iterator DI = P.dead_user_begin(),
3361 DE = P.dead_user_end();
3362 DI != DE; ++DI) {
3363 Changed = true;
3364 (*DI)->replaceAllUsesWith(UndefValue::get((*DI)->getType()));
3365 DeadInsts.push_back(*DI);
3366 }
3367 for (AllocaPartitioning::dead_op_iterator DO = P.dead_op_begin(),
3368 DE = P.dead_op_end();
3369 DO != DE; ++DO) {
3370 Value *OldV = **DO;
3371 // Clobber the use with an undef value.
3372 **DO = UndefValue::get(OldV->getType());
3373 if (Instruction *OldI = dyn_cast<Instruction>(OldV))
3374 if (isInstructionTriviallyDead(OldI)) {
3375 Changed = true;
3376 DeadInsts.push_back(OldI);
3377 }
3378 }
3380 // No partitions to split. Leave the dead alloca for a later pass to clean up.
3381 if (P.begin() == P.end())
3382 return Changed;
3384 return splitAlloca(AI, P) || Changed;
3385 }
3387 /// \brief Delete the dead instructions accumulated in this run.
3389 /// Recursively deletes the dead instructions we've accumulated. This is done
3390 /// at the very end to maximize locality of the recursive delete and to
3391 /// minimize the problems of invalidated instruction pointers as such pointers
3392 /// are used heavily in the intermediate stages of the algorithm.
3394 /// We also record the alloca instructions deleted here so that they aren't
3395 /// subsequently handed to mem2reg to promote.
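///
/// For example, deleting a dead store can leave the GEP computing its pointer
/// operand trivially dead; that GEP is then queued and deleted by the same
/// loop.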
3396 void SROA::deleteDeadInstructions(SmallPtrSet<AllocaInst*, 4> &DeletedAllocas) {
3397 DeadSplitInsts.clear();
3398 while (!DeadInsts.empty()) {
3399 Instruction *I = DeadInsts.pop_back_val();
3400 DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");
3402 for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
3403 if (Instruction *U = dyn_cast<Instruction>(*OI)) {
3404 // Zero out the operand and see if it becomes trivially dead.
3405 *OI = 0;
3406 if (isInstructionTriviallyDead(U))
3407 DeadInsts.push_back(U);
3408 }
3410 if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
3411 DeletedAllocas.insert(AI);
3413 ++NumDeleted;
3414 I->eraseFromParent();
3415 }
3416 }
3418 /// \brief Promote the allocas, using the best available technique.
3420 /// This attempts to promote whatever allocas have been identified as viable in
3421 /// the PromotableAllocas list. If that list is empty, there is nothing to do.
3422 /// If there is a domtree available, we attempt to promote using the full power
3423 /// of mem2reg. Otherwise, we build and use the AllocaPromoter above which is
3424 /// based on the SSAUpdater utilities. This function returns whether any
3425 /// promotion occurred.
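///
/// (The SSAUpdater path is taken when no DominatorTree is available or when
/// -force-ssa-updater is passed; see the ForceSSAUpdater option above.)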
3426 bool SROA::promoteAllocas(Function &F) {
3427 if (PromotableAllocas.empty())
3428 return false;
3430 NumPromoted += PromotableAllocas.size();
3432 if (DT && !ForceSSAUpdater) {
3433 DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
3434 PromoteMemToReg(PromotableAllocas, *DT);
3435 PromotableAllocas.clear();
3436 return true;
3437 }
3439 DEBUG(dbgs() << "Promoting allocas with SSAUpdater...\n");
3440 SSAUpdater SSA;
3441 DIBuilder DIB(*F.getParent());
3442 SmallVector<Instruction*, 64> Insts;
3444 for (unsigned Idx = 0, Size = PromotableAllocas.size(); Idx != Size; ++Idx) {
3445 AllocaInst *AI = PromotableAllocas[Idx];
3446 for (Value::use_iterator UI = AI->use_begin(), UE = AI->use_end();
3447 UI != UE;) {
3448 Instruction *I = cast<Instruction>(*UI++);
3449 // FIXME: Currently the SSAUpdater infrastructure doesn't reason about
3450 // lifetime intrinsics and so we strip them (and the bitcasts+GEPs
3451 // leading to them) here. Eventually it should use them to optimize the
3452 // scalar values produced.
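// (Concretely, llvm.lifetime.start/llvm.lifetime.end calls, and any bitcasts
// or GEPs that exist only to feed them, are erased below rather than handed
// to the AllocaPromoter.)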
3453 if (isa<BitCastInst>(I) || isa<GetElementPtrInst>(I)) {
3454 assert(onlyUsedByLifetimeMarkers(I) &&
3455 "Found a bitcast used outside of a lifetime marker.");
3456 while (!I->use_empty())
3457 cast<Instruction>(*I->use_begin())->eraseFromParent();
3458 I->eraseFromParent();
3459 continue;
3460 }
3461 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
3462 assert(II->getIntrinsicID() == Intrinsic::lifetime_start ||
3463 II->getIntrinsicID() == Intrinsic::lifetime_end);
3464 II->eraseFromParent();
3465 continue;
3466 }
3468 Insts.push_back(I);
3469 }
3470 AllocaPromoter(Insts, SSA, *AI, DIB).run(Insts);
3471 Insts.clear();
3472 }
3474 PromotableAllocas.clear();
3475 return true;
3476 }
3479 /// \brief A predicate to test whether an alloca belongs to a set.
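///
/// (Used with the remove_if algorithms in runOnFunction below to purge
/// allocas that deleteDeadInstructions has erased from the worklists.)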
3480 class IsAllocaInSet {
3481 typedef SmallPtrSet<AllocaInst *, 4> SetType;
3482 const SetType &Set;
3484 public:
3485 typedef AllocaInst *argument_type;
3487 IsAllocaInSet(const SetType &Set) : Set(Set) {}
3488 bool operator()(AllocaInst *AI) const { return Set.count(AI); }
3489 };
3492 bool SROA::runOnFunction(Function &F) {
3493 DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
3494 C = &F.getContext();
3495 TD = getAnalysisIfAvailable<DataLayout>();
3496 if (!TD) {
3497 DEBUG(dbgs() << " Skipping SROA -- no target data!\n");
3498 return false;
3499 }
3500 DT = getAnalysisIfAvailable<DominatorTree>();
3502 BasicBlock &EntryBB = F.getEntryBlock();
3503 for (BasicBlock::iterator I = EntryBB.begin(), E = llvm::prior(EntryBB.end());
3504 I != E; ++I)
3505 if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
3506 Worklist.insert(AI);
3508 bool Changed = false;
3509 // A set of deleted alloca instruction pointers which should be removed from
3510 // the list of promotable allocas.
3511 SmallPtrSet<AllocaInst *, 4> DeletedAllocas;
3513 do {
3514 while (!Worklist.empty()) {
3515 Changed |= runOnAlloca(*Worklist.pop_back_val());
3516 deleteDeadInstructions(DeletedAllocas);
3518 // Remove the deleted allocas from various lists so that we don't try to
3519 // continue processing them.
3520 if (!DeletedAllocas.empty()) {
3521 Worklist.remove_if(IsAllocaInSet(DeletedAllocas));
3522 PostPromotionWorklist.remove_if(IsAllocaInSet(DeletedAllocas));
3523 PromotableAllocas.erase(std::remove_if(PromotableAllocas.begin(),
3524 PromotableAllocas.end(),
3525 IsAllocaInSet(DeletedAllocas)),
3526 PromotableAllocas.end());
3527 DeletedAllocas.clear();
3528 }
3529 }
3531 Changed |= promoteAllocas(F);
3533 Worklist = PostPromotionWorklist;
3534 PostPromotionWorklist.clear();
3535 } while (!Worklist.empty());
3537 return Changed;
3538 }
3540 void SROA::getAnalysisUsage(AnalysisUsage &AU) const {
3541 if (RequiresDomTree)
3542 AU.addRequired<DominatorTree>();
3543 AU.setPreservesCFG();