//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well known scalar replacement of
// aggregates transformation.  This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible).  Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
// This combines a simple SRoA algorithm with the Mem2Reg algorithm because
// the two often interact, especially for C++ programs.  As such, iterating
// between SRoA and Mem2Reg until we run out of things to promote works well.
//
//===----------------------------------------------------------------------===//
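//
// As a purely illustrative sketch (hypothetical IR, not taken from a real
// test case), the pass rewrites:
//
//   %X = alloca { i32, float }
//   %A = getelementptr { i32, float }* %X, i32 0, i32 0
//   store i32 1, i32* %A
//
// into one alloca per member, which mem2reg can then promote to SSA values:
//
//   %X.0 = alloca i32
//   %X.1 = alloca float
//   store i32 1, i32* %X.0
//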

#define DEBUG_TYPE "scalarrepl"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
using namespace llvm;

STATISTIC(NumReplaced,  "Number of allocas broken up");
STATISTIC(NumPromoted,  "Number of allocas promoted");
STATISTIC(NumConverted, "Number of aggregates converted to scalar");
STATISTIC(NumGlobals,   "Number of allocas copied from constant global");

namespace {
  struct VISIBILITY_HIDDEN SROA : public FunctionPass {
    static char ID; // Pass identification, replacement for typeid
    explicit SROA(signed T = -1) : FunctionPass((intptr_t)&ID) {
      if (T == -1)
        SRThreshold = 128;
      else
        SRThreshold = T;
    }

    bool runOnFunction(Function &F);

    bool performScalarRepl(Function &F);
    bool performPromotion(Function &F);

    // getAnalysisUsage - This pass does not require any passes, but we know it
    // will not alter the CFG, so say so.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      AU.addRequired<DominanceFrontier>();
      AU.addRequired<TargetData>();
      AU.setPreservesCFG();
    }

  private:
    /// AllocaInfo - When analyzing uses of an alloca instruction, this captures
    /// information about the uses.  All these fields are initialized to false
    /// and set to true when something is learned.
    struct AllocaInfo {
      /// isUnsafe - This is set to true if the alloca cannot be SROA'd.
      bool isUnsafe : 1;

      /// needsCanon - This is set to true if there is some use of the alloca
      /// that requires canonicalization.
      bool needsCanon : 1;

      /// isMemCpySrc - This is true if this aggregate is memcpy'd from.
      bool isMemCpySrc : 1;

      /// isMemCpyDst - This is true if this aggregate is memcpy'd into.
      bool isMemCpyDst : 1;

      AllocaInfo()
        : isUnsafe(false), needsCanon(false),
          isMemCpySrc(false), isMemCpyDst(false) {}
    };

    unsigned SRThreshold;
    void MarkUnsafe(AllocaInfo &I) { I.isUnsafe = true; }

    int isSafeAllocaToScalarRepl(AllocationInst *AI);

    void isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
                               AllocaInfo &Info);
    void isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI,
                          AllocaInfo &Info);
    void isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
                                        unsigned OpNo, AllocaInfo &Info);
    void isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocationInst *AI,
                                        AllocaInfo &Info);

    void DoScalarReplacement(AllocationInst *AI,
                             std::vector<AllocationInst*> &WorkList);
    void CanonicalizeAllocaUsers(AllocationInst *AI);
    AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);

    void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                    SmallVector<AllocaInst*, 32> &NewElts);

    const Type *CanConvertToScalar(Value *V, bool &IsNotTrivial);
    void ConvertToScalar(AllocationInst *AI, const Type *Ty);
    void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset);
    Value *ConvertUsesOfLoadToScalar(LoadInst *LI, AllocaInst *NewAI,
                                     unsigned Offset);
    Value *ConvertUsesOfStoreToScalar(StoreInst *SI, AllocaInst *NewAI,
                                      unsigned Offset);
    static Instruction *isOnlyCopiedFromConstantGlobal(AllocationInst *AI);
  };
}

char SROA::ID = 0;
static RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");

// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass(signed int Threshold) {
  return new SROA(Threshold);
}

bool SROA::runOnFunction(Function &F) {
  bool Changed = performPromotion(F);
  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange) break;   // No need to repromote if no scalarrepl
    Changed = true;
    LocalChange = performPromotion(F);
    if (!LocalChange) break;   // No need to re-scalarrepl if no promotion
  }

  return Changed;
}

bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  DominatorTree     &DT = getAnalysis<DominatorTree>();
  DominanceFrontier &DF = getAnalysis<DominanceFrontier>();

  BasicBlock &BB = F.getEntryBlock();  // Get the entry node for the function

  bool Changed = false;

  while (1) {
    Allocas.clear();

    // Find allocas that are safe to promote, by looking at all instructions in
    // the entry node
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I))       // Is it an alloca?
        if (isAllocaPromotable(AI))
          Allocas.push_back(AI);

    if (Allocas.empty()) break;

    PromoteMemToReg(Allocas, DT, DF);
    NumPromoted += Allocas.size();
    Changed = true;
  }

  return Changed;
}
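
// For illustration (hypothetical IR): an alloca such as
//   %tmp = alloca i32
//   store i32 7, i32* %tmp
//   %v = load i32* %tmp
// is promotable, and PromoteMemToReg replaces the load with the SSA value 7.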

// performScalarRepl - This algorithm is a simple worklist driven algorithm,
// which runs on all of the malloc/alloca instructions in the function, removing
// them if they are only used by getelementptr instructions.
//
bool SROA::performScalarRepl(Function &F) {
  std::vector<AllocationInst*> WorkList;

  // Scan the entry basic block, adding any alloca's and mallocs to the worklist
  BasicBlock &BB = F.getEntryBlock();
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (AllocationInst *A = dyn_cast<AllocationInst>(I))
      WorkList.push_back(A);

  const TargetData &TD = getAnalysis<TargetData>();

  // Process the worklist
  bool Changed = false;
  while (!WorkList.empty()) {
    AllocationInst *AI = WorkList.back();
    WorkList.pop_back();

    // Handle dead allocas trivially.  These can be formed by SROA'ing arrays
    // with unused elements.
    if (AI->use_empty()) {
      AI->eraseFromParent();
      Changed = true;
      continue;
    }

    // If we can turn this aggregate value (potentially with casts) into a
    // simple scalar value that can be mem2reg'd into a register value.
    bool IsNotTrivial = false;
    if (const Type *ActualType = CanConvertToScalar(AI, IsNotTrivial))
      if (IsNotTrivial && ActualType != Type::VoidTy) {
        ConvertToScalar(AI, ActualType);
        Changed = true;
        continue;
      }

    // Check to see if we can perform the core SROA transformation.  We cannot
    // transform the allocation instruction if it is an array allocation
    // (allocations OF arrays are ok though), and an allocation of a scalar
    // value cannot be decomposed at all.
    if (!AI->isArrayAllocation() &&
        (isa<StructType>(AI->getAllocatedType()) ||
         isa<ArrayType>(AI->getAllocatedType())) &&
        AI->getAllocatedType()->isSized() &&
        TD.getABITypeSize(AI->getAllocatedType()) < SRThreshold) {
      // Check that all of the users of the allocation are capable of being
      // transformed.
      switch (isSafeAllocaToScalarRepl(AI)) {
      default: assert(0 && "Unexpected value!");
      case 0:  // Not safe to scalar replace.
        break;
      case 1:  // Safe, but requires cleanup/canonicalizations first
        CanonicalizeAllocaUsers(AI);
        // FALL THROUGH.
      case 3:  // Safe to scalar replace.
        DoScalarReplacement(AI, WorkList);
        Changed = true;
        continue;
      }
    }

    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global.  If this is the case, we can change all users to use
    // the constant global instead.  This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    if (Instruction *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) {
      DOUT << "Found alloca equal to global: " << *AI;
      DOUT << "  memcpy = " << *TheCopy;
      Constant *TheSrc = cast<Constant>(TheCopy->getOperand(2));
      AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType()));
      TheCopy->eraseFromParent();  // Don't mutate the global.
      AI->eraseFromParent();
      ++NumGlobals;
      Changed = true;
      continue;
    }

    // Otherwise, couldn't process this.
  }

  return Changed;
}
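
// Illustrative sketch (hypothetical IR) of the constant-global case handled
// above: given
//   @C = constant [4 x i32] [ i32 1, i32 2, i32 3, i32 4 ]
//   %A = alloca [4 x i32]
//   ; ... %A is memcpy'd from @C and only read afterwards ...
// every use of %A is rewritten to use @C and both the memcpy and the alloca
// are deleted.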

/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
/// predicate, do SROA now.
void SROA::DoScalarReplacement(AllocationInst *AI,
                               std::vector<AllocationInst*> &WorkList) {
  DOUT << "Found inst to SROA: " << *AI;
  SmallVector<AllocaInst*, 32> ElementAllocas;
  if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
    ElementAllocas.reserve(ST->getNumContainedTypes());
    for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
                                      AI->getAlignment(),
                                      AI->getName() + "." + utostr(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  } else {
    const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
    ElementAllocas.reserve(AT->getNumElements());
    const Type *ElTy = AT->getElementType();
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
                                      AI->getName() + "." + utostr(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  }

  // Now that we have created the alloca instructions that we want to use,
  // expand the getelementptr instructions to use them.
  //
  while (!AI->use_empty()) {
    Instruction *User = cast<Instruction>(AI->use_back());
    if (BitCastInst *BCInst = dyn_cast<BitCastInst>(User)) {
      RewriteBitCastUserOfAlloca(BCInst, AI, ElementAllocas);
      BCInst->eraseFromParent();
      continue;
    }

    GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
    // We now know that the GEP is of the form: GEP <ptr>, 0, <cst>
    unsigned Idx =
      (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();

    assert(Idx < ElementAllocas.size() && "Index out of range?");
    AllocaInst *AllocaToUse = ElementAllocas[Idx];

    Value *RepValue;
    if (GEPI->getNumOperands() == 3) {
      // Do not insert a new getelementptr instruction with zero indices, only
      // to have it optimized out later.
      RepValue = AllocaToUse;
    } else {
      // We are indexing deeply into the structure, so we still need a
      // getelement ptr instruction to finish the indexing.  This may be
      // expanded itself once the worklist is rerun.
      //
      SmallVector<Value*, 8> NewArgs;
      NewArgs.push_back(Constant::getNullValue(Type::Int32Ty));
      NewArgs.append(GEPI->op_begin()+3, GEPI->op_end());
      RepValue = GetElementPtrInst::Create(AllocaToUse, NewArgs.begin(),
                                           NewArgs.end(), "", GEPI);
      RepValue->takeName(GEPI);
    }

    // If this GEP is to the start of the aggregate, check for memcpys.
    if (Idx == 0) {
      bool IsStartOfAggregateGEP = true;
      for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i) {
        if (!isa<ConstantInt>(GEPI->getOperand(i))) {
          IsStartOfAggregateGEP = false;
          break;
        }
        if (!cast<ConstantInt>(GEPI->getOperand(i))->isZero()) {
          IsStartOfAggregateGEP = false;
          break;
        }
      }

      if (IsStartOfAggregateGEP)
        RewriteBitCastUserOfAlloca(GEPI, AI, ElementAllocas);
    }

    // Move all of the users over to the new GEP.
    GEPI->replaceAllUsesWith(RepValue);
    // Delete the old GEP
    GEPI->eraseFromParent();
  }

  // Finally, delete the Alloca instruction
  AI->eraseFromParent();
  NumReplaced++;
}
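
// Illustrative sketch (hypothetical IR): for %X = alloca { i32, float },
// DoScalarReplacement creates "%X.0 = alloca i32" and "%X.1 = alloca float",
// then rewrites a use such as
//   %A = getelementptr { i32, float }* %X, i32 0, i32 1
// to refer to %X.1 directly.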

/// isSafeElementUse - Check to see if this use is an allowed use for a
/// getelementptr instruction of an array aggregate allocation.  isFirstElt
/// indicates whether Ptr is known to point to the start of the aggregate.
///
void SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI,
                            AllocaInfo &Info) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I) {
    Instruction *User = cast<Instruction>(*I);
    switch (User->getOpcode()) {
    case Instruction::Load:  break;
    case Instruction::Store:
      // Store is ok if storing INTO the pointer, not storing the pointer
      if (User->getOperand(0) == Ptr) return MarkUnsafe(Info);
      break;
    case Instruction::GetElementPtr: {
      GetElementPtrInst *GEP = cast<GetElementPtrInst>(User);
      bool AreAllZeroIndices = isFirstElt;
      if (GEP->getNumOperands() > 1) {
        if (!isa<ConstantInt>(GEP->getOperand(1)) ||
            !cast<ConstantInt>(GEP->getOperand(1))->isZero())
          // Using pointer arithmetic to navigate the array.
          return MarkUnsafe(Info);

        if (AreAllZeroIndices) {
          for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) {
            if (!isa<ConstantInt>(GEP->getOperand(i)) ||
                !cast<ConstantInt>(GEP->getOperand(i))->isZero()) {
              AreAllZeroIndices = false;
              break;
            }
          }
        }
      }
      isSafeElementUse(GEP, AreAllZeroIndices, AI, Info);
      if (Info.isUnsafe) return;
      break;
    }
    case Instruction::BitCast:
      if (isFirstElt) {
        isSafeUseOfBitCastedAllocation(cast<BitCastInst>(User), AI, Info);
        if (Info.isUnsafe) return;
        break;
      }
      DOUT << "  Transformation preventing inst: " << *User;
      return MarkUnsafe(Info);
    case Instruction::Call:
      if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
        if (isFirstElt) {
          isSafeMemIntrinsicOnAllocation(MI, AI, I.getOperandNo(), Info);
          if (Info.isUnsafe) return;
          break;
        }
      }
      DOUT << "  Transformation preventing inst: " << *User;
      return MarkUnsafe(Info);
    default:
      DOUT << "  Transformation preventing inst: " << *User;
      return MarkUnsafe(Info);
    }
  }
  return;  // All users look ok :)
}
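
// For illustration (hypothetical IR): a use such as
//   %p = getelementptr [4 x i32]* %A, i32 1
// navigates the array with pointer arithmetic (non-zero first index), so
// isSafeElementUse marks the alloca unsafe and SROA gives up on it.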

/// AllUsersAreLoads - Return true if all users of this value are loads.
static bool AllUsersAreLoads(Value *Ptr) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I)
    if (cast<Instruction>(*I)->getOpcode() != Instruction::Load)
      return false;
  return true;
}

/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an
/// aggregate allocation.
///
void SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
                                 AllocaInfo &Info) {
  if (BitCastInst *C = dyn_cast<BitCastInst>(User))
    return isSafeUseOfBitCastedAllocation(C, AI, Info);

  GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User);
  if (GEPI == 0)
    return MarkUnsafe(Info);

  gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI);

  // The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>".
  if (I == E ||
      I.getOperand() != Constant::getNullValue(I.getOperand()->getType())) {
    return MarkUnsafe(Info);
  }

  ++I;
  if (I == E) return MarkUnsafe(Info);  // ran out of GEP indices??

  bool IsAllZeroIndices = true;

  // If this is a use of an array allocation, do a bit more checking for sanity.
  if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
    uint64_t NumElements = AT->getNumElements();

    if (ConstantInt *Idx = dyn_cast<ConstantInt>(I.getOperand())) {
      IsAllZeroIndices &= Idx->isZero();

      // Check to make sure that index falls within the array.  If not,
      // something funny is going on, so we won't do the optimization.
      //
      if (Idx->getZExtValue() >= NumElements)
        return MarkUnsafe(Info);

      // We cannot scalar repl this level of the array unless any array
      // sub-indices are in-range constants.  In particular, consider:
      // A[0][i].  We cannot know that the user isn't doing invalid things like
      // allowing i to index an out-of-range subscript that accesses A[1].
      //
      // Scalar replacing *just* the outer index of the array is probably not
      // going to be a win anyway, so just give up.
      for (++I; I != E && (isa<ArrayType>(*I) || isa<VectorType>(*I)); ++I) {
        uint64_t NumElements;
        if (const ArrayType *SubArrayTy = dyn_cast<ArrayType>(*I))
          NumElements = SubArrayTy->getNumElements();
        else
          NumElements = cast<VectorType>(*I)->getNumElements();

        ConstantInt *IdxVal = dyn_cast<ConstantInt>(I.getOperand());
        if (!IdxVal) return MarkUnsafe(Info);
        if (IdxVal->getZExtValue() >= NumElements)
          return MarkUnsafe(Info);
        IsAllZeroIndices &= IdxVal->isZero();
      }

    } else {
      IsAllZeroIndices = 0;

      // If this is an array index and the index is not constant, we cannot
      // promote... that is unless the array has exactly one or two elements in
      // it, in which case we CAN promote it, but we have to canonicalize this
      // out if this is the only problem.
      if ((NumElements == 1 || NumElements == 2) &&
          AllUsersAreLoads(GEPI)) {
        Info.needsCanon = true;
        return;  // Canonicalization required!
      }
      return MarkUnsafe(Info);
    }
  }

  // If there are any non-simple uses of this getelementptr, make sure to reject
  // them.
  return isSafeElementUse(GEPI, IsAllZeroIndices, AI, Info);
}

/// isSafeMemIntrinsicOnAllocation - Return true if the specified memory
/// intrinsic can be promoted by SROA.  At this point, we know that the operand
/// of the memintrinsic is a pointer to the beginning of the allocation.
void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
                                          unsigned OpNo, AllocaInfo &Info) {
  // If not constant length, give up.
  ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
  if (!Length) return MarkUnsafe(Info);

  // If not the whole aggregate, give up.
  const TargetData &TD = getAnalysis<TargetData>();
  if (Length->getZExtValue() !=
      TD.getABITypeSize(AI->getType()->getElementType()))
    return MarkUnsafe(Info);

  // We only know about memcpy/memset/memmove.
  if (!isa<MemCpyInst>(MI) && !isa<MemSetInst>(MI) && !isa<MemMoveInst>(MI))
    return MarkUnsafe(Info);

  // Otherwise, we can transform it.  Determine whether this is a memcpy/set
  // into or out of the aggregate.
  if (OpNo == 1)
    Info.isMemCpyDst = true;
  else {
    assert(OpNo == 2);
    Info.isMemCpySrc = true;
  }
}
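
// For illustration (hypothetical IR): for "%A = alloca { i32, i32 }", which
// is 8 bytes under a typical ABI, a memset of %A with constant length 8 is
// accepted here, while a partial memset of length 4 or a memset with a
// variable length is rejected.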

/// isSafeUseOfBitCastedAllocation - Return true if all users of this bitcast
/// are safe to transform.
void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI,
                                          AllocaInfo &Info) {
  for (Value::use_iterator UI = BC->use_begin(), E = BC->use_end();
       UI != E; ++UI) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(UI)) {
      isSafeUseOfBitCastedAllocation(BCU, AI, Info);
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) {
      isSafeMemIntrinsicOnAllocation(MI, AI, UI.getOperandNo(), Info);
    } else {
      return MarkUnsafe(Info);
    }
    if (Info.isUnsafe) return;
  }
}

/// RewriteBitCastUserOfAlloca - BCInst (transitively) bitcasts AI, or indexes
/// to its first element.  Transform users of the cast to use the new values
/// instead.
void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts) {
  Constant *Zero = Constant::getNullValue(Type::Int32Ty);
  const TargetData &TD = getAnalysis<TargetData>();

  Value::use_iterator UI = BCInst->use_begin(), UE = BCInst->use_end();
  while (UI != UE) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(*UI)) {
      RewriteBitCastUserOfAlloca(BCU, AI, NewElts);
      ++UI;
      BCU->eraseFromParent();
      continue;
    }

    // Otherwise, must be memcpy/memmove/memset of the entire aggregate.  Split
    // into one per element.
    MemIntrinsic *MI = dyn_cast<MemIntrinsic>(*UI);

    // If it's not a mem intrinsic, it must be some other user of a gep of the
    // first pointer.  Just leave these alone.
    if (!MI) {
      ++UI;
      continue;
    }

    // If this is a memcpy/memmove, construct the other pointer as the
    // appropriate type.
    Value *OtherPtr = 0;
    if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(MI)) {
      if (BCInst == MCI->getRawDest())
        OtherPtr = MCI->getRawSource();
      else {
        assert(BCInst == MCI->getRawSource());
        OtherPtr = MCI->getRawDest();
      }
    } else if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (BCInst == MMI->getRawDest())
        OtherPtr = MMI->getRawSource();
      else {
        assert(BCInst == MMI->getRawSource());
        OtherPtr = MMI->getRawDest();
      }
    }

    // If there is an other pointer, we want to convert it to the same pointer
    // type as AI has, so we can GEP through it.
    if (OtherPtr) {
      // It is likely that OtherPtr is a bitcast, if so, remove it.
      if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr))
        OtherPtr = BC->getOperand(0);
      if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr))
        if (BCE->getOpcode() == Instruction::BitCast)
          OtherPtr = BCE->getOperand(0);

      // If the pointer is not the right type, insert a bitcast to the right
      // type.
      if (OtherPtr->getType() != AI->getType())
        OtherPtr = new BitCastInst(OtherPtr, AI->getType(), OtherPtr->getName(),
                                   MI);
    }

    // Process each element of the aggregate.
    Value *TheFn = MI->getOperand(0);
    const Type *BytePtrTy = MI->getRawDest()->getType();
    bool SROADest = MI->getRawDest() == BCInst;

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // If this is a memcpy/memmove, emit a GEP of the other element address.
      Value *OtherElt = 0;
      if (OtherPtr) {
        Value *Idx[2];
        Idx[0] = Zero;
        Idx[1] = ConstantInt::get(Type::Int32Ty, i);
        OtherElt = GetElementPtrInst::Create(OtherPtr, Idx, Idx + 2,
                                             OtherPtr->getNameStr()+"."+utostr(i),
                                             MI);
      }

      Value *EltPtr = NewElts[i];
      const Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType();

      // If we got down to a scalar, insert a load or store as appropriate.
      if (EltTy->isFirstClassType()) {
        if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
          Value *Elt = new LoadInst(SROADest ? OtherElt : EltPtr, "tmp",
                                    MI);
          new StoreInst(Elt, SROADest ? EltPtr : OtherElt, MI);
          continue;
        }

        assert(isa<MemSetInst>(MI));

        // If the stored element is zero (common case), just store a null
        // constant.
        Constant *StoreVal;
        if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) {
          if (CI->isZero()) {
            StoreVal = Constant::getNullValue(EltTy);  // 0.0, null, 0, <0,0>
          } else {
            // If EltTy is a vector type, get the element type.
            const Type *ValTy = EltTy;
            if (const VectorType *VTy = dyn_cast<VectorType>(ValTy))
              ValTy = VTy->getElementType();

            // Construct an integer with the right value.
            unsigned EltSize = TD.getTypeSizeInBits(ValTy);
            APInt OneVal(EltSize, CI->getZExtValue());
            APInt TotalVal(OneVal);
            // Set each byte.
            for (unsigned i = 0; 8*i < EltSize; ++i) {
              TotalVal = TotalVal.shl(8);
              TotalVal |= OneVal;
            }

            // Convert the integer value to the appropriate type.
            StoreVal = ConstantInt::get(TotalVal);
            if (isa<PointerType>(ValTy))
              StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
            else if (ValTy->isFloatingPoint())
              StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
            assert(StoreVal->getType() == ValTy && "Type mismatch!");

            // If the requested value was a vector constant, create it.
            if (EltTy != ValTy) {
              unsigned NumElts = cast<VectorType>(EltTy)->getNumElements();
              SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
              StoreVal = ConstantVector::get(&Elts[0], NumElts);
            }
          }
          new StoreInst(StoreVal, EltPtr, MI);
          continue;
        }
        // Otherwise, if we're storing a byte variable, use a memset call for
        // this element.
      }

      // Cast the element pointer to BytePtrTy.
      if (EltPtr->getType() != BytePtrTy)
        EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getNameStr(), MI);

      // Cast the other pointer (if we have one) to BytePtrTy.
      if (OtherElt && OtherElt->getType() != BytePtrTy)
        OtherElt = new BitCastInst(OtherElt, BytePtrTy, OtherElt->getNameStr(),
                                   MI);

      unsigned EltSize = TD.getABITypeSize(EltTy);

      // Finally, insert the meminst for this element.
      if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
        Value *Ops[] = {
          SROADest ? EltPtr : OtherElt,  // Dest ptr
          SROADest ? OtherElt : EltPtr,  // Src ptr
          ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
          Zero  // Align
        };
        CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
      } else {
        assert(isa<MemSetInst>(MI));
        Value *Ops[] = {
          EltPtr, MI->getOperand(2),  // Dest, Value,
          ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
          Zero  // Align
        };
        CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
      }
    }

    // Finally, MI is now dead, as we've modified its actions to occur on all of
    // the elements of the aggregate.
    ++UI;
    MI->eraseFromParent();
  }
}
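
// Illustrative sketch (hypothetical IR): a constant memset covering all of
// "%X = alloca { i32, i32 }" is rewritten by the code above into one direct
// store of the computed constant into each element alloca (%X.0 and %X.1),
// after which the original intrinsic call is deleted.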

/// HasPadding - Return true if the specified type has any structure or
/// alignment padding, false otherwise.
static bool HasPadding(const Type *Ty, const TargetData &TD,
                       bool inPacked = false) {
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TD.getStructLayout(STy);
    unsigned PrevFieldBitOffset = 0;
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned FieldBitOffset = SL->getElementOffsetInBits(i);

      // Padding in sub-elements?
      if (HasPadding(STy->getElementType(i), TD, STy->isPacked()))
        return true;

      // Check to see if there is any padding between this element and the
      // previous one.
      if (i) {
        unsigned PrevFieldEnd =
          PrevFieldBitOffset+TD.getTypeSizeInBits(STy->getElementType(i-1));
        if (PrevFieldEnd < FieldBitOffset)
          return true;
      }

      PrevFieldBitOffset = FieldBitOffset;
    }

    // Check for tail padding.
    if (unsigned EltCount = STy->getNumElements()) {
      unsigned PrevFieldEnd = PrevFieldBitOffset +
        TD.getTypeSizeInBits(STy->getElementType(EltCount-1));
      if (PrevFieldEnd < SL->getSizeInBits())
        return true;
    }

  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    return HasPadding(ATy->getElementType(), TD, false);
  } else if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    return HasPadding(VTy->getElementType(), TD, false);
  }
  return inPacked ?
    false : TD.getTypeSizeInBits(Ty) != TD.getABITypeSizeInBits(Ty);
}
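
// For illustration (hypothetical types): on a target that aligns i32 to 32
// bits, { i8, i32 } has 24 bits of padding between its fields, so HasPadding
// returns true for it; { i32, i32 } has none and returns false.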

/// isSafeAllocaToScalarRepl - Check to see if the specified allocation of
/// an aggregate can be broken down into elements.  Return 0 if not, 3 if safe,
/// or 1 if safe after canonicalization has been performed.
///
int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) {
  // Loop over the use list of the alloca.  We can only transform it if all of
  // the users are safe to transform.
  AllocaInfo Info;

  for (Value::use_iterator I = AI->use_begin(), E = AI->use_end();
       I != E; ++I) {
    isSafeUseOfAllocation(cast<Instruction>(*I), AI, Info);
    if (Info.isUnsafe) {
      DOUT << "Cannot transform: " << *AI << "  due to user: " << **I;
      return 0;
    }
  }

  // Okay, we know all the users are promotable.  If the aggregate is a memcpy
  // source and destination, we have to be careful.  In particular, the memcpy
  // could be moving around elements that live in structure padding of the LLVM
  // types, but may actually be used.  In these cases, we refuse to promote the
  // aggregate.
  if (Info.isMemCpySrc && Info.isMemCpyDst &&
      HasPadding(AI->getType()->getElementType(), getAnalysis<TargetData>()))
    return 0;

  // If we require cleanup, return 1, otherwise return 3.
  return Info.needsCanon ? 1 : 3;
}

/// CanonicalizeAllocaUsers - If SROA reported that it can promote the specified
/// allocation, but only if cleaned up, perform the cleanups required.
void SROA::CanonicalizeAllocaUsers(AllocationInst *AI) {
  // At this point, we know that the end result will be SROA'd and promoted, so
  // we can insert ugly code if required so long as sroa+mem2reg will clean it
  // up.
  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
       UI != E; ) {
    GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(*UI++);
    if (!GEPI) continue;
    gep_type_iterator I = gep_type_begin(GEPI);
    ++I;

    if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
      uint64_t NumElements = AT->getNumElements();

      if (!isa<ConstantInt>(I.getOperand())) {
        if (NumElements == 1) {
          GEPI->setOperand(2, Constant::getNullValue(Type::Int32Ty));
        } else {
          assert(NumElements == 2 && "Unhandled case!");
          // All users of the GEP must be loads.  At each use of the GEP, insert
          // two loads of the appropriate indexed GEP and select between them.
          Value *IsOne = new ICmpInst(ICmpInst::ICMP_NE, I.getOperand(),
                              Constant::getNullValue(I.getOperand()->getType()),
                              "isone", GEPI);
          // Insert the new GEP instructions, which are properly indexed.
          SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end());
          Indices[1] = Constant::getNullValue(Type::Int32Ty);
          Value *ZeroIdx = GetElementPtrInst::Create(GEPI->getOperand(0),
                                                     Indices.begin(),
                                                     Indices.end(),
                                                     GEPI->getName()+".0", GEPI);
          Indices[1] = ConstantInt::get(Type::Int32Ty, 1);
          Value *OneIdx = GetElementPtrInst::Create(GEPI->getOperand(0),
                                                    Indices.begin(),
                                                    Indices.end(),
                                                    GEPI->getName()+".1", GEPI);
          // Replace all loads of the variable index GEP with loads from both
          // indexes and a select.
          while (!GEPI->use_empty()) {
            LoadInst *LI = cast<LoadInst>(GEPI->use_back());
            Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI);
            Value *One  = new LoadInst(OneIdx , LI->getName()+".1", LI);
            Value *R = SelectInst::Create(IsOne, One, Zero, LI->getName(), LI);
            LI->replaceAllUsesWith(R);
            LI->eraseFromParent();
          }
          GEPI->eraseFromParent();
        }
      }
    }
  }
}
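
// Illustrative sketch (hypothetical IR): a variable-index load from a
// two-element array,
//   %p = getelementptr [2 x i32]* %A, i32 0, i32 %i
//   %v = load i32* %p
// becomes two constant-index loads and a select on "%i != 0", which the next
// SROA+mem2reg iteration can then promote.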

/// MergeInType - Add the 'In' type to the accumulated type so far.  If the
/// types are incompatible, return true, otherwise update Accum and return
/// false.
///
/// There are three cases we handle here:
///   1) An effectively-integer union, where the pieces are stored into as
///      smaller integers (common with byte swap and other idioms).
///   2) A union of vector types of the same size and potentially its elements.
///      Here we turn element accesses into insert/extract element operations.
///   3) A union of scalar types, such as int/float or int/pointer.  Here we
///      merge together into integers, allowing the xform to work with #1 as
///      well.
static bool MergeInType(const Type *In, const Type *&Accum,
                        const TargetData &TD) {
  // If this is our first type, just use it.
  const VectorType *PTy;
  if (Accum == Type::VoidTy || In == Accum) {
    Accum = In;
  } else if (In == Type::VoidTy) {
    // Noop.
  } else if (In->isInteger() && Accum->isInteger()) {   // integer union.
    // Otherwise pick whichever type is larger.
    if (cast<IntegerType>(In)->getBitWidth() >
        cast<IntegerType>(Accum)->getBitWidth())
      Accum = In;
  } else if (isa<PointerType>(In) && isa<PointerType>(Accum)) {
    // Pointer unions just stay as one of the pointers.
  } else if (isa<VectorType>(In) || isa<VectorType>(Accum)) {
    if ((PTy = dyn_cast<VectorType>(Accum)) &&
        PTy->getElementType() == In) {
      // Accum is a vector, and we are accessing an element: ok.
    } else if ((PTy = dyn_cast<VectorType>(In)) &&
               PTy->getElementType() == Accum) {
      // In is a vector, and accum is an element: ok, remember In.
      Accum = In;
    } else if ((PTy = dyn_cast<VectorType>(In)) && isa<VectorType>(Accum) &&
               PTy->getBitWidth() == cast<VectorType>(Accum)->getBitWidth()) {
      // Two vectors of the same size: keep Accum.
    } else {
      // Cannot insert a short into a <4 x int> or handle
      // <2 x int> -> <4 x int>
      return true;
    }
  } else {
    // Pointer/FP/Integer unions merge together as integers.
    switch (Accum->getTypeID()) {
    case Type::PointerTyID:   Accum = TD.getIntPtrType(); break;
    case Type::FloatTyID:     Accum = Type::Int32Ty; break;
    case Type::DoubleTyID:    Accum = Type::Int64Ty; break;
    case Type::X86_FP80TyID:  return true;
    case Type::FP128TyID:     return true;
    case Type::PPC_FP128TyID: return true;
    default:
      assert(Accum->isInteger() && "Unknown FP type!");
      break;
    }

    switch (In->getTypeID()) {
    case Type::PointerTyID:   In = TD.getIntPtrType(); break;
    case Type::FloatTyID:     In = Type::Int32Ty; break;
    case Type::DoubleTyID:    In = Type::Int64Ty; break;
    case Type::X86_FP80TyID:  return true;
    case Type::FP128TyID:     return true;
    case Type::PPC_FP128TyID: return true;
    default:
      assert(In->isInteger() && "Unknown FP type!");
      break;
    }
    return MergeInType(In, Accum, TD);
  }
  return false;
}
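
// For illustration (hypothetical cases): merging i8 into an accumulated i32
// keeps i32 (case #1); merging float into an accumulated i32 first converts
// the float to i32 (case #3); merging i16 into <4 x i32> fails and returns
// true.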

/// getUIntAtLeastAsBigAs - Return an unsigned integer type that is at least
/// as big as the specified number of bits.  If there is no suitable type,
/// this returns null.
const Type *getUIntAtLeastAsBigAs(unsigned NumBits) {
  if (NumBits > 64) return 0;
  if (NumBits > 32) return Type::Int64Ty;
  if (NumBits > 16) return Type::Int32Ty;
  if (NumBits > 8) return Type::Int16Ty;
  return Type::Int8Ty;
}

/// CanConvertToScalar - V is a pointer.  If we can convert the pointee to a
/// single scalar integer type, return that type.  Further, if the use is not
/// a completely trivial use that mem2reg could promote, set IsNotTrivial.  If
/// there are no uses of this pointer, return Type::VoidTy to differentiate from
/// failure.
///
const Type *SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial) {
  const Type *UsedType = Type::VoidTy; // No uses, no forced type.
  const TargetData &TD = getAnalysis<TargetData>();
  const PointerType *PTy = cast<PointerType>(V->getType());

  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      if (MergeInType(LI->getType(), UsedType, TD))
        return 0;

    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // Storing the pointer, not into the value?
      if (SI->getOperand(0) == V) return 0;

      // NOTE: We could handle storing of FP imms into integers here!

      if (MergeInType(SI->getOperand(0)->getType(), UsedType, TD))
        return 0;
    } else if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      IsNotTrivial = true;
      const Type *SubTy = CanConvertToScalar(CI, IsNotTrivial);
      if (!SubTy || MergeInType(SubTy, UsedType, TD)) return 0;
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Check to see if this is stepping over an element: GEP Ptr, int C
      if (GEP->getNumOperands() == 2 && isa<ConstantInt>(GEP->getOperand(1))) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue();
        unsigned ElSize = TD.getABITypeSize(PTy->getElementType());
        unsigned BitOffset = Idx*ElSize*8;
        if (BitOffset > 64 || !isPowerOf2_32(ElSize)) return 0;

        IsNotTrivial = true;
        const Type *SubElt = CanConvertToScalar(GEP, IsNotTrivial);
        if (SubElt == 0) return 0;
        if (SubElt != Type::VoidTy && SubElt->isInteger()) {
          const Type *NewTy =
            getUIntAtLeastAsBigAs(TD.getABITypeSizeInBits(SubElt)+BitOffset);
          if (NewTy == 0 || MergeInType(NewTy, UsedType, TD)) return 0;
          continue;
        }
      } else if (GEP->getNumOperands() == 3 &&
                 isa<ConstantInt>(GEP->getOperand(1)) &&
                 isa<ConstantInt>(GEP->getOperand(2)) &&
                 cast<ConstantInt>(GEP->getOperand(1))->isZero()) {
        // We are stepping into an element, e.g. a structure or an array:
        // GEP Ptr, int 0, uint C
        const Type *AggTy = PTy->getElementType();
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();

        if (const ArrayType *ATy = dyn_cast<ArrayType>(AggTy)) {
          if (Idx >= ATy->getNumElements()) return 0;  // Out of range.
        } else if (const VectorType *VectorTy = dyn_cast<VectorType>(AggTy)) {
          // Getting an element of the vector.
          if (Idx >= VectorTy->getNumElements()) return 0;  // Out of range.

          // Merge in the vector type.
          if (MergeInType(VectorTy, UsedType, TD)) return 0;

          const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
          if (SubTy == 0) return 0;

          if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType, TD))
            return 0;

          // We'll need to change this to an insert/extract element operation.
          IsNotTrivial = true;
          continue;    // Everything looks ok

        } else if (isa<StructType>(AggTy)) {
          // Structs are always ok.
        } else {
          return 0;
        }
        const Type *NTy = getUIntAtLeastAsBigAs(TD.getABITypeSizeInBits(AggTy));
        if (NTy == 0 || MergeInType(NTy, UsedType, TD)) return 0;
        const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
        if (SubTy == 0) return 0;
        if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType, TD))
          return 0;
        continue;    // Everything looks ok
      }
      return 0;
    } else {
      // Cannot handle this!
      return 0;
    }
  }

  return UsedType;
}
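
// Illustrative sketch (hypothetical IR): an "integer union" such as
//   %X = alloca i64
// that is accessed through bitcasts as both i32* and i64* converts to a
// single i64 scalar, while any use that stores the pointer %X itself makes
// the conversion fail.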

/// ConvertToScalar - The specified alloca passes the CanConvertToScalar
/// predicate and is non-trivial.  Convert it to something that can be trivially
/// promoted into a register by mem2reg.
void SROA::ConvertToScalar(AllocationInst *AI, const Type *ActualTy) {
  DOUT << "CONVERT TO SCALAR: " << *AI << "  TYPE = "
       << *ActualTy << "\n";
  ++NumConverted;

  BasicBlock *EntryBlock = AI->getParent();
  assert(EntryBlock == &EntryBlock->getParent()->getEntryBlock() &&
         "Not in the entry block!");
  EntryBlock->getInstList().remove(AI);  // Take the alloca out of the program.

  // Create and insert the alloca.
  AllocaInst *NewAI = new AllocaInst(ActualTy, 0, AI->getName(),
                                     EntryBlock->begin());
  ConvertUsesToScalar(AI, NewAI, 0);
  delete AI;
}

/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca
/// directly.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->use_back());

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      Value *NV = ConvertUsesOfLoadToScalar(LI, NewAI, Offset);
      LI->replaceAllUsesWith(NV);
      LI->eraseFromParent();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");

      Value *SV = ConvertUsesOfStoreToScalar(SI, NewAI, Offset);
      new StoreInst(SV, NewAI, SI);
      SI->eraseFromParent();

    } else if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      ConvertUsesToScalar(CI, NewAI, Offset);
      CI->eraseFromParent();
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      const PointerType *AggPtrTy =
        cast<PointerType>(GEP->getOperand(0)->getType());
      const TargetData &TD = getAnalysis<TargetData>();
      unsigned AggSizeInBits =
        TD.getABITypeSizeInBits(AggPtrTy->getElementType());

      // Check to see if this is stepping over an element: GEP Ptr, int C
      unsigned NewOffset = Offset;
      if (GEP->getNumOperands() == 2) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue();
        unsigned BitOffset = Idx*AggSizeInBits;

        NewOffset += BitOffset;
      } else if (GEP->getNumOperands() == 3) {
        // We know that operand #1 is zero.
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
        const Type *AggTy = AggPtrTy->getElementType();
        if (const SequentialType *SeqTy = dyn_cast<SequentialType>(AggTy)) {
          unsigned ElSizeBits =
            TD.getABITypeSizeInBits(SeqTy->getElementType());

          NewOffset += ElSizeBits*Idx;
        } else if (const StructType *STy = dyn_cast<StructType>(AggTy)) {
          unsigned EltBitOffset =
            TD.getStructLayout(STy)->getElementOffsetInBits(Idx);

          NewOffset += EltBitOffset;
        } else {
          assert(0 && "Unsupported operation!");
          abort();
        }
      } else {
        assert(0 && "Unsupported operation!");
        abort();
      }
      ConvertUsesToScalar(GEP, NewAI, NewOffset);
      GEP->eraseFromParent();
    } else {
      assert(0 && "Unsupported operation!");
      abort();
    }
  }
}

/// ConvertUsesOfLoadToScalar - Convert all of the users of the specified load
/// to use the new alloca directly, returning the value that should replace the
/// load.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
Value *SROA::ConvertUsesOfLoadToScalar(LoadInst *LI, AllocaInst *NewAI,
                                       unsigned Offset) {
  // The load is a bit extract from NewAI shifted right by Offset bits.
  Value *NV = new LoadInst(NewAI, LI->getName(), LI);

  if (NV->getType() == LI->getType() && Offset == 0) {
    // We win, no conversion needed.
    return NV;
  }

  // If the result type of the 'union' is a pointer, then this must be ptr->ptr
  // cast.  Anything else would result in NV being an integer.
  if (isa<PointerType>(NV->getType())) {
    assert(isa<PointerType>(LI->getType()));
    return new BitCastInst(NV, LI->getType(), LI->getName(), LI);
  }

  if (const VectorType *VTy = dyn_cast<VectorType>(NV->getType())) {
    // If the result alloca is a vector type, this is either an element
    // access or a bitcast to another vector type.
    if (isa<VectorType>(LI->getType()))
      return new BitCastInst(NV, LI->getType(), LI->getName(), LI);

    // Otherwise it must be an element access.
    const TargetData &TD = getAnalysis<TargetData>();
    unsigned Elt = 0;
    if (Offset) {
      unsigned EltSize = TD.getABITypeSizeInBits(VTy->getElementType());
      Elt = Offset/EltSize;
      Offset -= EltSize*Elt;
    }
    NV = new ExtractElementInst(NV, ConstantInt::get(Type::Int32Ty, Elt),
                                "tmp", LI);

    // If we're done, return this element.
    if (NV->getType() == LI->getType() && Offset == 0)
      return NV;
  }

  const IntegerType *NTy = cast<IntegerType>(NV->getType());

  // If this is a big-endian system and the load is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  const TargetData &TD = getAnalysis<TargetData>();
  if (TD.isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits.  This matters for
    // integers with a bitwidth that is not a multiple of 8.
    ShAmt = TD.getTypeStoreSizeInBits(NTy) -
            TD.getTypeStoreSizeInBits(LI->getType()) - Offset;
  } else {
    ShAmt = Offset;
  }

  // Note: we support negative bitwidths (with shl) which are not defined.
  // We do this to support (f.e.) loads off the end of a structure where
  // only some bits are used.
  if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
    NV = BinaryOperator::CreateLShr(NV,
                                    ConstantInt::get(NV->getType(), ShAmt),
                                    LI->getName(), LI);
  else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
    NV = BinaryOperator::CreateShl(NV,
                                   ConstantInt::get(NV->getType(), -ShAmt),
                                   LI->getName(), LI);

  // Finally, unconditionally truncate the integer to the right width.
  unsigned LIBitWidth = TD.getTypeSizeInBits(LI->getType());
  if (LIBitWidth < NTy->getBitWidth())
    NV = new TruncInst(NV, IntegerType::get(LIBitWidth),
                       LI->getName(), LI);

  // If the result is an integer, this is a trunc or bitcast.
  if (isa<IntegerType>(LI->getType())) {
    // Should be done.
  } else if (LI->getType()->isFloatingPoint()) {
    // Just do a bitcast, we know the sizes match up.
    NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
  } else {
    // Otherwise must be a pointer.
    NV = new IntToPtrInst(NV, LI->getType(), LI->getName(), LI);
  }
  assert(NV->getType() == LI->getType() && "Didn't convert right?");
  return NV;
}
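
// Illustrative sketch (hypothetical IR, little-endian target): loading the
// i16 that lives at bit offset 16 of an i64 alloca becomes
//   %t = load i64* %X
//   %s = lshr i64 %t, 16
//   %v = trunc i64 %s to i16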

/// ConvertUsesOfStoreToScalar - Convert the specified store to a load+store
/// pair of the new alloca directly, returning the value that should be stored
/// to the alloca.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
Value *SROA::ConvertUsesOfStoreToScalar(StoreInst *SI, AllocaInst *NewAI,
                                        unsigned Offset) {

  // Convert the stored type to the actual type, shift it left to insert
  // then 'or' into place.
  Value *SV = SI->getOperand(0);
  const Type *AllocaType = NewAI->getType()->getElementType();
  if (SV->getType() == AllocaType && Offset == 0) {
    // All is well.
  } else if (const VectorType *PTy = dyn_cast<VectorType>(AllocaType)) {
    Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI);

    // If the result alloca is a vector type, this is either an element
    // access or a bitcast to another vector type.
    if (isa<VectorType>(SV->getType())) {
      SV = new BitCastInst(SV, AllocaType, SV->getName(), SI);
    } else {
      // Must be an element insertion.
      const TargetData &TD = getAnalysis<TargetData>();
      unsigned Elt = Offset/TD.getABITypeSizeInBits(PTy->getElementType());
      SV = InsertElementInst::Create(Old, SV,
                                     ConstantInt::get(Type::Int32Ty, Elt),
                                     "tmp", SI);
    }
  } else if (isa<PointerType>(AllocaType)) {
    // If the alloca type is a pointer, then all the elements must be
    // pointers.
    if (SV->getType() != AllocaType)
      SV = new BitCastInst(SV, AllocaType, SV->getName(), SI);
  } else {
    Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI);

    // If SV is a float, convert it to the appropriate integer type.
    // If it is a pointer, do the same, and also handle ptr->ptr casts
    // here.
    const TargetData &TD = getAnalysis<TargetData>();
    unsigned SrcWidth = TD.getTypeSizeInBits(SV->getType());
    unsigned DestWidth = TD.getTypeSizeInBits(AllocaType);
    unsigned SrcStoreWidth = TD.getTypeStoreSizeInBits(SV->getType());
    unsigned DestStoreWidth = TD.getTypeStoreSizeInBits(AllocaType);
    if (SV->getType()->isFloatingPoint())
      SV = new BitCastInst(SV, IntegerType::get(SrcWidth),
                           SV->getName(), SI);
    else if (isa<PointerType>(SV->getType()))
      SV = new PtrToIntInst(SV, TD.getIntPtrType(), SV->getName(), SI);

    // Always zero extend the value if needed.
    if (SV->getType() != AllocaType)
      SV = new ZExtInst(SV, AllocaType, SV->getName(), SI);

    // If this is a big-endian system and the store is narrower than the
    // full alloca type, we need to do a shift to get the right bits.
    int ShAmt = 0;
    if (TD.isBigEndian()) {
      // On big-endian machines, the lowest bit is stored at the bit offset
      // from the pointer given by getTypeStoreSizeInBits.  This matters for
      // integers with a bitwidth that is not a multiple of 8.
      ShAmt = DestStoreWidth - SrcStoreWidth - Offset;
    } else {
      ShAmt = Offset;
    }

    // Note: we support negative bitwidths (with shr) which are not defined.
    // We do this to support (f.e.) stores off the end of a structure where
    // only some bits in the structure are set.
    APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
    if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
      SV = BinaryOperator::CreateShl(SV,
                                     ConstantInt::get(SV->getType(), ShAmt),
                                     SV->getName(), SI);
      Mask <<= ShAmt;
    } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
      SV = BinaryOperator::CreateLShr(SV,
                                      ConstantInt::get(SV->getType(), -ShAmt),
                                      SV->getName(), SI);
      Mask = Mask.lshr(-ShAmt);
    }

    // Mask out the bits we are about to insert from the old value, and or
    // it in.
    if (SrcWidth != DestWidth) {
      assert(DestWidth > SrcWidth);
      Old = BinaryOperator::CreateAnd(Old, ConstantInt::get(~Mask),
                                      Old->getName()+".mask", SI);
      SV = BinaryOperator::CreateOr(Old, SV, SV->getName()+".ins", SI);
    }
  }
  return SV;
}
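
// Illustrative sketch (hypothetical IR, little-endian target): storing an
// i16 at bit offset 16 of an i64 alloca zero-extends and shifts the new bits
// left by 16, masks the old value with ~(0xffff << 16), and 'or's the two
// together before storing the merged i64 back.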

/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool PointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return PointsToConstantGlobal(CE->getOperand(0));
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with isOffset) but otherwise traverse
/// the uses.  If we see a memcpy/memmove that targets an unoffseted pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
                                           bool isOffset) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    if (isa<LoadInst>(*UI)) {
      // Ignore loads, they are always ok.
      continue;
    }
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) {
      // If uses of the bitcast are ok, we are ok.
      if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset))
        return false;
      continue;
    }
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {
      // If the GEP has all zero indices, it doesn't offset the pointer.  If it
      // doesn't, it does.
      if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy,
                                          isOffset || !GEP->hasAllZeroIndices()))
        return false;
      continue;
    }

    // If this isn't our memcpy/memmove, reject it as something we can't
    // handle.
    if (!isa<MemCpyInst>(*UI) && !isa<MemMoveInst>(*UI))
      return false;

    // If we already have seen a copy, reject the second one.
    if (TheCopy) return false;

    // If the pointer has been offset from the start of the alloca, we can't
    // safely handle this.
    if (isOffset) return false;

    // If the memintrinsic isn't using the alloca as the dest, reject it.
    if (UI.getOperandNo() != 1) return false;

    MemIntrinsic *MI = cast<MemIntrinsic>(*UI);

    // If the source of the memcpy/move is not a constant global, reject it.
    if (!PointsToConstantGlobal(MI->getOperand(2)))
      return false;

    // Otherwise, the transform is safe.  Remember the copy instruction.
    TheCopy = MI;
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return the memcpy/memmove that copies a
/// constant global into the specified alloca, if the alloca is only modified
/// by such a copy.  If we can prove this, we can replace any uses of the
/// alloca with uses of the global directly.
Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocationInst *AI) {
  Instruction *TheCopy = 0;
  if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false))
    return TheCopy;
  return 0;
}