// Start with registering info about how the
// target lays out data structures.
- module->setDataLayout(executionEngine->getDataLayout());
- fpm.add(new llvm::DataLayoutPass());
+ module->setDataLayout(*executionEngine->getDataLayout());
// Optimizations turned on
#ifdef ADD_OPT_PASSES
// Set up the optimizer pipeline. Start with registering info about how the
// target lays out data structures.
- OpenModule->setDataLayout(NewEngine->getDataLayout());
- FPM->add(new DataLayoutPass());
+ OpenModule->setDataLayout(*NewEngine->getDataLayout());
// Provide basic AliasAnalysis support for GVN.
FPM->add(createBasicAliasAnalysisPass());
// Promote allocas to registers.
// Set up the optimizer pipeline. Start with registering info about how the
// target lays out data structures.
TheModule->setDataLayout(TheExecutionEngine->getDataLayout());
- OurFPM.add(new DataLayoutPass());
// Provide basic AliasAnalysis support for GVN.
OurFPM.add(createBasicAliasAnalysisPass());
// Do simple "peephole" optimizations and bit-twiddling optzns.
// Set up the optimizer pipeline. Start with registering info about how the
// target lays out data structures.
TheModule->setDataLayout(TheExecutionEngine->getDataLayout());
- OurFPM.add(new DataLayoutPass());
// Provide basic AliasAnalysis support for GVN.
OurFPM.add(createBasicAliasAnalysisPass());
// Do simple "peephole" optimizations and bit-twiddling optzns.
// Set up the optimizer pipeline. Start with registering info about how the
// target lays out data structures.
TheModule->setDataLayout(TheExecutionEngine->getDataLayout());
- OurFPM.add(new DataLayoutPass());
// Provide basic AliasAnalysis support for GVN.
OurFPM.add(createBasicAliasAnalysisPass());
// Promote allocas to registers.
// Set up the optimizer pipeline. Start with registering info about how the
// target lays out data structures.
TheModule->setDataLayout(TheExecutionEngine->getDataLayout());
- OurFPM.add(new DataLayoutPass());
#if 0
// Provide basic AliasAnalysis support for GVN.
OurFPM.add(createBasicAliasAnalysisPass());
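// Resulting tutorial setup, as a minimal sketch (identifiers from the hunks
// above; the dereference and the InstCombine line are assumptions based on
// the new setDataLayout(const DataLayout &) signature and the surrounding
// tutorial text): only the DataLayoutPass registration disappears.
//
//   TheModule->setDataLayout(*TheExecutionEngine->getDataLayout());
//   OurFPM.add(createBasicAliasAnalysisPass());
//   OurFPM.add(createInstructionCombiningPass()); // "peephole" optzns
//   OurFPM.doInitialization();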
M(new Module(GenerateUniqueName("jit_module_"),
Session.getLLVMContext())),
Builder(Session.getLLVMContext()) {
- M->setDataLayout(Session.getTarget().getDataLayout());
+ M->setDataLayout(*Session.getTarget().getDataLayout());
}
SessionContext& getSession() { return Session; }
M(new Module(GenerateUniqueName("jit_module_"),
Session.getLLVMContext())),
Builder(Session.getLLVMContext()) {
- M->setDataLayout(Session.getTarget().getDataLayout());
+ M->setDataLayout(*Session.getTarget().getDataLayout());
}
SessionContext& getSession() { return Session; }
M(new Module(GenerateUniqueName("jit_module_"),
Session.getLLVMContext())),
Builder(Session.getLLVMContext()) {
- M->setDataLayout(Session.getTarget().getDataLayout());
+ M->setDataLayout(*Session.getTarget().getDataLayout());
}
SessionContext& getSession() { return Session; }
M(new Module(GenerateUniqueName("jit_module_"),
Session.getLLVMContext())),
Builder(Session.getLLVMContext()) {
- M->setDataLayout(Session.getTarget().getDataLayout());
+ M->setDataLayout(*Session.getTarget().getDataLayout());
}
SessionContext& getSession() { return Session; }
/// typically called by the run* methods of these subclasses. This may be
/// called multiple times.
///
- void InitializeAliasAnalysis(Pass *P);
+ void InitializeAliasAnalysis(Pass *P, const DataLayout *DL);
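// Each AliasAnalysis subclass now feeds the module's layout in explicitly.
// A minimal sketch of the expected pattern (hypothetical MyAA; the concrete
// passes later in this patch follow it verbatim):
//
//   bool MyAA::doInitialization(Module &M) {
//     InitializeAliasAnalysis(this, &M.getDataLayout());
//     return true;
//   }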
/// getAnalysisUsage - All alias analysis implementations should invoke this
/// directly (using AliasAnalysis::getAnalysisUsage(AU)).
#define LLVM_ANALYSIS_LIBCALLALIASANALYSIS_H
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
namespace llvm {
void getAnalysisUsage(AnalysisUsage &AU) const override;
- bool runOnFunction(Function &F) override {
- InitializeAliasAnalysis(this); // set up super class
- return false;
- }
-
+ bool runOnFunction(Function &F) override;
+
/// getAdjustedAnalysisPointer - This method is used when a pass implements
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
auto Names = llvm::make_unique<StringMap<bool>>();
for (const auto &M : Ms) {
- Mangler Mang(M->getDataLayout());
+ Mangler Mang(&M->getDataLayout());
for (const auto &GV : M->globals())
if (addGlobalValue(*Names, GV, Mang, SearchName, ExportedSymbolsOnly))
/// \brief Primitive type alignment data.
SmallVector<LayoutAlignElem, 16> Alignments;
+ /// \brief The string representation used to create this DataLayout.
+ std::string StringRepresentation;
+
typedef SmallVector<PointerAlignElem, 8> PointersTy;
PointersTy Pointers;
DataLayout &operator=(const DataLayout &DL) {
clear();
+ StringRepresentation = DL.StringRepresentation;
BigEndian = DL.isBigEndian();
StackNaturalAlign = DL.StackNaturalAlign;
ManglingMode = DL.ManglingMode;
/// \brief Returns the string representation of the DataLayout.
///
/// This representation is in the same format accepted by the string
- /// constructor above.
- std::string getStringRepresentation() const;
+ /// constructor above. This should not be used to compare two DataLayouts,
+ /// as different strings can represent the same layout.
+ std::string getStringRepresentation() const { return StringRepresentation; }
+
+ /// \brief Test if the DataLayout was constructed from an empty string.
+ bool isDefault() const { return StringRepresentation.empty(); }
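// Sketch of the intended call-site idiom (assumed from the uses later in
// this patch): the old null test on a DataLayout pointer becomes a test for
// the default (empty-string) layout.
//
//   const DataLayout &DL = M.getDataLayout(); // never null now
//   if (DL.isDefault())
//     M.setDataLayout(TargetLayout); // TargetLayout: hypothetical layout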
/// \brief Returns true if the specified type is known to be a native integer
/// type supported by the CPU.
return reinterpret_cast<LLVMTargetDataRef>(const_cast<DataLayout *>(P));
}
-class DataLayoutPass : public ImmutablePass {
- DataLayout DL;
-
-public:
- /// This has to exist, because this is a pass, but it should never be used.
- DataLayoutPass();
- ~DataLayoutPass();
-
- const DataLayout &getDataLayout() const { return DL; }
-
- static char ID; // Pass identification, replacement for typeid
-
- bool doFinalization(Module &M) override;
- bool doInitialization(Module &M) override;
-};
-
/// Used to lazily calculate structure layout information for a target machine,
/// based on the DataLayout structure.
class StructLayout {
std::string TargetTriple; ///< Platform target triple Module compiled on
///< Format: (arch)(sub)-(vendor)-(sys)-(abi)
void *NamedMDSymTab; ///< NamedMDNode names.
-
- // We need to keep the string because the C API expects us to own the string
- // representation.
- // Since we have it, we also use an empty string to represent a module without
- // a DataLayout. If it has a DataLayout, these variables are in sync and the
- // string is just a cache of getDataLayout()->getStringRepresentation().
- std::string DataLayoutStr;
- DataLayout DL;
+ DataLayout DL; ///< DataLayout associated with the module
friend class Constant;
/// Get the data layout string for the module's target platform. This is
/// equivalent to getDataLayout().getStringRepresentation().
- const std::string &getDataLayoutStr() const { return DataLayoutStr; }
+ const std::string getDataLayoutStr() const {
+ return DL.getStringRepresentation();
+ }
/// Get the data layout for the module's target platform.
- const DataLayout *getDataLayout() const;
+ const DataLayout &getDataLayout() const;
/// Get the target triple which is a string describing the target host.
/// @returns a string containing the target triple.
/// Set the data layout
void setDataLayout(StringRef Desc);
- void setDataLayout(const DataLayout *Other);
+ void setDataLayout(const DataLayout &Other);
/// Set the target triple.
void setTargetTriple(StringRef T) { TargetTriple = T; }
class InlineFunctionInfo {
public:
explicit InlineFunctionInfo(CallGraph *cg = nullptr,
- const DataLayout *DL = nullptr,
AliasAnalysis *AA = nullptr,
AssumptionCacheTracker *ACT = nullptr)
- : CG(cg), DL(DL), AA(AA), ACT(ACT) {}
+ : CG(cg), AA(AA), ACT(ACT) {}
/// CG - If non-null, InlineFunction will update the callgraph to reflect the
/// changes it makes.
CallGraph *CG;
- const DataLayout *DL;
AliasAnalysis *AA;
AssumptionCacheTracker *ACT;
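// Callers drop the layout argument accordingly; e.g. the Inliner update
// later in this patch constructs it as:
//   InlineFunctionInfo InlineInfo(&CG, AA, ACT);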
/// InitializeAliasAnalysis - Subclasses must call this method to initialize the
/// AliasAnalysis interface before any other methods are called.
///
-void AliasAnalysis::InitializeAliasAnalysis(Pass *P) {
- DataLayoutPass *DLP = P->getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+void AliasAnalysis::InitializeAliasAnalysis(Pass *P, const DataLayout *NewDL) {
+ DL = NewDL;
auto *TLIP = P->getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
TLI = TLIP ? &TLIP->getTLI() : nullptr;
AA = &P->getAnalysis<AliasAnalysis>();
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
bool runOnModule(Module &M) override {
this->M = &M;
- InitializeAliasAnalysis(this);
+ InitializeAliasAnalysis(this, &M.getDataLayout());
return false;
}
}
bool runOnModule(Module &M) override {
- InitializeAliasAnalysis(this); // set up super class
+ InitializeAliasAnalysis(this, &M.getDataLayout()); // set up super class
for(Module::global_iterator I = M.global_begin(),
E = M.global_end(); I != E; ++I) {
initializeBasicAliasAnalysisPass(*PassRegistry::getPassRegistry());
}
- void initializePass() override {
- InitializeAliasAnalysis(this);
- }
+ bool doInitialization(Module &M) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<AliasAnalysis>();
return false;
}
+bool BasicAliasAnalysis::doInitialization(Module &M) {
+ InitializeAliasAnalysis(this, &M.getDataLayout());
+ return true;
+}
+
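// The same initializePass() -> doInitialization(Module &) conversion recurs
// for the other immutable AA passes below (CFL, ScopedNoAlias, TBAA,
// ObjCARC): initializePass() had no Module in scope, so the layout is now
// captured in doInitialization(Module &) instead.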
/// getModRefInfo - Check to see if the specified callsite can clobber the
/// specified memory object. Since we only look at local properties of this
/// function, we really can't say much about this query. We do, however, use
return QueryResult;
}
- void initializePass() override { InitializeAliasAnalysis(this); }
+ bool doInitialization(Module &M) override;
};
void FunctionHandle::removeSelfFromCache() {
return AliasAnalysis::NoAlias;
}
+
+bool CFLAliasAnalysis::doInitialization(Module &M) {
+ InitializeAliasAnalysis(this, &M.getDataLayout());
+ return true;
+}
}
bool runOnModule(Module &M) override {
- InitializeAliasAnalysis(this);
+ InitializeAliasAnalysis(this, &M.getDataLayout());
// Find non-addr taken globals.
AnalyzeGlobals(M);
}
bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
- const DataLayout *DL = I.getModule()->getDataLayout();
// Propagate constants through ptrtoint.
Constant *COp = dyn_cast<Constant>(I.getOperand(0));
if (!COp)
// Track base/offset pairs when converted to a plain integer provided the
// integer is large enough to represent the pointer.
unsigned IntegerSize = I.getType()->getScalarSizeInBits();
- if (DL && IntegerSize >= DL->getPointerSizeInBits()) {
+ const DataLayout &DL = I.getModule()->getDataLayout();
+ if (IntegerSize >= DL.getPointerSizeInBits()) {
std::pair<Value *, APInt> BaseAndOffset
= ConstantOffsetPtrs.lookup(I.getOperand(0));
if (BaseAndOffset.first)
}
bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
- const DataLayout *DL = I.getModule()->getDataLayout();
// Propagate constants through inttoptr.
Constant *COp = dyn_cast<Constant>(I.getOperand(0));
if (!COp)
// modifications provided the integer is not too large.
Value *Op = I.getOperand(0);
unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
- if (DL && IntegerSize <= DL->getPointerSizeInBits()) {
+ const DataLayout &DL = I.getModule()->getDataLayout();
+ if (IntegerSize <= DL.getPointerSizeInBits()) {
std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
if (BaseAndOffset.first)
ConstantOffsetPtrs[&I] = BaseAndOffset;
DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName()
<< "...\n");
- CallAnalyzer CA(Callee->getParent()->getDataLayout(), TTIWP->getTTI(*Callee),
+ CallAnalyzer CA(&Callee->getParent()->getDataLayout(), TTIWP->getTTI(*Callee),
ACT, *Callee, Threshold);
bool ShouldInline = CA.analyzeCall(CS);
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
SE = &getAnalysis<ScalarEvolution>();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &L->getHeader()->getModule()->getDataLayout();
// Find all uses of induction variables in this loop, and categorize
// them by stride. Start by finding all of the PHI nodes in the header for
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DT = DTWP ? &DTWP->getDomTree() : nullptr;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
AU.setPreservesAll(); // Does not transform code
}
-
+bool LibCallAliasAnalysis::runOnFunction(Function &F) {
+ // set up super class
+ InitializeAliasAnalysis(this, &F.getParent()->getDataLayout());
+ return false;
+}
/// AnalyzeLibCallDetails - Given a call to a function with the specified
/// LibCallFunctionInfo, see if we can improve the mod/ref footprint of the call
AA = &getAnalysis<AliasAnalysis>();
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
visit(F);
dbgs() << MessagesStr.str();
// Get the DataLayout for this module; it is always available now.
- const DataLayout *DL = ScanBB->getModule()->getDataLayout();
+ const DataLayout &DL = ScanBB->getModule()->getDataLayout();
// Try to get the store size for the type.
- uint64_t AccessSize = DL ? DL->getTypeStoreSize(AccessTy)
- : AA ? AA->getTypeStoreSize(AccessTy) : 0;
+ uint64_t AccessSize = DL.getTypeStoreSize(AccessTy);
Value *StrippedPtr = Ptr->stripPointerCasts();
if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
if (AreEquivalentAddressValues(
LI->getPointerOperand()->stripPointerCasts(), StrippedPtr) &&
- CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
+ CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, &DL)) {
if (AATags)
LI->getAAMetadata(*AATags);
return LI;
// those cases are unlikely.)
if (AreEquivalentAddressValues(StorePtr, StrippedPtr) &&
CastInst::isBitOrNoopPointerCastable(SI->getValueOperand()->getType(),
- AccessTy, DL)) {
+ AccessTy, &DL)) {
if (AATags)
SI->getAAMetadata(*AATags);
return SI->getOperand(0);
bool LoopAccessAnalysis::runOnFunction(Function &F) {
SE = &getAnalysis<ScalarEvolution>();
- DL = F.getParent()->getDataLayout();
+ DL = &F.getParent()->getDataLayout();
auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
TLI = TLIP ? &TLIP->getTLI() : nullptr;
AA = &getAnalysis<AliasAnalysis>();
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
initializeMemDerefPrinterPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<DataLayoutPass>();
AU.setPreservesAll();
}
bool runOnFunction(Function &F) override;
char MemDerefPrinter::ID = 0;
INITIALIZE_PASS_BEGIN(MemDerefPrinter, "print-memderefs",
"Memory Dereferenciblity of pointers in function", false, true)
-INITIALIZE_PASS_DEPENDENCY(DataLayoutPass)
INITIALIZE_PASS_END(MemDerefPrinter, "print-memderefs",
"Memory Dereferenciblity of pointers in function", false, true)
}
bool MemDerefPrinter::runOnFunction(Function &F) {
- const DataLayout *DL = &getAnalysis<DataLayoutPass>().getDataLayout();
+ const DataLayout &DL = F.getParent()->getDataLayout();
for (auto &I: inst_range(F)) {
if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
Value *PO = LI->getPointerOperand();
- if (PO->isDereferenceablePointer(DL))
+ if (PO->isDereferenceablePointer(&DL))
Vec.push_back(PO);
}
}
bool MemoryDependenceAnalysis::runOnFunction(Function &F) {
AA = &getAnalysis<AliasAnalysis>();
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DT = DTWP ? &DTWP->getDomTree() : nullptr;
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
using namespace llvm;
void getAnalysisUsage(AnalysisUsage &AU) const override {}
- void initializePass() override {
+ bool doInitialization(Module &M) override {
// Note: NoAA does not call InitializeAliasAnalysis because it's
// special and does not support chaining.
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &M.getDataLayout();
+ return true;
}
AliasResult alias(const Location &LocA, const Location &LocB) override {
this->F = &F;
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
return false;
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
using namespace llvm;
bool
ScalarEvolutionAliasAnalysis::runOnFunction(Function &F) {
- InitializeAliasAnalysis(this);
+ InitializeAliasAnalysis(this, &F.getParent()->getDataLayout());
SE = &getAnalysis<ScalarEvolution>();
return false;
}
initializeScopedNoAliasAAPass(*PassRegistry::getPassRegistry());
}
- void initializePass() override { InitializeAliasAnalysis(this); }
+ bool doInitialization(Module &M) override;
/// getAdjustedAnalysisPointer - This method is used when a pass implements
/// an analysis interface through multiple inheritance. If needed, it
return new ScopedNoAliasAA();
}
+bool ScopedNoAliasAA::doInitialization(Module &M) {
+ InitializeAliasAnalysis(this, &M.getDataLayout());
+ return true;
+}
+
void
ScopedNoAliasAA::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
char TargetIRAnalysis::PassID;
TargetIRAnalysis::Result TargetIRAnalysis::getDefaultTTI(Function &F) {
- return Result(F.getParent()->getDataLayout());
+ return Result(&F.getParent()->getDataLayout());
}
// Register the basic pass.
initializeTypeBasedAliasAnalysisPass(*PassRegistry::getPassRegistry());
}
- void initializePass() override {
- InitializeAliasAnalysis(this);
- }
+ bool doInitialization(Module &M) override;
/// getAdjustedAnalysisPointer - This method is used when a pass implements
/// an analysis interface through multiple inheritance. If needed, it
return new TypeBasedAliasAnalysis();
}
+bool TypeBasedAliasAnalysis::doInitialization(Module &M) {
+ InitializeAliasAnalysis(this, &M.getDataLayout());
+ return true;
+}
+
void
TypeBasedAliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
Builder.SetInsertPoint(Entry->getFirstInsertionPt());
Function *FrameAllocFn =
Intrinsic::getDeclaration(M, Intrinsic::frameallocate);
- uint64_t EHAllocSize = M->getDataLayout()->getTypeAllocSize(EHDataStructTy);
+ uint64_t EHAllocSize = M->getDataLayout().getTypeAllocSize(EHDataStructTy);
Value *FrameAllocArgs[] = {
ConstantInt::get(Type::getInt32Ty(Context), EHAllocSize)};
CallInst *FrameAlloc =
CloneAndPruneIntoFromInst(
Handler, SrcFn, ++II, VMap,
/*ModuleLevelChanges=*/false, Returns, "", &InlinedFunctionInfo,
- SrcFn->getParent()->getDataLayout(), Director.get());
+ &SrcFn->getParent()->getDataLayout(), Director.get());
// Move all the instructions in the first cloned block into our entry block.
BasicBlock *FirstClonedBB = std::next(Function::iterator(Entry));
legacy::PassManager PM;
- M->setDataLayout(TM->getDataLayout());
- PM.add(new DataLayoutPass());
+ M->setDataLayout(*TM->getDataLayout());
// The RuntimeDyld will take ownership of this shortly
SmallVector<char, 4096> ObjBufferSV;
// If this module doesn't have a DataLayout attached then attach the
// default.
- if (!M->getDataLayout())
- M->setDataLayout(getDataLayout());
+ if (M->getDataLayout().isDefault())
+ M->setDataLayout(*getDataLayout());
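// Note: "no layout" is now modeled as the default (empty-string) DataLayout,
// so the isDefault() test above replaces the old null check.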
Modules.push_back(std::move(M));
std::vector<Module *> Ms;
#include <cstdlib>
using namespace llvm;
-// Handle the Pass registration stuff necessary to use DataLayout's.
-
-INITIALIZE_PASS(DataLayoutPass, "datalayout", "Data Layout", false, true)
-char DataLayoutPass::ID = 0;
-
//===----------------------------------------------------------------------===//
// Support for StructLayout
//===----------------------------------------------------------------------===//
}
void DataLayout::parseSpecifier(StringRef Desc) {
+ StringRepresentation = Desc;
while (!Desc.empty()) {
// Split at '-'.
std::pair<StringRef, StringRef> Split = split(Desc, '-');
init(M);
}
-void DataLayout::init(const Module *M) {
- const DataLayout *Other = M->getDataLayout();
- if (Other)
- *this = *Other;
- else
- reset("");
-}
+void DataLayout::init(const Module *M) { *this = M->getDataLayout(); }
bool DataLayout::operator==(const DataLayout &Other) const {
bool Ret = BigEndian == Other.BigEndian &&
ManglingMode == Other.ManglingMode &&
LegalIntWidths == Other.LegalIntWidths &&
Alignments == Other.Alignments && Pointers == Other.Pointers;
- assert(Ret == (getStringRepresentation() == Other.getStringRepresentation()));
+ // Note: getStringRepresentation() might differ; it is not canonicalized.
return Ret;
}
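// Example of why the strings cannot stand in for equality (assuming the
// usual layout grammar, where a preferred alignment defaults to the ABI
// alignment): "e-p:32:32" and "e-p:32:32:32" parse to equal DataLayouts,
// yet getStringRepresentation() preserves whichever spelling was given.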
return L;
}
-std::string DataLayout::getStringRepresentation() const {
- std::string Result;
- raw_string_ostream OS(Result);
-
- OS << (BigEndian ? "E" : "e");
-
- switch (ManglingMode) {
- case MM_None:
- break;
- case MM_ELF:
- OS << "-m:e";
- break;
- case MM_MachO:
- OS << "-m:o";
- break;
- case MM_WINCOFF:
- OS << "-m:w";
- break;
- case MM_Mips:
- OS << "-m:m";
- break;
- }
-
- for (const PointerAlignElem &PI : Pointers) {
- // Skip default.
- if (PI.AddressSpace == 0 && PI.ABIAlign == 8 && PI.PrefAlign == 8 &&
- PI.TypeByteWidth == 8)
- continue;
-
- OS << "-p";
- if (PI.AddressSpace) {
- OS << PI.AddressSpace;
- }
- OS << ":" << PI.TypeByteWidth*8 << ':' << PI.ABIAlign*8;
- if (PI.PrefAlign != PI.ABIAlign)
- OS << ':' << PI.PrefAlign*8;
- }
-
- for (const LayoutAlignElem &AI : Alignments) {
- if (std::find(std::begin(DefaultAlignments), std::end(DefaultAlignments),
- AI) != std::end(DefaultAlignments))
- continue;
- OS << '-' << (char)AI.AlignType;
- if (AI.TypeBitWidth)
- OS << AI.TypeBitWidth;
- OS << ':' << AI.ABIAlign*8;
- if (AI.ABIAlign != AI.PrefAlign)
- OS << ':' << AI.PrefAlign*8;
- }
-
- if (!LegalIntWidths.empty()) {
- OS << "-n" << (unsigned)LegalIntWidths[0];
-
- for (unsigned i = 1, e = LegalIntWidths.size(); i != e; ++i)
- OS << ':' << (unsigned)LegalIntWidths[i];
- }
-
- if (StackNaturalAlign)
- OS << "-S" << StackNaturalAlign*8;
-
- return OS.str();
-}
unsigned DataLayout::getPointerABIAlignment(unsigned AS) const {
PointersTy::const_iterator I = findPointerLowerBound(AS);
return Log2_32(getPreferredAlignment(GV));
}
-DataLayoutPass::DataLayoutPass() : ImmutablePass(ID), DL("") {
- initializeDataLayoutPassPass(*PassRegistry::getPassRegistry());
-}
-
-DataLayoutPass::~DataLayoutPass() {}
-
-bool DataLayoutPass::doInitialization(Module &M) {
- DL.init(&M);
- return false;
-}
-
-bool DataLayoutPass::doFinalization(Module &M) {
- DL.reset("");
- return false;
-}
void Module::setDataLayout(StringRef Desc) {
DL.reset(Desc);
-
- if (Desc.empty()) {
- DataLayoutStr = "";
- } else {
- DataLayoutStr = DL.getStringRepresentation();
- // DataLayoutStr is now equivalent to Desc, but since the representation
- // is not unique, they may not be identical.
- }
}
-void Module::setDataLayout(const DataLayout *Other) {
- if (!Other) {
- DataLayoutStr = "";
- DL.reset("");
- } else {
- DL = *Other;
- DataLayoutStr = DL.getStringRepresentation();
- }
-}
+void Module::setDataLayout(const DataLayout &Other) { DL = Other; }
-const DataLayout *Module::getDataLayout() const {
- if (DataLayoutStr.empty())
- return nullptr;
- return &DL;
-}
+const DataLayout &Module::getDataLayout() const { return DL; }
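// The pointer-to-reference switch turns the guarded pattern used all over
// this patch into a direct use. A before/after sketch (hypothetical call
// site):
//
//   // Before: the layout could be missing.
//   if (const DataLayout *DL = M->getDataLayout())
//     Size = DL->getTypeAllocSize(Ty);
//   // After: a Module always carries a layout (possibly the default one).
//   uint64_t Size = M->getDataLayout().getTypeAllocSize(Ty);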
//===----------------------------------------------------------------------===//
// Methods to control the materialization of GlobalValues in the Module.
legacy::PassManager passes;
// Add an appropriate DataLayout instance for this module...
- mergedModule->setDataLayout(TargetMach->getDataLayout());
+ mergedModule->setDataLayout(*TargetMach->getDataLayout());
- passes.add(new DataLayoutPass());
passes.add(
createTargetTransformInfoWrapperPass(TargetMach->getTargetIRAnalysis()));
legacy::PassManager codeGenPasses;
- codeGenPasses.add(new DataLayoutPass());
-
formatted_raw_ostream Out(out);
// If the bitcode files contain ARC code and were compiled with optimization,
TargetMachine *target = march->createTargetMachine(TripleStr, CPU, FeatureStr,
options);
- M->setDataLayout(target->getDataLayout());
+ M->setDataLayout(*target->getDataLayout());
std::unique_ptr<object::IRObjectFile> IRObj(
new object::IRObjectFile(Buffer, std::move(M)));
getComdatLeader(SrcM, ComdatName, SrcGV))
return true;
- const DataLayout *DstDL = DstM->getDataLayout();
- const DataLayout *SrcDL = SrcM->getDataLayout();
- if (!DstDL || !SrcDL) {
- return emitError(
- "Linking COMDATs named '" + ComdatName +
- "': can't do size dependent selection without DataLayout!");
- }
+ const DataLayout &DstDL = DstM->getDataLayout();
+ const DataLayout &SrcDL = SrcM->getDataLayout();
uint64_t DstSize =
- DstDL->getTypeAllocSize(DstGV->getType()->getPointerElementType());
+ DstDL.getTypeAllocSize(DstGV->getType()->getPointerElementType());
uint64_t SrcSize =
- SrcDL->getTypeAllocSize(SrcGV->getType()->getPointerElementType());
+ SrcDL.getTypeAllocSize(SrcGV->getType()->getPointerElementType());
if (Result == Comdat::SelectionKind::ExactMatch) {
if (SrcGV->getInitializer() != DstGV->getInitializer())
return emitError("Linking COMDATs named '" + ComdatName +
// Inherit the target data from the source module if the destination module
// doesn't have one already.
- if (!DstM->getDataLayout() && SrcM->getDataLayout())
+ if (DstM->getDataLayout().isDefault())
DstM->setDataLayout(SrcM->getDataLayout());
- if (SrcM->getDataLayout() && DstM->getDataLayout() &&
- *SrcM->getDataLayout() != *DstM->getDataLayout()) {
+ if (SrcM->getDataLayout() != DstM->getDataLayout()) {
emitWarning("Linking two modules of different data layouts: '" +
SrcM->getModuleIdentifier() + "' is '" +
SrcM->getDataLayoutStr() + "' whereas '" +
IRObjectFile::IRObjectFile(MemoryBufferRef Object, std::unique_ptr<Module> Mod)
: SymbolicFile(Binary::ID_IR, Object), M(std::move(Mod)) {
- // If we have a DataLayout, setup a mangler.
- const DataLayout *DL = M->getDataLayout();
- if (!DL)
- return;
-
- Mang.reset(new Mangler(DL));
+ // Set up a mangler with the DataLayout.
+ const DataLayout &DL = M->getDataLayout();
+ Mang.reset(new Mangler(&DL));
const std::string &InlineAsm = M->getModuleInlineAsm();
if (InlineAsm.empty())
printEscapedString(mName);
Out << "\", getGlobalContext());";
if (!TheModule->getTargetTriple().empty()) {
- nl(Out) << "mod->setDataLayout(\"" << TheModule->getDataLayout() << "\");";
+ nl(Out) << "mod->setDataLayout(\"" << TheModule->getDataLayoutStr()
+ << "\");";
}
if (!TheModule->getTargetTriple().empty()) {
nl(Out) << "mod->setTargetTriple(\"" << TheModule->getTargetTriple()
NVPTXAllocaHoisting() : FunctionPass(ID) {}
void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<DataLayoutPass>();
AU.addPreserved<MachineFunctionAnalysis>();
AU.addPreserved<StackProtector>();
}
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "nvptx"
using namespace llvm;
SmallVector<MemTransferInst *, 4> aggrMemcpys;
SmallVector<MemSetInst *, 4> aggrMemsets;
- const DataLayout *DL = &getAnalysis<DataLayoutPass>().getDataLayout();
+ const DataLayout &DL = F.getParent()->getDataLayout();
LLVMContext &Context = F.getParent()->getContext();
//
if (load->hasOneUse() == false)
continue;
- if (DL->getTypeStoreSize(load->getType()) < MaxAggrCopySize)
+ if (DL.getTypeStoreSize(load->getType()) < MaxAggrCopySize)
continue;
User *use = load->user_back();
StoreInst *store = dyn_cast<StoreInst>(*load->user_begin());
Value *srcAddr = load->getOperand(0);
Value *dstAddr = store->getOperand(1);
- unsigned numLoads = DL->getTypeStoreSize(load->getType());
+ unsigned numLoads = DL.getTypeStoreSize(load->getType());
Value *len = ConstantInt::get(Type::getInt32Ty(Context), numLoads);
convertTransferToLoop(store, srcAddr, dstAddr, len, load->isVolatile(),
NVPTXLowerAggrCopies() : FunctionPass(ID) {}
void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<DataLayoutPass>();
AU.addPreserved<MachineFunctionAnalysis>();
AU.addPreserved<StackProtector>();
}
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
SE = &getAnalysis<ScalarEvolution>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
LibInfo = TLIP ? &TLIP->getTLI() : nullptr;
bool PPCLoopDataPrefetch::runOnFunction(Function &F) {
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
SE = &getAnalysis<ScalarEvolution>();
- DL = F.getParent()->getDataLayout();
+ DL = &F.getParent()->getDataLayout();
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Scalar.h"
PPCTargetMachine *TM;
LoopInfo *LI;
ScalarEvolution *SE;
- const DataLayout *DL;
};
}
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
SE = &getAnalysis<ScalarEvolution>();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
-
bool MadeChange = false;
for (LoopInfo::iterator I = LI->begin(), E = LI->end();
bool PPCLoopPreIncPrep::runOnLoop(Loop *L) {
bool MadeChange = false;
- if (!DL)
- return MadeChange;
-
// Only prep. the inner-most loop
if (!L->empty())
return MadeChange;
continue;
if (Use->getParent()->getParent() == &F)
LocalMemAvailable -=
- Mod->getDataLayout()->getTypeAllocSize(GVTy->getElementType());
+ Mod->getDataLayout().getTypeAllocSize(GVTy->getElementType());
}
}
}
// value from the reqd_work_group_size function attribute if it is
// available.
unsigned WorkGroupSize = 256;
- int AllocaSize = WorkGroupSize *
- Mod->getDataLayout()->getTypeAllocSize(AllocaTy);
+ int AllocaSize =
+ WorkGroupSize * Mod->getDataLayout().getTypeAllocSize(AllocaTy);
if (AllocaSize > LocalMemAvailable) {
DEBUG(dbgs() << " Not enough local memory to promote alloca.\n");
}
void llvm::initializeTarget(PassRegistry &Registry) {
- initializeDataLayoutPassPass(Registry);
initializeTargetLibraryInfoWrapperPassPass(Registry);
initializeTargetTransformInfoWrapperPassPass(Registry);
}
}
void LLVMAddTargetData(LLVMTargetDataRef TD, LLVMPassManagerRef PM) {
- // The DataLayoutPass must now be in sync with the module. Unfortunatelly we
- // cannot enforce that from the C api.
- unwrap(PM)->add(new DataLayoutPass());
}
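// The C entry point remains (presumably so existing C clients keep linking)
// but is now a no-op: the Module's own layout is authoritative.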
void LLVMAddTargetLibraryInfo(LLVMTargetLibraryInfoRef TLI,
*ErrorMessage = strdup(error.c_str());
return true;
}
- Mod->setDataLayout(td);
- pass.add(new DataLayoutPass());
+ Mod->setDataLayout(*td);
TargetMachine::CodeGenFileType ft;
switch (codegen) {
bool ArgPromotion::runOnSCC(CallGraphSCC &SCC) {
bool Changed = false, LocalChange;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
-
do { // Iterate until we stop promoting from this SCC.
LocalChange = false;
// Attempt to promote arguments from all functions in this SCC.
// Make sure that it is local to this module.
if (!F || !F->hasLocalLinkage()) return nullptr;
+ DL = &F->getParent()->getDataLayout();
+
// First check: see if there are any pointer arguments! If not, quick exit.
SmallVector<Argument*, 16> PointerArgs;
for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I)
}
bool ConstantMerge::runOnModule(Module &M) {
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &M.getDataLayout();
// Find all the globals that are marked "used". These cannot be merged.
SmallPtrSet<const GlobalValue*, 8> UsedGlobals;
const GlobalStatus &GS);
bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn);
- const DataLayout *DL;
TargetLibraryInfo *TLI;
SmallSet<const Comdat *, 8> NotDiscardableComdats;
};
/// quick scan over the use list to clean up the easy and obvious cruft. This
/// returns true if it made a change.
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
- const DataLayout *DL,
+ const DataLayout &DL,
TargetLibraryInfo *TLI) {
bool Changed = false;
// Note that we need to use a weak value handle for the worklist items. When
// and will invalidate our notion of what Init is.
Constant *SubInit = nullptr;
if (!isa<ConstantExpr>(GEP->getOperand(0))) {
- ConstantExpr *CE =
- dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, DL, TLI));
+ ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>(
+ ConstantFoldInstruction(GEP, &DL, TLI));
if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
/// if the loaded value is dynamically null, then we know that they cannot be
/// reachable with a null optimize away the load.
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
- const DataLayout *DL,
+ const DataLayout &DL,
TargetLibraryInfo *TLI) {
bool Changed = false;
/// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
/// instructions that are foldable.
-static void ConstantPropUsersOf(Value *V, const DataLayout *DL,
+static void ConstantPropUsersOf(Value *V, const DataLayout &DL,
TargetLibraryInfo *TLI) {
for (Value::user_iterator UI = V->user_begin(), E = V->user_end(); UI != E; )
if (Instruction *I = dyn_cast<Instruction>(*UI++))
- if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) {
+ if (Constant *NewC = ConstantFoldInstruction(I, &DL, TLI)) {
I->replaceAllUsesWith(NewC);
// Advance UI to the next non-I use to avoid invalidating it!
/// the specified malloc. Because it is always the result of the specified
/// malloc, there is no reason to actually DO the malloc. Instead, turn the
/// malloc into a global, and any loads of GV as uses of the new global.
-static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
- CallInst *CI,
- Type *AllocTy,
- ConstantInt *NElements,
- const DataLayout *DL,
- TargetLibraryInfo *TLI) {
+static GlobalVariable *
+OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, CallInst *CI, Type *AllocTy,
+ ConstantInt *NElements, const DataLayout &DL,
+ TargetLibraryInfo *TLI) {
DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI << '\n');
Type *GlobalType;
/// PerformHeapAllocSRoA - CI is an allocation of an array of structures. Break
/// it up into multiple allocations of arrays of the fields.
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
- Value *NElems, const DataLayout *DL,
+ Value *NElems, const DataLayout &DL,
const TargetLibraryInfo *TLI) {
DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI << '\n');
Type *MAT = getMallocAllocatedType(CI, TLI);
GV->getThreadLocalMode());
FieldGlobals.push_back(NGV);
- unsigned TypeSize = DL->getTypeAllocSize(FieldTy);
+ unsigned TypeSize = DL.getTypeAllocSize(FieldTy);
if (StructType *ST = dyn_cast<StructType>(FieldTy))
- TypeSize = DL->getStructLayout(ST)->getSizeInBytes();
- Type *IntPtrTy = DL->getIntPtrType(CI->getType());
+ TypeSize = DL.getStructLayout(ST)->getSizeInBytes();
+ Type *IntPtrTy = DL.getIntPtrType(CI->getType());
Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
ConstantInt::get(IntPtrTy, TypeSize),
NElems, nullptr,
/// TryToOptimizeStoreOfMallocToGlobal - This function is called when we see a
/// pointer global variable with a single value stored it that is a malloc or
/// cast of malloc.
-static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
- CallInst *CI,
+static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, CallInst *CI,
Type *AllocTy,
AtomicOrdering Ordering,
Module::global_iterator &GVI,
- const DataLayout *DL,
+ const DataLayout &DL,
TargetLibraryInfo *TLI) {
- if (!DL)
- return false;
-
// If this is a malloc of an abstract type, don't touch it.
if (!AllocTy->isSized())
return false;
// This eliminates dynamic allocation, avoids an indirection accessing the
// data, and exposes the resultant global to further GlobalOpt.
// We cannot optimize the malloc if we cannot determine malloc array size.
- Value *NElems = getMallocArraySize(CI, DL, TLI, true);
+ Value *NElems = getMallocArraySize(CI, &DL, TLI, true);
if (!NElems)
return false;
// Restrict this transformation to only working on small allocations
// (2048 bytes currently), as we don't want to introduce a 16M global or
// something.
- if (NElements->getZExtValue() * DL->getTypeAllocSize(AllocTy) < 2048) {
+ if (NElements->getZExtValue() * DL.getTypeAllocSize(AllocTy) < 2048) {
GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, DL, TLI);
return true;
}
// If this is a fixed size array, transform the Malloc to be an alloc of
// structs. malloc [100 x struct],1 -> malloc struct, 100
if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) {
- Type *IntPtrTy = DL->getIntPtrType(CI->getType());
- unsigned TypeSize = DL->getStructLayout(AllocSTy)->getSizeInBytes();
+ Type *IntPtrTy = DL.getIntPtrType(CI->getType());
+ unsigned TypeSize = DL.getStructLayout(AllocSTy)->getSizeInBytes();
Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
Instruction *Malloc = CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy,
CI = cast<CallInst>(Malloc);
}
- GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, DL, TLI, true),
+ GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, &DL, TLI, true),
DL, TLI);
return true;
}
static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
AtomicOrdering Ordering,
Module::global_iterator &GVI,
- const DataLayout *DL,
+ const DataLayout &DL,
TargetLibraryInfo *TLI) {
// Ignore no-op GEPs and bitcasts.
StoredOnceVal = StoredOnceVal->stripPointerCasts();
bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
Module::global_iterator &GVI,
const GlobalStatus &GS) {
+ auto &DL = GV->getParent()->getDataLayout();
// If this is a first class global and has only one accessing function
// and this function is main (which we know is not recursive), we replace
// the global with a local alloca in this function.
++NumMarked;
return true;
} else if (!GV->getInitializer()->getType()->isSingleValueType()) {
- if (DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>()) {
- const DataLayout &DL = DLP->getDataLayout();
- if (GlobalVariable *FirstNewGV = SRAGlobal(GV, DL)) {
- GVI = FirstNewGV; // Don't skip the newly produced globals!
- return true;
- }
+ if (GlobalVariable *FirstNewGV = SRAGlobal(GV, DL)) {
+ GVI = FirstNewGV; // Don't skip the newly produced globals!
+ return true;
}
} else if (GS.StoredType == GlobalStatus::StoredOnce) {
// If the initial value for the global was an undef value, and if only
// Simplify the initializer.
if (GV->hasInitializer())
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) {
- Constant *New = ConstantFoldConstantExpression(CE, DL, TLI);
+ auto &DL = M.getDataLayout();
+ Constant *New = ConstantFoldConstantExpression(CE, &DL, TLI);
if (New && New != CE)
GV->setInitializer(New);
}
static inline bool
isSimpleEnoughValueToCommit(Constant *C,
- SmallPtrSetImpl<Constant*> &SimpleConstants,
- const DataLayout *DL);
-
+ SmallPtrSetImpl<Constant *> &SimpleConstants,
+ const DataLayout &DL);
/// isSimpleEnoughValueToCommit - Return true if the specified constant can be
/// handled by the code generator. We don't want to generate something like:
/// This function should be called if C was not found (but just got inserted)
/// in SimpleConstants to avoid having to rescan the same constants all the
/// time.
-static bool isSimpleEnoughValueToCommitHelper(Constant *C,
- SmallPtrSetImpl<Constant*> &SimpleConstants,
- const DataLayout *DL) {
+static bool
+isSimpleEnoughValueToCommitHelper(Constant *C,
+ SmallPtrSetImpl<Constant *> &SimpleConstants,
+ const DataLayout &DL) {
// Simple global addresses are supported, do not allow dllimport or
// thread-local globals.
if (auto *GV = dyn_cast<GlobalValue>(C))
case Instruction::PtrToInt:
// int <=> ptr is fine if the int type is the same size as the
// pointer type.
- if (!DL || DL->getTypeSizeInBits(CE->getType()) !=
- DL->getTypeSizeInBits(CE->getOperand(0)->getType()))
+ if (DL.getTypeSizeInBits(CE->getType()) !=
+ DL.getTypeSizeInBits(CE->getOperand(0)->getType()))
return false;
return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
static inline bool
isSimpleEnoughValueToCommit(Constant *C,
- SmallPtrSetImpl<Constant*> &SimpleConstants,
- const DataLayout *DL) {
+ SmallPtrSetImpl<Constant *> &SimpleConstants,
+ const DataLayout &DL) {
// If we already checked this constant, we win.
if (!SimpleConstants.insert(C).second)
return true;
/// Once an evaluation call fails, the evaluation object should not be reused.
class Evaluator {
public:
- Evaluator(const DataLayout *DL, const TargetLibraryInfo *TLI)
- : DL(DL), TLI(TLI) {
+ Evaluator(const DataLayout &DL, const TargetLibraryInfo *TLI)
+ : DL(DL), TLI(TLI) {
ValueStack.emplace_back();
}
/// simple enough to live in a static initializer of a global.
SmallPtrSet<Constant*, 8> SimpleConstants;
- const DataLayout *DL;
+ const DataLayout &DL;
const TargetLibraryInfo *TLI;
};
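// With a reference member there is no null state left to handle; an
// Evaluator is now constructed directly from the module's layout, as
// EvaluateStaticConstructor below does:
//   Evaluator Eval(DL, TLI); // DL: const DataLayout & from the Module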
Constant *Ptr = getVal(SI->getOperand(1));
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
DEBUG(dbgs() << "Folding constant ptr expression: " << *Ptr);
- Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
+ Ptr = ConstantFoldConstantExpression(CE, &DL, TLI);
DEBUG(dbgs() << "; To: " << *Ptr << "\n");
}
if (!isSimpleEnoughPointerToCommit(Ptr)) {
Ptr = ConstantExpr::getGetElementPtr(Ptr, IdxList);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
- Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
+ Ptr = ConstantFoldConstantExpression(CE, &DL, TLI);
// If we can't improve the situation by introspecting NewTy,
// we have to give up.
Constant *Ptr = getVal(LI->getOperand(0));
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
- Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
+ Ptr = ConstantFoldConstantExpression(CE, &DL, TLI);
DEBUG(dbgs() << "Found a constant pointer expression, constant "
"folding: " << *Ptr << "\n");
}
Value *Ptr = PtrArg->stripPointerCasts();
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
Type *ElemTy = cast<PointerType>(GV->getType())->getElementType();
- if (DL && !Size->isAllOnesValue() &&
+ if (!Size->isAllOnesValue() &&
Size->getValue().getLimitedValue() >=
- DL->getTypeStoreSize(ElemTy)) {
+ DL.getTypeStoreSize(ElemTy)) {
Invariants.insert(GV);
DEBUG(dbgs() << "Found a global var that is an invariant: " << *GV
<< "\n");
if (!CurInst->use_empty()) {
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(InstResult))
- InstResult = ConstantFoldConstantExpression(CE, DL, TLI);
+ InstResult = ConstantFoldConstantExpression(CE, &DL, TLI);
setVal(CurInst, InstResult);
}
/// EvaluateStaticConstructor - Evaluate static constructors in the function, if
/// we can. Return true if we can, false otherwise.
-static bool EvaluateStaticConstructor(Function *F, const DataLayout *DL,
+static bool EvaluateStaticConstructor(Function *F, const DataLayout &DL,
const TargetLibraryInfo *TLI) {
// Call the function.
Evaluator Eval(DL, TLI);
bool GlobalOpt::runOnModule(Module &M) {
bool Changed = false;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ auto &DL = M.getDataLayout();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
bool LocalChange = true;
/// any new allocas to the set if not possible.
static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
InlinedArrayAllocasTy &InlinedArrayAllocas,
- int InlineHistory, bool InsertLifetime,
- const DataLayout *DL) {
+ int InlineHistory, bool InsertLifetime) {
Function *Callee = CS.getCalledFunction();
Function *Caller = CS.getCaller();
unsigned Align1 = AI->getAlignment(),
Align2 = AvailableAlloca->getAlignment();
- // If we don't have data layout information, and only one alloca is using
- // the target default, then we can't safely merge them because we can't
- // pick the greater alignment.
- if (!DL && (!Align1 || !Align2) && Align1 != Align2)
- continue;
// The available alloca has to be in the right function, not in some other
// function in this SCC.
if (Align1 != Align2) {
if (!Align1 || !Align2) {
- assert(DL && "DataLayout required to compare default alignments");
- unsigned TypeAlign = DL->getABITypeAlignment(AI->getAllocatedType());
+ const DataLayout &DL = Caller->getParent()->getDataLayout();
+ unsigned TypeAlign = DL.getABITypeAlignment(AI->getAllocatedType());
Align1 = Align1 ? Align1 : TypeAlign;
Align2 = Align2 ? Align2 : TypeAlign;
bool Inliner::runOnSCC(CallGraphSCC &SCC) {
CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
AssumptionCacheTracker *ACT = &getAnalysis<AssumptionCacheTracker>();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
const TargetLibraryInfo *TLI = TLIP ? &TLIP->getTLI() : nullptr;
AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();
InlinedArrayAllocasTy InlinedArrayAllocas;
- InlineFunctionInfo InlineInfo(&CG, DL, AA, ACT);
+ InlineFunctionInfo InlineInfo(&CG, AA, ACT);
// Now that we have all of the call sites, loop over them and inline them if
// it looks profitable to do so.
// Attempt to inline the function.
if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas,
- InlineHistoryID, InsertLifetime, DL)) {
+ InlineHistoryID, InsertLifetime)) {
emitOptimizationRemarkMissed(CallerCtx, DEBUG_TYPE, *Caller, DLoc,
Twine(Callee->getName() +
" will not be inlined into " +
bool LowerBitSets::doInitialization(Module &Mod) {
M = &Mod;
-
- DL = M->getDataLayout();
- if (!DL)
- report_fatal_error("Data layout required");
+ DL = &Mod.getDataLayout();
Int1Ty = Type::getInt1Ty(M->getContext());
Int8Ty = Type::getInt8Ty(M->getContext());
bool MergeFunctions::runOnModule(Module &M) {
bool Changed = false;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &M.getDataLayout();
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
if (!I->isDeclaration() && !I->hasAvailableExternallyLinkage())
return MadeIRChange;
}
-static bool combineInstructionsOverFunction(
- Function &F, InstCombineWorklist &Worklist, AssumptionCache &AC,
- TargetLibraryInfo &TLI, DominatorTree &DT, const DataLayout *DL = nullptr,
- LoopInfo *LI = nullptr) {
+static bool
+combineInstructionsOverFunction(Function &F, InstCombineWorklist &Worklist,
+ AssumptionCache &AC, TargetLibraryInfo &TLI,
+ DominatorTree &DT, LoopInfo *LI = nullptr) {
// Minimizing size?
bool MinimizeSize = F.hasFnAttribute(Attribute::MinSize);
+ const DataLayout &DL = F.getParent()->getDataLayout();
/// Builder - This is an IRBuilder that automatically inserts new
/// instructions into the worklist when they are created.
IRBuilder<true, TargetFolder, InstCombineIRInserter> Builder(
- F.getContext(), TargetFolder(DL), InstCombineIRInserter(Worklist, &AC));
+ F.getContext(), TargetFolder(&DL), InstCombineIRInserter(Worklist, &AC));
// Lower dbg.declare intrinsics otherwise their value may be clobbered
// by instcombiner.
<< F.getName() << "\n");
bool Changed = false;
- if (prepareICWorklistFromFunction(F, DL, &TLI, Worklist))
+ if (prepareICWorklistFromFunction(F, &DL, &TLI, Worklist))
Changed = true;
- InstCombiner IC(Worklist, &Builder, MinimizeSize, &AC, &TLI, &DT, DL, LI);
+ InstCombiner IC(Worklist, &Builder, MinimizeSize, &AC, &TLI, &DT, &DL, LI);
if (IC.run())
Changed = true;
PreservedAnalyses InstCombinePass::run(Function &F,
AnalysisManager<Function> *AM) {
- auto *DL = F.getParent()->getDataLayout();
-
auto &AC = AM->getResult<AssumptionAnalysis>(F);
auto &DT = AM->getResult<DominatorTreeAnalysis>(F);
auto &TLI = AM->getResult<TargetLibraryAnalysis>(F);
auto *LI = AM->getCachedResult<LoopAnalysis>(F);
- if (!combineInstructionsOverFunction(F, Worklist, AC, TLI, DT, DL, LI))
+ if (!combineInstructionsOverFunction(F, Worklist, AC, TLI, DT, LI))
// No changes, all analyses are preserved.
return PreservedAnalyses::all();
auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
// Optional analyses.
- auto *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- auto *DL = DLP ? &DLP->getDataLayout() : nullptr;
auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
- return combineInstructionsOverFunction(F, Worklist, AC, TLI, DT, DL, LI);
+ return combineInstructionsOverFunction(F, Worklist, AC, TLI, DT, LI);
}
char InstructionCombiningPass::ID = 0;
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<DominatorTreeWrapperPass>();
- AU.addRequired<DataLayoutPass>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
uint64_t getAllocaSizeInBytes(AllocaInst *AI) const {
}
bool AddressSanitizerModule::runOnModule(Module &M) {
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- if (!DLP) return false;
- DL = &DLP->getDataLayout();
+ DL = &M.getDataLayout();
C = &(M.getContext());
int LongSize = DL->getPointerSizeInBits();
IntptrTy = Type::getIntNTy(*C, LongSize);
// virtual
bool AddressSanitizer::doInitialization(Module &M) {
// Initialize the private fields. No one has accessed them before.
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- if (!DLP) report_fatal_error("data layout missing");
- DL = &DLP->getDataLayout();
+ DL = &M.getDataLayout();
GlobalsMD.init(M);
bool runOnFunction(Function &F) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<DataLayoutPass>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
}
bool BoundsChecking::runOnFunction(Function &F) {
- DL = &getAnalysis<DataLayoutPass>().getDataLayout();
+ DL = &F.getParent()->getDataLayout();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
TrapBB = nullptr;
bool IsMIPS64 = TargetTriple.getArch() == llvm::Triple::mips64 ||
TargetTriple.getArch() == llvm::Triple::mips64el;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- if (!DLP)
- report_fatal_error("data layout missing");
- DL = &DLP->getDataLayout();
+ DL = &M.getDataLayout();
Mod = &M;
Ctx = &M.getContext();
}
bool DataFlowSanitizer::runOnModule(Module &M) {
- if (!DL)
- return false;
if (ABIList.isIn(M, "skip"))
return false;
///
/// inserts a call to __msan_init to the module's constructor list.
bool MemorySanitizer::doInitialization(Module &M) {
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- if (!DLP)
- report_fatal_error("data layout missing");
- DL = &DLP->getDataLayout();
+ DL = &M.getDataLayout();
Triple TargetTriple(M.getTargetTriple());
switch (TargetTriple.getOS()) {
return "SanitizerCoverageModule";
}
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<DataLayoutPass>();
- }
-
private:
void InjectCoverageForIndirectCalls(Function &F,
ArrayRef<Instruction *> IndirCalls);
bool SanitizerCoverageModule::runOnModule(Module &M) {
if (!CoverageLevel) return false;
C = &(M.getContext());
- DataLayoutPass *DLP = &getAnalysis<DataLayoutPass>();
- IntptrTy = Type::getIntNTy(*C, DLP->getDataLayout().getPointerSizeInBits());
+ auto &DL = M.getDataLayout();
+ IntptrTy = Type::getIntNTy(*C, DL.getPointerSizeInBits());
Type *VoidTy = Type::getVoidTy(*C);
IRBuilder<> IRB(*C);
Type *Int8PtrTy = PointerType::getUnqual(IRB.getInt8Ty());
}
bool ThreadSanitizer::doInitialization(Module &M) {
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- if (!DLP)
- report_fatal_error("data layout missing");
- DL = &DLP->getDataLayout();
+ DL = &M.getDataLayout();
// Always insert a call to __tsan_init into the module's CTORs.
IRBuilder<> IRB(M.getContext());
return new ObjCARCAliasAnalysis();
}
+bool ObjCARCAliasAnalysis::doInitialization(Module &M) {
+ InitializeAliasAnalysis(this, &M.getDataLayout());
+ return true;
+}
+
void
ObjCARCAliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
}
private:
- void initializePass() override {
- InitializeAliasAnalysis(this);
- }
+ bool doInitialization(Module &M) override;
/// This method is used when a pass implements an analysis interface through
/// multiple inheritance. If needed, it should override this to adjust the
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
SE = &getAnalysis<ScalarEvolution>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
NewDestAlignments.clear();
NewSrcAlignments.clear();
return false;
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
- DL = F.getParent()->getDataLayout();
+ DL = &F.getParent()->getDataLayout();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
DenseMap<Instruction *, APInt> AliveBits;
WorkList.insert(&*i);
}
bool Changed = false;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
+ const DataLayout &DL = F.getParent()->getDataLayout();
TargetLibraryInfo *TLI =
&getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
WorkList.erase(WorkList.begin()); // Get an element from the worklist...
if (!I->use_empty()) // Don't muck with dead instructions...
- if (Constant *C = ConstantFoldInstruction(I, DL, TLI)) {
+ if (Constant *C = ConstantFoldInstruction(I, &DL, TLI)) {
// Add all of the users of this instruction to the worklist, they might
// be constant propagatable now...
for (User *U : I->users())
PreservedAnalyses EarlyCSEPass::run(Function &F,
AnalysisManager<Function> *AM) {
- const DataLayout *DL = F.getParent()->getDataLayout();
+ const DataLayout &DL = F.getParent()->getDataLayout();
auto &TLI = AM->getResult<TargetLibraryAnalysis>(F);
auto &TTI = AM->getResult<TargetIRAnalysis>(F);
auto &DT = AM->getResult<DominatorTreeAnalysis>(F);
auto &AC = AM->getResult<AssumptionAnalysis>(F);
- EarlyCSE CSE(F, DL, TLI, TTI, DT, AC);
+ EarlyCSE CSE(F, &DL, TLI, TTI, DT, AC);
if (!CSE.run())
return PreservedAnalyses::all();
if (skipOptnoneFunction(F))
return false;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- auto *DL = DLP ? &DLP->getDataLayout() : nullptr;
+ auto &DL = F.getParent()->getDataLayout();
auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
- EarlyCSE CSE(F, DL, TLI, TTI, DT, AC);
+ EarlyCSE CSE(F, &DL, TLI, TTI, DT, AC);
return CSE.run();
}
if (!NoLoads)
MD = &getAnalysis<MemoryDependenceAnalysis>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
SE = &getAnalysis<ScalarEvolution>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &L->getHeader()->getModule()->getDataLayout();
auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
TLI = TLIP ? &TLIP->getTLI() : nullptr;
auto *TTIP = getAnalysisIfAvailable<TargetTransformInfoWrapperPass>();
return false;
DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n");
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
LVI = &getAnalysis<LazyValueInfo>();
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
AA = &getAnalysis<AliasAnalysis>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &L->getHeader()->getModule()->getDataLayout();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
bool LoadCombine::doInitialization(Function &F) {
DEBUG(dbgs() << "LoadCombine function: " << F.getName() << "\n");
C = &F.getContext();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- if (!DLP) {
-   DEBUG(dbgs() << " Skipping LoadCombine -- no target data!\n");
-   return false;
- }
- DL = &DLP->getDataLayout();
+ DL = &F.getParent()->getDataLayout();
return true;
}
class LoopIdiomRecognize : public LoopPass {
Loop *CurLoop;
- const DataLayout *DL;
DominatorTree *DT;
ScalarEvolution *SE;
TargetLibraryInfo *TLI;
static char ID;
explicit LoopIdiomRecognize() : LoopPass(ID) {
initializeLoopIdiomRecognizePass(*PassRegistry::getPassRegistry());
- DL = nullptr; DT = nullptr; SE = nullptr; TLI = nullptr; TTI = nullptr;
+ DT = nullptr;
+ SE = nullptr;
+ TLI = nullptr;
+ TTI = nullptr;
}
bool runOnLoop(Loop *L, LPPassManager &LPM) override;
AU.addRequired<TargetTransformInfoWrapperPass>();
}
- const DataLayout *getDataLayout() {
- if (DL)
- return DL;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
- return DL;
- }
-
DominatorTree *getDominatorTree() {
return DT ? DT
: (DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree());
if (BECst->getValue()->getValue() == 0)
return false;
- // We require target data for now.
- if (!getDataLayout())
- return false;
-
// set DT
(void)getDominatorTree();
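Loop passes hold no module pointer of their own, so the hunks above and the loop-idiom code below go through the loop header instead, usually querying at the point of use rather than caching a pointer. The lookup chain as a sketch (the helper name is invented):

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// Loop -> header BasicBlock -> Module -> DataLayout
static const DataLayout &layoutFor(const Loop *L) {
  return L->getHeader()->getModule()->getDataLayout();
}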
Value *StorePtr = SI->getPointerOperand();
// Reject stores that are so large that they overflow an unsigned.
- uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
+ auto &DL = CurLoop->getHeader()->getModule()->getDataLayout();
+ uint64_t SizeInBits = DL.getTypeSizeInBits(StoredVal->getType());
if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
return false;
// but it can be turned into memset_pattern if the target supports it.
Value *SplatValue = isBytewiseValue(StoredVal);
Constant *PatternValue = nullptr;
-
+ auto &DL = CurLoop->getHeader()->getModule()->getDataLayout();
unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
// If we're allowed to form a memset, and the stored value would be acceptable
CurLoop->isLoopInvariant(SplatValue)) {
// Keep and use SplatValue.
PatternValue = nullptr;
- } else if (DestAS == 0 &&
- TLI->has(LibFunc::memset_pattern16) &&
- (PatternValue = getMemSetPatternValue(StoredVal, *DL))) {
+ } else if (DestAS == 0 && TLI->has(LibFunc::memset_pattern16) &&
+ (PatternValue = getMemSetPatternValue(StoredVal, DL))) {
// Don't create memset_pattern16s with address spaces.
// It looks like we can use PatternValue!
SplatValue = nullptr;
// The # stored bytes is (BECount+1)*Size. Expand the trip count out to
// pointer size if it isn't already.
- Type *IntPtr = Builder.getIntPtrTy(DL, DestAS);
+ Type *IntPtr = Builder.getIntPtrTy(&DL, DestAS);
BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);
const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
// The # stored bytes is (BECount+1)*Size. Expand the trip count out to
// pointer size if it isn't already.
- Type *IntPtrTy = Builder.getIntPtrTy(DL, SI->getPointerAddressSpace());
+ auto &DL = CurLoop->getHeader()->getModule()->getDataLayout();
+ Type *IntPtrTy = Builder.getIntPtrTy(&DL, SI->getPointerAddressSpace());
BECount = SE->getTruncateOrZeroExtend(BECount, IntPtrTy);
const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtrTy, 1),
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
+ const DataLayout *DL = &L->getHeader()->getModule()->getDataLayout();
const TargetLibraryInfo *TLI =
&getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
SE = &getAnalysis<ScalarEvolution>();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &L->getHeader()->getModule()->getDataLayout();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
BasicBlock *Header = L->getHeader();
bool MadeChange = false;
MD = &getAnalysis<MemoryDependenceAnalysis>();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
// If we don't have at least memset and memcpy, there is little point of doing
/// Constant Propagation.
///
class SCCPSolver : public InstVisitor<SCCPSolver> {
- const DataLayout *DL;
+ const DataLayout &DL;
const TargetLibraryInfo *TLI;
SmallPtrSet<BasicBlock*, 8> BBExecutable; // The BBs that are executable.
DenseMap<Value*, LatticeVal> ValueState; // The state each value is in.
typedef std::pair<BasicBlock*, BasicBlock*> Edge;
DenseSet<Edge> KnownFeasibleEdges;
public:
- SCCPSolver(const DataLayout *DL, const TargetLibraryInfo *tli)
- : DL(DL), TLI(tli) {}
+ SCCPSolver(const DataLayout &DL, const TargetLibraryInfo *tli)
+ : DL(DL), TLI(tli) {}
/// MarkBlockExecutable - This method can be used by clients to mark all of
/// the blocks that are known to be intrinsically live in the processed unit.
}
// Transform load from a constant into a constant if possible.
- if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, DL))
+ if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, &DL))
return markConstant(IV, &I, C);
// Otherwise we cannot say for certain what value this load will produce.
return false;
DEBUG(dbgs() << "SCCP on function '" << F.getName() << "'\n");
- const DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
+ const DataLayout &DL = F.getParent()->getDataLayout();
const TargetLibraryInfo *TLI =
&getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
SCCPSolver Solver(DL, TLI);
}
bool IPSCCP::runOnModule(Module &M) {
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
+ const DataLayout &DL = M.getDataLayout();
const TargetLibraryInfo *TLI =
&getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
SCCPSolver Solver(DL, TLI);
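Holding the layout as a reference member, as SCCPSolver now does, makes "no layout" unrepresentable: construction demands one, and every query site drops its null test. A minimal sketch of the pattern (class and method names invented):

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include <cstdint>
using namespace llvm;

// Hypothetical solver, for illustration only.
class LayoutBoundSolver {
  const DataLayout &DL; // bound at construction, never null
public:
  explicit LayoutBoundSolver(const DataLayout &DL) : DL(DL) {}
  uint64_t storeSize(Type *Ty) const { return DL.getTypeStoreSize(Ty); }
};
// Construction mirrors IPSCCP::runOnModule above:
//   LayoutBoundSolver Solver(M.getDataLayout());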
DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
C = &F.getContext();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- if (!DLP) {
- DEBUG(dbgs() << " Skipping SROA -- no target data!\n");
- return false;
- }
- DL = &DLP->getDataLayout();
+ DL = &F.getParent()->getDataLayout();
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DT = DTWP ? &DTWP->getDomTree() : nullptr;
if (skipOptnoneFunction(F))
return false;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
bool Changed = performPromotion(F);
}
bool Scalarizer::runOnFunction(Function &F) {
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
for (Function::iterator BBI = F.begin(), BBE = F.end(); BBI != BBE; ++BBI) {
BasicBlock *BB = BBI;
for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<DataLayoutPass>();
AU.addRequired<TargetTransformInfoWrapperPass>();
AU.setPreservesCFG();
}
bool doInitialization(Module &M) override {
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- if (DLP == nullptr)
- report_fatal_error("data layout missing");
- DL = &DLP->getDataLayout();
+ DL = &M.getDataLayout();
return false;
}
"Split GEPs to a variadic base and a constant offset for better CSE", false,
false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(DataLayoutPass)
INITIALIZE_PASS_END(
SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
"Split GEPs to a variadic base and a constant offset for better CSE", false,
PreservedAnalyses SimplifyCFGPass::run(Function &F,
AnalysisManager<Function> *AM) {
- auto *DL = F.getParent()->getDataLayout();
+ auto &DL = F.getParent()->getDataLayout();
auto &TTI = AM->getResult<TargetIRAnalysis>(F);
auto &AC = AM->getResult<AssumptionAnalysis>(F);
- if (!simplifyFunctionCFG(F, TTI, DL, &AC, BonusInstThreshold))
+ if (!simplifyFunctionCFG(F, TTI, &DL, &AC, BonusInstThreshold))
return PreservedAnalyses::none();
return PreservedAnalyses::all();
&getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
const TargetTransformInfo &TTI =
getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
- return simplifyFunctionCFG(F, TTI, DL, AC, BonusInstThreshold);
+ const DataLayout &DL = F.getParent()->getDataLayout();
+ return simplifyFunctionCFG(F, TTI, &DL, AC, BonusInstThreshold);
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
AA = &getAnalysis<AliasAnalysis>();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
bool MadeChange, EverMadeChange = false;
if (skipOptnoneFunction(F))
return false;
- DL = F.getParent()->getDataLayout();
+ DL = &F.getParent()->getDataLayout();
bool AllCallsAreTailCalls = false;
bool Modified = markTails(F, AllCallsAreTailCalls);
/// If the inlined function has non-byval align arguments, then
/// add @llvm.assume-based alignment assumptions to preserve this information.
static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
- if (!PreserveAlignmentAssumptions || !IFI.DL)
+ if (!PreserveAlignmentAssumptions)
return;
+ auto &DL = CS.getCaller()->getParent()->getDataLayout();
// To avoid inserting redundant assumptions, we should check for assumptions
// already in the caller. To do this, we might need a DT of the caller.
// If we can already prove the asserted alignment in the context of the
// caller, then don't bother inserting the assumption.
Value *Arg = CS.getArgument(I->getArgNo());
- if (getKnownAlignment(Arg, IFI.DL,
- &IFI.ACT->getAssumptionCache(*CalledFunc),
+ if (getKnownAlignment(Arg, &DL, &IFI.ACT->getAssumptionCache(*CalledFunc),
CS.getInstruction(), &DT) >= Align)
continue;
- IRBuilder<>(CS.getInstruction()).CreateAlignmentAssumption(*IFI.DL, Arg,
- Align);
+ IRBuilder<>(CS.getInstruction())
+ .CreateAlignmentAssumption(DL, Arg, Align);
}
}
}
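With IFI.DL gone, the inliner hunks here recover the layout from the caller's module at each point of use. The chain from a call site, sketched with an invented helper:

#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// CallSite -> caller Function -> Module -> DataLayout
static const DataLayout &callerLayout(CallSite CS) {
  return CS.getCaller()->getParent()->getDataLayout();
}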
Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
IRBuilder<> Builder(InsertBlock->begin());
- Value *Size;
- if (IFI.DL == nullptr)
- Size = ConstantExpr::getSizeOf(AggTy);
- else
- Size = Builder.getInt64(IFI.DL->getTypeStoreSize(AggTy));
+ Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));
// Always generate a memcpy of alignment 1 here because we don't know
// the alignment of the src pointer. Other optimizations can infer
// If the pointer is already known to be sufficiently aligned, or if we can
// round it up to a larger alignment, then we don't need a temporary.
- if (getOrEnforceKnownAlignment(Arg, ByValAlignment, IFI.DL,
+ auto &DL = Caller->getParent()->getDataLayout();
+ if (getOrEnforceKnownAlignment(Arg, ByValAlignment, &DL,
&IFI.ACT->getAssumptionCache(*Caller),
TheCall) >= ByValAlignment)
return Arg;
}
// Create the alloca. If we have DataLayout, use nice alignment.
- unsigned Align = 1;
- if (IFI.DL)
- Align = IFI.DL->getPrefTypeAlignment(AggTy);
-
+ unsigned Align =
+ Caller->getParent()->getDataLayout().getPrefTypeAlignment(AggTy);
+
// If the byval had an alignment specified, we *must* use at least that
// alignment, as it is required by the byval argument (and uses of the
// pointer inside the callee).
// Keep a list of pair (dst, src) to emit byval initializations.
SmallVector<std::pair<Value*, Value*>, 4> ByValInit;
+ auto &DL = Caller->getParent()->getDataLayout();
+
assert(CalledFunc->arg_size() == CS.arg_size() &&
"No varargs calls can be inlined!");
// have no dead or constant instructions leftover after inlining occurs
// (which can happen, e.g., because an argument was constant), but we'll be
// happy with whatever the cloner can do.
- CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
+ CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
/*ModuleLevelChanges=*/false, Returns, ".i",
- &InlinedFunctionInfo, IFI.DL, TheCall);
+ &InlinedFunctionInfo, &DL, TheCall);
// Remember the first block that is newly cloned over.
FirstNewBlock = LastBlock; ++FirstNewBlock;
CloneAliasScopeMetadata(CS, VMap);
// Add noalias metadata if necessary.
- AddAliasScopeMetadata(CS, VMap, IFI.DL, IFI.AA);
+ AddAliasScopeMetadata(CS, VMap, &DL, IFI.AA);
// FIXME: We could register any cloned assumptions instead of clearing the
// whole function's cache.
ConstantInt *AllocaSize = nullptr;
if (ConstantInt *AIArraySize =
dyn_cast<ConstantInt>(AI->getArraySize())) {
- if (IFI.DL) {
- Type *AllocaType = AI->getAllocatedType();
- uint64_t AllocaTypeSize = IFI.DL->getTypeAllocSize(AllocaType);
- uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
- assert(AllocaArraySize > 0 && "array size of AllocaInst is zero");
- // Check that array size doesn't saturate uint64_t and doesn't
- // overflow when it's multiplied by type size.
- if (AllocaArraySize != ~0ULL &&
- UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
- AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
- AllocaArraySize * AllocaTypeSize);
- }
+ auto &DL = Caller->getParent()->getDataLayout();
+ Type *AllocaType = AI->getAllocatedType();
+ uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
+ uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
+ assert(AllocaArraySize > 0 && "array size of AllocaInst is zero");
+ // Check that array size doesn't saturate uint64_t and doesn't
+ // overflow when it's multiplied by type size.
+ if (AllocaArraySize != ~0ULL &&
+ UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
+ AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
+ AllocaArraySize * AllocaTypeSize);
}
}
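The guard above in isolation: getLimitedValue() saturates to ~0ULL, and the division test rejects any element-size/element-count pair whose product would wrap a u64. A standalone arithmetic sketch (the function name is invented):

#include <cassert>
#include <cstdint>

static bool allocaSizeFitsInU64(uint64_t TypeSize, uint64_t ArraySize) {
  assert(ArraySize > 0 && "array size of AllocaInst is zero");
  // ~0ULL marks a saturated getLimitedValue(); otherwise the product
  // ArraySize * TypeSize fits iff TypeSize <= UINT64_MAX / ArraySize.
  return ArraySize != ~0ULL && UINT64_MAX / ArraySize >= TypeSize;
}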
// the entries are the same or undef). If so, remove the PHI so it doesn't
// block other optimizations.
if (PHI) {
- if (Value *V = SimplifyInstruction(PHI, IFI.DL, nullptr, nullptr,
+ auto &DL = Caller->getParent()->getDataLayout();
+ if (Value *V = SimplifyInstruction(PHI, &DL, nullptr, nullptr,
&IFI.ACT->getAssumptionCache(*Caller))) {
PHI->replaceAllUsesWith(V);
PHI->eraseFromParent();
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
SE = getAnalysisIfAvailable<ScalarEvolution>();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
// Simplify each loop nest in the function.
if (!OuterL && !CompletelyUnroll)
OuterL = L;
if (OuterL) {
- DataLayoutPass *DLP = PP->getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
- simplifyLoop(OuterL, DT, LI, PP, /*AliasAnalysis*/ nullptr, SE, DL, AC);
+ const DataLayout &DL = F->getParent()->getDataLayout();
+ simplifyLoop(OuterL, DT, LI, PP, /*AliasAnalysis*/ nullptr, SE, &DL, AC);
// LCSSA must be performed on the outermost affected loop. The unrolled
// loop's last loop latch is guaranteed to be in the outermost loop after
const DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
const DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
+ const DataLayout &DL = F.getParent()->getDataLayout();
const TargetLibraryInfo *TLI =
&getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
AssumptionCache *AC =
continue;
// Don't waste time simplifying unused instructions.
if (!I->use_empty())
- if (Value *V = SimplifyInstruction(I, DL, TLI, DT, AC)) {
+ if (Value *V = SimplifyInstruction(I, &DL, TLI, DT, AC)) {
// Mark all uses for resimplification next time round the loop.
for (User *U : I->users())
Next->insert(cast<Instruction>(U));
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
AA = &P->getAnalysis<AliasAnalysis>();
DT = &P->getAnalysis<DominatorTreeWrapperPass>().getDomTree();
SE = &P->getAnalysis<ScalarEvolution>();
- DataLayoutPass *DLP = P->getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
TTI = IgnoreTargetInfo
? nullptr
: &P->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
AA = &getAnalysis<AliasAnalysis>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
SE = &getAnalysis<ScalarEvolution>();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &BB.getModule()->getDataLayout();
TTI = IgnoreTargetInfo
? nullptr
: &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
bool runOnFunction(Function &F) override {
SE = &getAnalysis<ScalarEvolution>();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
return false;
SE = &getAnalysis<ScalarEvolution>();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+ DL = &F.getParent()->getDataLayout();
TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
TLI = TLIP ? &TLIP->getTLI() : nullptr;
; AddRec: {{{(28 + (4 * (-4 + (3 * %m)) * %o) + %A),+,(8 * %m * %o)}<%for.i>,+,(12 * %o)}<%for.j>,+,20}<%for.k>
; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(i32) bytes.
+; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of 4 bytes.
; CHECK: ArrayRef[{3,+,2}<%for.i>][{-4,+,3}<%for.j>][{7,+,5}<%for.k>]
define void @foo(i64 %n, i64 %m, i64 %o, i32* nocapture %A) #0 {
; AddRec: {{{(4 + (4 * (sext i32 %a.deps to i64) * (1 + (sext i32 %a.cols to i64))) + %a.base),+,(4 * (sext i32 %a.deps to i64) * (sext i32 %a.cols to i64))}<%for.i>,+,(4 * (sext i32 %a.deps to i64))}<%for.j>,+,4}<%for.k>
; CHECK: Base offset: %a.base
-; CHECK: ArrayDecl[UnknownSize][(sext i32 %a.cols to i64)][(sext i32 %a.deps to i64)] with elements of sizeof(float) bytes.
+; CHECK: ArrayDecl[UnknownSize][(sext i32 %a.cols to i64)][(sext i32 %a.deps to i64)] with elements of 4 bytes.
; CHECK: ArrayRef[{1,+,1}<nuw><nsw><%for.i>][{1,+,1}<nuw><nsw><%for.j>][{1,+,1}<nuw><nsw><%for.k>]
%struct.Mat = type { float*, i32, i32, i32, i32 }
; AddRec: {{{(4 + (4 * (sext i32 %a.deps to i64) * (1 + (sext i32 %a.cols to i64))) + %a.base),+,(4 * (sext i32 %a.deps to i64) * (sext i32 %a.cols to i64))}<%for.i>,+,(4 * (sext i32 %a.deps to i64))}<%for.j>,+,4}<%for.k>
; CHECK: Base offset: %a.base
-; CHECK: ArrayDecl[UnknownSize][(sext i32 %a.cols to i64)][(sext i32 %a.deps to i64)] with elements of sizeof(float) bytes.
+; CHECK: ArrayDecl[UnknownSize][(sext i32 %a.cols to i64)][(sext i32 %a.deps to i64)] with elements of 4 bytes.
; CHECK: ArrayRef[{1,+,1}<nuw><nsw><%for.i>][{1,+,1}<nuw><nsw><%for.j>][{1,+,1}<nuw><nsw><%for.k>]
%struct.Mat = type { float*, i32, i32, i32, i32 }
; A[2i+b][2j] = 1.0;
; }
-; AddRec: {{((%m * %b * sizeof(double)) + %A),+,(2 * %m * sizeof(double))}<%for.i>,+,(2 * sizeof(double))}<%for.j>
+; AddRec: {{((%m * %b * 8) + %A),+,(2 * %m * 8)}<%for.i>,+,(2 * 8)}<%for.j>
; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][%m] with elements of sizeof(double) bytes.
+; CHECK: ArrayDecl[UnknownSize][%m] with elements of 8 bytes.
; CHECK: ArrayRef[{%b,+,2}<%for.i>][{0,+,2}<%for.j>]
; AddRec: {{{(56 + (8 * (-4 + (3 * %m)) * %o) + %A),+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
+; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of 8 bytes.
; CHECK: ArrayRef[{3,+,1}<nw><%for.i>][{-4,+,1}<nw><%for.j>][{7,+,1}<nw><%for.k>]
define void @foo(i64 %n, i64 %m, i64 %o, double* %A) {
; AddRec: {{{(56 + (8 * (-4 + (3 * %m)) * (%o + %p)) + %A),+,(8 * (%o + %p) * %m)}<%for.cond4.preheader.lr.ph.us>,+,(8 * (%o + %p))}<%for.body6.lr.ph.us.us>,+,8}<%for.body6.us.us>
; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][%m][(%o + %p)] with elements of sizeof(double) bytes.
+; CHECK: ArrayDecl[UnknownSize][%m][(%o + %p)] with elements of 8 bytes.
; CHECK: ArrayRef[{3,+,1}<nw><%for.cond4.preheader.lr.ph.us>][{-4,+,1}<nw><%for.body6.lr.ph.us.us>][{7,+,1}<nw><%for.body6.us.us>]
define void @foo(i64 %n, i64 %m, i64 %o, i64 %p, double* nocapture %A) nounwind uwtable {
; AddRec: {{{((8 * ((((%m * %p) + %q) * %o) + %r)) + %A),+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
+; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of 8 bytes.
; CHECK: ArrayRef[{%p,+,1}<nw><%for.i>][{%q,+,1}<nw><%for.j>][{%r,+,1}<nw><%for.k>]
define void @foo(i64 %n, i64 %m, i64 %o, double* %A, i64 %p, i64 %q, i64 %r) {
; Inst: %val = load double, double* %arrayidx
; In Loop with Header: for.j
-; AddRec: {{0,+,(%m * sizeof(double))}<%for.i>,+,sizeof(double)}<%for.j>
+; AddRec: {{0,+,(%m * 8)}<%for.i>,+,8}<%for.j>
; Base offset: %A
-; ArrayDecl[UnknownSize][%m] with elements of sizeof(double) bytes.
+; ArrayDecl[UnknownSize][%m] with elements of 8 bytes.
; ArrayRef[{0,+,1}<nuw><nsw><%for.i>][{0,+,1}<nuw><nsw><%for.j>]
; Inst: store double %val, double* %arrayidx
; In Loop with Header: for.j
; AddRec: {{%A,+,(8 * %m)}<%for.i>,+,8}<%for.j>
; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][%m] with elements of sizeof(double) bytes.
+; CHECK: ArrayDecl[UnknownSize][%m] with elements of 8 bytes.
; CHECK: ArrayRef[{0,+,1}<nuw><nsw><%for.i>][{0,+,1}<nuw><nsw><%for.j>]
define void @foo(i64 %n, i64 %m, double* %A) {
; AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
; CHECK: Base offset: %A
-; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
+; CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of 8 bytes.
; CHECK: ArrayRef[{0,+,1}<nuw><nsw><%for.i>][{0,+,1}<nuw><nsw><%for.j>][{0,+,1}<nuw><nsw><%for.k>]
define void @foo(i64 %n, i64 %m, i64 %o, double* %A) {
; ScalarEvolution should be able to understand the loop and eliminate the casts.
-; CHECK: {%d,+,sizeof(i32)}
+; CHECK: {%d,+,4}
define void @foo(i32* nocapture %d, i32 %n) nounwind {
entry:
; count, it should say so.
; PR7845
-; CHECK: Loop %for.cond: <multiple exits> Unpredictable backedge-taken count.
+; CHECK: Loop %for.cond: <multiple exits> Unpredictable backedge-taken count.
; CHECK: Loop %for.cond: max backedge-taken count is 5
@.str = private constant [4 x i8] c"%d\0A\00" ; <[4 x i8]*> [#uses=2]
; PR19799: Indvars miscompile due to an incorrect max backedge taken count from SCEV.
; CHECK-LABEL: @pr19799
-; CHECK: Loop %for.body.i: <multiple exits> Unpredictable backedge-taken count.
+; CHECK: Loop %for.body.i: <multiple exits> Unpredictable backedge-taken count.
; CHECK: Loop %for.body.i: max backedge-taken count is 1
@a = common global i32 0, align 4
; PR18886: Indvars miscompile due to an incorrect max backedge taken count from SCEV.
; CHECK-LABEL: @pr18886
-; CHECK: Loop %for.body: <multiple exits> Unpredictable backedge-taken count.
+; CHECK: Loop %for.body: <multiple exits> Unpredictable backedge-taken count.
; CHECK: Loop %for.body: max backedge-taken count is 3
@aa = global i64 0, align 8
; before the check is forever skipped.
;
; CHECK-LABEL: @cannot_compute_mustexit
-; CHECK: Loop %for.body.i: <multiple exits> Unpredictable backedge-taken count.
-; CHECK: Loop %for.body.i: Unpredictable max backedge-taken count.
+; CHECK: Loop %for.body.i: <multiple exits> Unpredictable backedge-taken count.
+; CHECK: Loop %for.body.i: Unpredictable max backedge-taken count.
@b = common global i32 0, align 4
define i32 @cannot_compute_mustexit() {
; MaxBECount should be the minimum of them.
;
; CHECK-LABEL: @two_mustexit
-; CHECK: Loop %for.body.i: <multiple exits> Unpredictable backedge-taken count.
+; CHECK: Loop %for.body.i: <multiple exits> Unpredictable backedge-taken count.
; CHECK: Loop %for.body.i: max backedge-taken count is 1
define i32 @two_mustexit() {
entry:
; older bitcode files.
; Data Layout Test
-; CHECK: target datalayout = "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-f80:32-n8:16:32-S32"
+; CHECK: target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-a0:0:64-f80:32:32-n8:16:32-S32"
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-a0:0:64-f80:32:32-n8:16:32-S32"
; Module-Level Inline Assembly Test
; RUN: opt < %s -O3 | \
; RUN: llc -mtriple=thumbv7-apple-darwin10 -mattr=+neon | FileCheck %s
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32"
+
define void @fred(i32 %three_by_three, i8* %in, double %dt1, i32 %x_size, i32 %y_size, i8* %bp) nounwind {
entry:
; -- The loop following the load should only use a single add per iteration
+++ /dev/null
-; RUN: not llvm-link %s %p/Inputs/comdat2.ll -S -o - 2>&1 | FileCheck %s
-
-$foo = comdat largest
-@foo = global i32 43, comdat($foo)
-; CHECK: Linking COMDATs named 'foo': can't do size dependent selection without DataLayout!
; REQUIRES: shell
-; RUN: llvm-link %s %S/Inputs/datalayout-a.ll -S -o - 2>%t.a.err | FileCheck %s
+; RUN: llvm-link %s %S/Inputs/datalayout-a.ll -S -o - 2>%t.a.err
; RUN: (echo foo ;cat %t.a.err) | FileCheck --check-prefix=WARN-A %s
-; RUN: llvm-link %s %S/Inputs/datalayout-b.ll -S -o - 2>%t.b.err | FileCheck %s
+; RUN: llvm-link %s %S/Inputs/datalayout-b.ll -S -o - 2>%t.b.err
; RUN: cat %t.b.err | FileCheck --check-prefix=WARN-B %s
target datalayout = "e"
-; CHECK: target datalayout = "e"
; WARN-A-NOT: WARNING
-; "PLAIN" - No optimizations. This tests the target-independent
+; "PLAIN" - No optimizations. This tests the default target layout
; constant folder.
; RUN: opt -S -o - < %s | FileCheck --check-prefix=PLAIN %s
-; "OPT" - Optimizations but no targetdata. This tests target-independent
+; "OPT" - Optimizations but no targetdata. This tests default target layout
; folding in the optimizers.
; RUN: opt -S -o - -instcombine -globalopt < %s | FileCheck --check-prefix=OPT %s
; folding in the optimizers.
; RUN: opt -S -o - -instcombine -globalopt -default-data-layout="e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64" < %s | FileCheck --check-prefix=TO %s
-; "SCEV" - ScalarEvolution but no targetdata.
+; "SCEV" - ScalarEvolution with default target layout
; RUN: opt -analyze -scalar-evolution < %s | FileCheck --check-prefix=SCEV %s
; PLAIN: @F1 = global i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -2)
; PLAIN: @H8 = global i8* getelementptr (i8* null, i32 -1)
; PLAIN: @H1 = global i1* getelementptr (i1* null, i32 -1)
-; OPT: @G8 = global i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -1)
-; OPT: @G1 = global i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -1)
-; OPT: @F8 = global i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -2)
-; OPT: @F1 = global i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -2)
-; OPT: @H8 = global i8* getelementptr (i8* null, i32 -1)
-; OPT: @H1 = global i1* getelementptr (i1* null, i32 -1)
+; OPT: @G8 = global i8* null
+; OPT: @G1 = global i1* null
+; OPT: @F8 = global i8* inttoptr (i64 -1 to i8*)
+; OPT: @F1 = global i1* inttoptr (i64 -1 to i1*)
+; OPT: @H8 = global i8* inttoptr (i64 -1 to i8*)
+; OPT: @H1 = global i1* inttoptr (i64 -1 to i1*)
; TO: @G8 = global i8* null
; TO: @G1 = global i1* null
; TO: @F8 = global i8* inttoptr (i64 -1 to i8*)
; PLAIN: @g = constant i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
; PLAIN: @h = constant i64 ptrtoint (i1** getelementptr (i1** null, i32 1) to i64)
; PLAIN: @i = constant i64 ptrtoint (i1** getelementptr ({ i1, i1* }* null, i64 0, i32 1) to i64)
-; OPT: @a = constant i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2310)
-; OPT: @b = constant i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
-; OPT: @c = constant i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2)
-; OPT: @d = constant i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 11)
-; OPT: @e = constant i64 ptrtoint (double* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64)
+; OPT: @a = constant i64 18480
+; OPT: @b = constant i64 8
+; OPT: @c = constant i64 16
+; OPT: @d = constant i64 88
+; OPT: @e = constant i64 16
; OPT: @f = constant i64 1
-; OPT: @g = constant i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
-; OPT: @h = constant i64 ptrtoint (i1** getelementptr (i1** null, i32 1) to i64)
-; OPT: @i = constant i64 ptrtoint (i1** getelementptr ({ i1, i1* }* null, i64 0, i32 1) to i64)
+; OPT: @g = constant i64 8
+; OPT: @h = constant i64 8
+; OPT: @i = constant i64 8
; TO: @a = constant i64 18480
; TO: @b = constant i64 8
; TO: @c = constant i64 16
; PLAIN: @M = constant i64* getelementptr (i64* null, i32 1)
; PLAIN: @N = constant i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1)
; PLAIN: @O = constant i64* getelementptr ([2 x i64]* null, i32 0, i32 1)
-; OPT: @M = constant i64* getelementptr (i64* null, i32 1)
-; OPT: @N = constant i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1)
-; OPT: @O = constant i64* getelementptr ([2 x i64]* null, i32 0, i32 1)
+; OPT: @M = constant i64* inttoptr (i64 8 to i64*)
+; OPT: @N = constant i64* inttoptr (i64 8 to i64*)
+; OPT: @O = constant i64* inttoptr (i64 8 to i64*)
; TO: @M = constant i64* inttoptr (i64 8 to i64*)
; TO: @N = constant i64* inttoptr (i64 8 to i64*)
; TO: @O = constant i64* inttoptr (i64 8 to i64*)
; PLAIN: @Y = global [3 x { i32, i32 }]* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 2)
; PLAIN: @Z = global i32* getelementptr inbounds (i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1)
; OPT: @Y = global [3 x { i32, i32 }]* getelementptr ([3 x { i32, i32 }]* @ext, i64 2)
-; OPT: @Z = global i32* getelementptr (i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1)
+; OPT: @Z = global i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 1)
; TO: @Y = global [3 x { i32, i32 }]* getelementptr ([3 x { i32, i32 }]* @ext, i64 2)
; TO: @Z = global i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 1)
; PLAIN: ret i1* %t
; PLAIN: }
; OPT: define i8* @goo8() #0 {
-; OPT: ret i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -1)
+; OPT: ret i8* null
; OPT: }
; OPT: define i1* @goo1() #0 {
-; OPT: ret i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -1)
+; OPT: ret i1* null
; OPT: }
; OPT: define i8* @foo8() #0 {
-; OPT: ret i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -2)
+; OPT: ret i8* inttoptr (i64 -1 to i8*)
; OPT: }
; OPT: define i1* @foo1() #0 {
-; OPT: ret i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -2)
+; OPT: ret i1* inttoptr (i64 -1 to i1*)
; OPT: }
; OPT: define i8* @hoo8() #0 {
-; OPT: ret i8* getelementptr (i8* null, i32 -1)
+; OPT: ret i8* inttoptr (i64 -1 to i8*)
; OPT: }
; OPT: define i1* @hoo1() #0 {
-; OPT: ret i1* getelementptr (i1* null, i32 -1)
+; OPT: ret i1* inttoptr (i64 -1 to i1*)
; OPT: }
; TO: define i8* @goo8() #0 {
; TO: ret i8* null
; TO: }
; SCEV: Classifying expressions for: @goo8
; SCEV: %t = bitcast i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -1) to i8*
-; SCEV: --> ((-1 * sizeof(i8)) + inttoptr (i32 1 to i8*))
+; SCEV: --> (-1 + inttoptr (i32 1 to i8*))
; SCEV: Classifying expressions for: @goo1
; SCEV: %t = bitcast i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -1) to i1*
-; SCEV: --> ((-1 * sizeof(i1)) + inttoptr (i32 1 to i1*))
+; SCEV: --> (-1 + inttoptr (i32 1 to i1*))
; SCEV: Classifying expressions for: @foo8
; SCEV: %t = bitcast i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -2) to i8*
-; SCEV: --> ((-2 * sizeof(i8)) + inttoptr (i32 1 to i8*))
+; SCEV: --> (-2 + inttoptr (i32 1 to i8*))
; SCEV: Classifying expressions for: @foo1
; SCEV: %t = bitcast i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -2) to i1*
-; SCEV: --> ((-2 * sizeof(i1)) + inttoptr (i32 1 to i1*))
+; SCEV: --> (-2 + inttoptr (i32 1 to i1*))
; SCEV: Classifying expressions for: @hoo8
-; SCEV: --> (-1 * sizeof(i8))
+; SCEV: --> -1
; SCEV: Classifying expressions for: @hoo1
-; SCEV: --> (-1 * sizeof(i1))
+; SCEV: --> -1
define i8* @goo8() nounwind {
%t = bitcast i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -1) to i8*
; PLAIN: ret i64 %t
; PLAIN: }
; OPT: define i64 @fa() #0 {
-; OPT: ret i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2310)
+; OPT: ret i64 18480
; OPT: }
; OPT: define i64 @fb() #0 {
-; OPT: ret i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
+; OPT: ret i64 8
; OPT: }
; OPT: define i64 @fc() #0 {
-; OPT: ret i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2)
+; OPT: ret i64 16
; OPT: }
; OPT: define i64 @fd() #0 {
-; OPT: ret i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 11)
+; OPT: ret i64 88
; OPT: }
; OPT: define i64 @fe() #0 {
-; OPT: ret i64 ptrtoint (double* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64)
+; OPT: ret i64 16
; OPT: }
; OPT: define i64 @ff() #0 {
; OPT: ret i64 1
; OPT: }
; OPT: define i64 @fg() #0 {
-; OPT: ret i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
+; OPT: ret i64 8
; OPT: }
; OPT: define i64 @fh() #0 {
-; OPT: ret i64 ptrtoint (i1** getelementptr (i1** null, i32 1) to i64)
+; OPT: ret i64 8
; OPT: }
; OPT: define i64 @fi() #0 {
-; OPT: ret i64 ptrtoint (i1** getelementptr ({ i1, i1* }* null, i64 0, i32 1) to i64)
+; OPT: ret i64 8
; OPT: }
; TO: define i64 @fa() #0 {
; TO: ret i64 18480
; TO: ret i64 8
; TO: }
; SCEV: Classifying expressions for: @fa
-; SCEV: %t = bitcast i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2310) to i64
+; SCEV: %t = bitcast i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2310) to i64
; SCEV: --> (2310 * sizeof(double))
; SCEV: Classifying expressions for: @fb
-; SCEV: %t = bitcast i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64
+; SCEV: %t = bitcast i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64
; SCEV: --> alignof(double)
; SCEV: Classifying expressions for: @fc
-; SCEV: %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2) to i64
+; SCEV: %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2) to i64
; SCEV: --> (2 * sizeof(double))
; SCEV: Classifying expressions for: @fd
-; SCEV: %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 11) to i64
+; SCEV: %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 11) to i64
; SCEV: --> (11 * sizeof(double))
; SCEV: Classifying expressions for: @fe
-; SCEV: %t = bitcast i64 ptrtoint (double* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64) to i64
+; SCEV: %t = bitcast i64 ptrtoint (double* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64) to i64
; SCEV: --> offsetof({ double, float, double, double }, 2)
; SCEV: Classifying expressions for: @ff
-; SCEV: %t = bitcast i64 1 to i64
+; SCEV: %t = bitcast i64 1 to i64
; SCEV: --> 1
; SCEV: Classifying expressions for: @fg
; SCEV: %t = bitcast i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64
; PLAIN: ret i64* %t
; PLAIN: }
; OPT: define i64* @fM() #0 {
-; OPT: ret i64* getelementptr (i64* null, i32 1)
+; OPT: ret i64* inttoptr (i64 8 to i64*)
; OPT: }
; OPT: define i64* @fN() #0 {
-; OPT: ret i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1)
+; OPT: ret i64* inttoptr (i64 8 to i64*)
; OPT: }
; OPT: define i64* @fO() #0 {
-; OPT: ret i64* getelementptr ([2 x i64]* null, i32 0, i32 1)
+; OPT: ret i64* inttoptr (i64 8 to i64*)
; OPT: }
; TO: define i64* @fM() #0 {
; TO: ret i64* inttoptr (i64 8 to i64*)
; TO: ret i64* inttoptr (i64 8 to i64*)
; TO: }
; SCEV: Classifying expressions for: @fM
-; SCEV: %t = bitcast i64* getelementptr (i64* null, i32 1) to i64*
-; SCEV: --> sizeof(i64)
+; SCEV: %t = bitcast i64* getelementptr (i64* null, i32 1) to i64*
+; SCEV: --> 8
; SCEV: Classifying expressions for: @fN
-; SCEV: %t = bitcast i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1) to i64*
-; SCEV: --> sizeof(i64)
+; SCEV: %t = bitcast i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1) to i64*
+; SCEV: --> 8
; SCEV: Classifying expressions for: @fO
-; SCEV: %t = bitcast i64* getelementptr ([2 x i64]* null, i32 0, i32 1) to i64*
-; SCEV: --> sizeof(i64)
+; SCEV: %t = bitcast i64* getelementptr ([2 x i64]* null, i32 0, i32 1) to i64*
+; SCEV: --> 8
define i64* @fM() nounwind {
%t = bitcast i64* getelementptr (i64* null, i32 1) to i64*
; PLAIN: ret i32* %t
; PLAIN: }
; OPT: define i32* @fZ() #0 {
-; OPT: ret i32* getelementptr (i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1)
+; OPT: ret i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 1)
; OPT: }
; TO: define i32* @fZ() #0 {
; TO: ret i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 1)
; TO: }
; SCEV: Classifying expressions for: @fZ
; SCEV: %t = bitcast i32* getelementptr inbounds (i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1) to i32*
-; SCEV: --> ((3 * sizeof(i32)) + @ext)
+; SCEV: --> (12 + @ext)
define i32* @fZ() nounwind {
%t = bitcast i32* getelementptr inbounds (i32* getelementptr inbounds ([3 x { i32, i32 }]* @ext, i64 0, i64 1, i32 0), i64 1) to i32*
%p = getelementptr inbounds i8, i8* addrspacecast ([4 x i8] addrspace(12)* @p12 to i8*),
i32 2
ret i8* %p
-; OPT: ret i8* getelementptr (i8* addrspacecast (i8 addrspace(12)* getelementptr inbounds ([4 x i8] addrspace(12)* @p12, i32 0, i32 0) to i8*), i32 2)
+; OPT: ret i8* getelementptr ([4 x i8]* addrspacecast ([4 x i8] addrspace(12)* @p12 to [4 x i8]*), i64 0, i64 2)
}
define i8* @same_addrspace() nounwind noinline {
; OPT: same_addrspace
%p = getelementptr inbounds i8, i8* bitcast ([4 x i8] * @p0 to i8*), i32 2
ret i8* %p
-; OPT: ret i8* getelementptr inbounds ([4 x i8]* @p0, i32 0, i32 2)
+; OPT: ret i8* getelementptr inbounds ([4 x i8]* @p0, i64 0, i64 2)
}
@gv1 = internal global i32 1
; RUN: opt -constmerge -S < %s | FileCheck %s
; Test that in one run var3 is merged into var2 and var1 into var4.
-; Test that we merge @var5 and @var6 into one with the higher alignment, and
-; don't merge var7/var8 into var5/var6.
+; Test that we merge @var5 and @var6 into one with the higher alignment.
declare void @zed(%struct.foobar*, %struct.foobar*)
@var7 = internal constant [16 x i8] c"foo1bar2foo3bar\00"
@var8 = private unnamed_addr constant [16 x i8] c"foo1bar2foo3bar\00"
-; CHECK-NEXT: @var6 = private constant [16 x i8] c"foo1bar2foo3bar\00", align 16
-; CHECK-NEXT: @var8 = private constant [16 x i8] c"foo1bar2foo3bar\00"
+; CHECK-NEXT: @var7 = internal constant [16 x i8] c"foo1bar2foo3bar\00"
+; CHECK-NEXT: @var8 = private constant [16 x i8] c"foo1bar2foo3bar\00", align 16
@var4a = alias %struct.foobar* @var4
@llvm.used = appending global [1 x %struct.foobar*] [%struct.foobar* @var4a], section "llvm.metadata"
declare void @test(i8*)
define void @print() {
-; CHECK: %1 = load i8*, i8** getelementptr inbounds ([1 x %struct.ButtonInitData]* @_ZL14buttonInitData, i32 0, i32 0, i32 0), align 4
+; CHECK: %1 = load i8*, i8** @_ZL14buttonInitData.0.0, align 4
%1 = load i8*, i8** getelementptr inbounds ([1 x %struct.ButtonInitData]* @_ZL14buttonInitData, i32 0, i32 0, i32 0), align 4
call void @test(i8* %1)
ret void
; RUN: opt < %s -indvars -S | grep sext | count 1
; ModuleID = '<stdin>'
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
%struct.App1Marker = type <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }>
%struct.ComponentInstanceRecord = type <{ [1 x i32] }>
%struct.DCPredictors = type { [5 x i16] }
; RUN: opt < %s -indvars -S | FileCheck %s
; PR4086
+
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
declare void @foo()
define void @test() {
; RUN: opt < %s -indvars -S | FileCheck %s
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
target triple = "x86_64-apple-darwin"
; CHECK-LABEL: @sloop
;RUN: opt -S %s -indvars | FileCheck %s
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
; CHECK-LABEL: @foo(
; CHECK-NOT: %lftr.wideiv = trunc i32 %indvars.iv.next to i16
; CHECK: %exitcond = icmp ne i32 %indvars.iv.next, 512
-; LFTR should eliminate the need for the computation of i*i completely. It
+; LFTR should eliminate the need for the computation of i*i completely. It
; is only used to compute the exit value.
; RUN: opt < %s -indvars -dce -S | not grep mul
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
@A = external global i32 ; <i32*> [#uses=1]
define i32 @quadratic_setlt() {
; RUN: opt < %s -indvars -S | FileCheck %s
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
@a = common global i32 0, align 4
@c = common global i32 0, align 4
@b = common global i32 0, align 4
; sext for the addressing, however it shouldn't eliminate the sext
; on the other phi, since that value undergoes signed wrapping.
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
define void @foo(i32* nocapture %d, i32 %n) nounwind {
entry:
%0 = icmp sgt i32 %n, 0 ; <i1> [#uses=1]
-; RUN: opt < %s -indvars -S > %t
-; RUN: not grep sext %t
+; RUN: opt < %s -indvars -S | not grep sext
+
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
define i64 @test(i64* nocapture %first, i32 %count) nounwind readonly {
entry:
; RUN: not grep sext %t
; RUN: grep phi %t | count 1
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
define void @foo(i64* nocapture %x, i32 %n) nounwind {
entry:
%tmp102 = icmp sgt i32 %n, 0 ; <i1> [#uses=1]
; RUN: opt -S -indvars < %s | FileCheck %s
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
define void @test1(float* nocapture %autoc, float* nocapture %data, float %d, i32 %data_len, i32 %sample) nounwind {
entry:
%sub = sub i32 %data_len, %sample
; RUN: opt < %s -indvars -S | FileCheck %s
target triple = "aarch64--linux-gnu"
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
; Check the loop exit i32 compare instruction and operand are widened to i64
; instead of truncating IV before its use in the i32 compare instruction.
+++ /dev/null
-; RUN: opt < %s -inline -S | FileCheck %s
-; This variant of the test has no data layout information.
-target triple = "powerpc64-unknown-linux-gnu"
-
-%struct.s = type { i32, i32 }
-
-define void @foo(%struct.s* byval nocapture readonly %a) {
-entry:
- %x = alloca [2 x i32], align 4
- %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
- %0 = load i32, i32* %a1, align 4
- %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
- store i32 %0, i32* %arrayidx, align 4
- %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
- %1 = load i32, i32* %b, align 4
- %arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
- store i32 %1, i32* %arrayidx2, align 4
- call void @bar(i32* %arrayidx) #2
- ret void
-}
-
-define void @foo0(%struct.s* byval nocapture readonly %a) {
-entry:
- %x = alloca [2 x i32]
- %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
- %0 = load i32, i32* %a1, align 4
- %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
- store i32 %0, i32* %arrayidx, align 4
- %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
- %1 = load i32, i32* %b, align 4
- %arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
- store i32 %1, i32* %arrayidx2, align 4
- call void @bar(i32* %arrayidx) #2
- ret void
-}
-
-declare void @bar(i32*) #1
-
-define void @goo(%struct.s* byval nocapture readonly %a) {
-entry:
- %x = alloca [2 x i32], align 32
- %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
- %0 = load i32, i32* %a1, align 4
- %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
- store i32 %0, i32* %arrayidx, align 32
- %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
- %1 = load i32, i32* %b, align 4
- %arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
- store i32 %1, i32* %arrayidx2, align 4
- call void @bar(i32* %arrayidx) #2
- ret void
-}
-
-; CHECK-LABEL: @main
-; CHECK: alloca [2 x i32], align 32
-; CHECK-NOT: alloca [2 x i32]
-; CHECK: ret i32 0
-
-define signext i32 @main() {
-entry:
- %a = alloca i64, align 8
- %tmpcast = bitcast i64* %a to %struct.s*
- store i64 0, i64* %a, align 8
- %a1 = bitcast i64* %a to i32*
- store i32 1, i32* %a1, align 8
- call void @foo(%struct.s* byval %tmpcast)
- store i32 2, i32* %a1, align 8
- call void @goo(%struct.s* byval %tmpcast)
- ret i32 0
-}
-
-; CHECK-LABEL: @test0
-; CHECK: alloca [2 x i32], align 32
-; CHECK: alloca [2 x i32]
-; CHECK: ret i32 0
-
-define signext i32 @test0() {
-entry:
- %a = alloca i64, align 8
- %tmpcast = bitcast i64* %a to %struct.s*
- store i64 0, i64* %a, align 8
- %a1 = bitcast i64* %a to i32*
- store i32 1, i32* %a1, align 8
- call void @foo0(%struct.s* byval %tmpcast)
- store i32 2, i32* %a1, align 8
- call void @goo(%struct.s* byval %tmpcast)
- ret i32 0
-}
call void @goo(%struct.s* byval %tmpcast)
ret i32 0
}
-
-; CHECK-LABEL: @test1
-; CHECK: {{alloca \[2 x i32\]$}}
-; CHECK-NOT: alloca [2 x i32]
-; CHECK: ret i32 0
-
-define signext i32 @test1() {
-entry:
- %a = alloca i64, align 8
- %tmpcast = bitcast i64* %a to %struct.s*
- store i64 0, i64* %a, align 8
- %a1 = bitcast i64* %a to i32*
- store i32 1, i32* %a1, align 8
- call void @foo0(%struct.s* byval %tmpcast)
- store i32 2, i32* %a1, align 8
- call void @foo1(%struct.s* byval %tmpcast)
- ret i32 0
-}
ret void
}
-; Size in llvm.lifetime.X should be -1 (unknown).
+; Size in llvm.lifetime.X should be 1 (the store size of i8).
define void @test() {
; CHECK-LABEL: @test(
; CHECK-NOT: lifetime
-; CHECK: llvm.lifetime.start(i64 -1
+; CHECK: llvm.lifetime.start(i64 1
; CHECK-NOT: lifetime
-; CHECK: llvm.lifetime.end(i64 -1
+; CHECK: llvm.lifetime.end(i64 1
call void @helper()
; CHECK-NOT: lifetime
; CHECK: ret void
+++ /dev/null
-; RUN: opt < %s -instcombine -S | FileCheck %s
-
-; Check we don't crash due to lack of target data.
-
-@G = constant [100 x i8] zeroinitializer
-
-declare void @bar(i8*)
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
-
-define void @test() {
-; CHECK-LABEL: @test(
-; CHECK: llvm.memcpy
-; CHECK: ret void
- %A = alloca [100 x i8]
- %a = getelementptr inbounds [100 x i8], [100 x i8]* %A, i64 0, i64 0
- call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* getelementptr inbounds ([100 x i8]* @G, i64 0, i32 0), i64 100, i32 4, i1 false)
- call void @bar(i8* %a) readonly
- ret void
-}
%s1 = insertvalue {i32, i32} %s1.1, i32 %b, 1
%v1 = extractvalue {i32, i32} %s1, 0
%v2 = extractvalue {i32, i32} %s1, 1
-
+
; Build a nested struct and pull a sub struct out of it
; This requires instcombine to insert a few insertvalue instructions
%ns1.1 = insertvalue {i32, {i32, i32}} undef, i32 %v1, 0
}
; CHECK-LABEL: define i32 @extract2gep(
-; CHECK-NEXT: [[GEP:%[a-z0-9]+]] = getelementptr inbounds {{.*}}, {{.*}}* %pair, i32 0, i32 1
+; CHECK-NEXT: [[GEP:%[a-z0-9]+]] = getelementptr inbounds {{.*}}, {{.*}}* %pair, i64 0, i32 1
; CHECK-NEXT: [[LOAD:%[A-Za-z0-9]+]] = load i32, i32* [[GEP]]
; CHECK-NEXT: store
; CHECK-NEXT: br label %loop
}
; CHECK-LABEL: define i32 @doubleextract2gep(
-; CHECK-NEXT: [[GEP:%[a-z0-9]+]] = getelementptr inbounds {{.*}}, {{.*}}* %arg, i32 0, i32 1, i32 1
+; CHECK-NEXT: [[GEP:%[a-z0-9]+]] = getelementptr inbounds {{.*}}, {{.*}}* %arg, i64 0, i32 1, i32 1
; CHECK-NEXT: [[LOAD:%[A-Za-z0-9]+]] = load i32, i32* [[GEP]]
; CHECK-NEXT: ret i32 [[LOAD]]
define i32 @doubleextract2gep({i32, {i32, i32}}* %arg) {
-; RUN: opt < %s -datalayout -instcombine -S | FileCheck %s
+; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
; Uses InstCombine with DataLayout to propagate dereferenceable
; attribute via gc.relocate: if the derived ptr is dereferenceable(N),
-; RUN: opt -instcombine -S < %s | FileCheck -check-prefix=NODL %s
-; RUN: opt -instcombine -S -default-data-layout="p:32:32:32-p1:16:16:16-n8:16:32:64" < %s | FileCheck -check-prefix=P32 %s
+; RUN: opt -instcombine -S -default-data-layout="p:32:32:32-p1:16:16:16-n8:16:32:64" < %s | FileCheck %s
@G16 = internal constant [10 x i16] [i16 35, i16 82, i16 69, i16 81, i16 85,
i16 73, i16 82, i16 69, i16 68, i16 0]
%Q = load i16, i16* %P
%R = icmp eq i16 %Q, 0
ret i1 %R
-; NODL-LABEL: @test1(
-; NODL-NEXT: %R = icmp eq i32 %X, 9
-; NODL-NEXT: ret i1 %R
-
-; P32-LABEL: @test1(
-; P32-NEXT: %R = icmp eq i32 %X, 9
-; P32-NEXT: ret i1 %R
+; CHECK-LABEL: @test1(
+; CHECK-NEXT: %R = icmp eq i32 %X, 9
+; CHECK-NEXT: ret i1 %R
}
define i1 @test1_noinbounds(i32 %X) {
%Q = load i16, i16* %P
%R = icmp eq i16 %Q, 0
ret i1 %R
-; NODL-LABEL: @test1_noinbounds(
-; NODL-NEXT: %P = getelementptr [10 x i16], [10 x i16]* @G16, i32 0, i32 %X
-
-; P32-LABEL: @test1_noinbounds(
-; P32-NEXT: %R = icmp eq i32 %X, 9
-; P32-NEXT: ret i1 %R
+; CHECK-LABEL: @test1_noinbounds(
+; CHECK-NEXT: %R = icmp eq i32 %X, 9
+; CHECK-NEXT: ret i1 %R
}
define i1 @test1_noinbounds_i64(i64 %X) {
%Q = load i16, i16* %P
%R = icmp eq i16 %Q, 0
ret i1 %R
-; NODL-LABEL: @test1_noinbounds_i64(
-; NODL-NEXT: %P = getelementptr [10 x i16], [10 x i16]* @G16, i64 0, i64 %X
-
-; P32-LABEL: @test1_noinbounds_i64(
-; P32: %R = icmp eq i32 %1, 9
-; P32-NEXT: ret i1 %R
+; CHECK-LABEL: @test1_noinbounds_i64(
+; CHECK: %R = icmp eq i32 %1, 9
+; CHECK-NEXT: ret i1 %R
}
define i1 @test1_noinbounds_as1(i32 %x) {
%r = icmp eq i16 %q, 0
ret i1 %r
-; P32-LABEL: @test1_noinbounds_as1(
-; P32-NEXT: trunc i32 %x to i16
-; P32-NEXT: %r = icmp eq i16 %1, 9
-; P32-NEXT: ret i1 %r
+; CHECK-LABEL: @test1_noinbounds_as1(
+; CHECK-NEXT: trunc i32 %x to i16
+; CHECK-NEXT: %r = icmp eq i16 %1, 9
+; CHECK-NEXT: ret i1 %r
}
define i1 @test2(i32 %X) {
%Q = load i16, i16* %P
%R = icmp slt i16 %Q, 85
ret i1 %R
-; NODL-LABEL: @test2(
-; NODL-NEXT: %R = icmp ne i32 %X, 4
-; NODL-NEXT: ret i1 %R
+; CHECK-LABEL: @test2(
+; CHECK-NEXT: %R = icmp ne i32 %X, 4
+; CHECK-NEXT: ret i1 %R
}
define i1 @test3(i32 %X) {
%Q = load double, double* %P
%R = fcmp oeq double %Q, 1.0
ret i1 %R
-; NODL-LABEL: @test3(
-; NODL-NEXT: %R = icmp eq i32 %X, 1
-; NODL-NEXT: ret i1 %R
-
-; P32-LABEL: @test3(
-; P32-NEXT: %R = icmp eq i32 %X, 1
-; P32-NEXT: ret i1 %R
+; CHECK-LABEL: @test3(
+; CHECK-NEXT: %R = icmp eq i32 %X, 1
+; CHECK-NEXT: ret i1 %R
}
%Q = load i16, i16* %P
%R = icmp sle i16 %Q, 73
ret i1 %R
-; NODL-LABEL: @test4(
-; NODL-NEXT: lshr i32 933, %X
-; NODL-NEXT: and i32 {{.*}}, 1
-; NODL-NEXT: %R = icmp ne i32 {{.*}}, 0
-; NODL-NEXT: ret i1 %R
-
-; P32-LABEL: @test4(
-; P32-NEXT: lshr i32 933, %X
-; P32-NEXT: and i32 {{.*}}, 1
-; P32-NEXT: %R = icmp ne i32 {{.*}}, 0
-; P32-NEXT: ret i1 %R
+; CHECK-LABEL: @test4(
+; CHECK-NEXT: lshr i32 933, %X
+; CHECK-NEXT: and i32 {{.*}}, 1
+; CHECK-NEXT: %R = icmp ne i32 {{.*}}, 0
+; CHECK-NEXT: ret i1 %R
}
define i1 @test4_i16(i16 %X) {
%Q = load i16, i16* %P
%R = icmp sle i16 %Q, 73
ret i1 %R
-
-; NODL-LABEL: @test4_i16(
-; NODL-NEXT: lshr i16 933, %X
-; NODL-NEXT: and i16 {{.*}}, 1
-; NODL-NEXT: %R = icmp ne i16 {{.*}}, 0
-; NODL-NEXT: ret i1 %R
-
-; P32-LABEL: @test4_i16(
-; P32-NEXT: sext i16 %X to i32
-; P32-NEXT: lshr i32 933, %1
-; P32-NEXT: and i32 {{.*}}, 1
-; P32-NEXT: %R = icmp ne i32 {{.*}}, 0
-; P32-NEXT: ret i1 %R
+; CHECK-LABEL: @test4_i16(
+; CHECK-NEXT: sext i16 %X to i32
+; CHECK-NEXT: lshr i32 933, %1
+; CHECK-NEXT: and i32 {{.*}}, 1
+; CHECK-NEXT: %R = icmp ne i32 {{.*}}, 0
+; CHECK-NEXT: ret i1 %R
}
define i1 @test5(i32 %X) {
%Q = load i16, i16* %P
%R = icmp eq i16 %Q, 69
ret i1 %R
-; NODL-LABEL: @test5(
-; NODL-NEXT: icmp eq i32 %X, 2
-; NODL-NEXT: icmp eq i32 %X, 7
-; NODL-NEXT: %R = or i1
-; NODL-NEXT: ret i1 %R
-
-; P32-LABEL: @test5(
-; P32-NEXT: icmp eq i32 %X, 2
-; P32-NEXT: icmp eq i32 %X, 7
-; P32-NEXT: %R = or i1
-; P32-NEXT: ret i1 %R
+; CHECK-LABEL: @test5(
+; CHECK-NEXT: icmp eq i32 %X, 2
+; CHECK-NEXT: icmp eq i32 %X, 7
+; CHECK-NEXT: %R = or i1
+; CHECK-NEXT: ret i1 %R
}
define i1 @test6(i32 %X) {
%Q = load double, double* %P
%R = fcmp ogt double %Q, 0.0
ret i1 %R
-; NODL-LABEL: @test6(
-; NODL-NEXT: add i32 %X, -1
-; NODL-NEXT: %R = icmp ult i32 {{.*}}, 3
-; NODL-NEXT: ret i1 %R
-
-; P32-LABEL: @test6(
-; P32-NEXT: add i32 %X, -1
-; P32-NEXT: %R = icmp ult i32 {{.*}}, 3
-; P32-NEXT: ret i1 %R
+; CHECK-LABEL: @test6(
+; CHECK-NEXT: add i32 %X, -1
+; CHECK-NEXT: %R = icmp ult i32 {{.*}}, 3
+; CHECK-NEXT: ret i1 %R
}
define i1 @test7(i32 %X) {
%Q = load double, double* %P
%R = fcmp olt double %Q, 0.0
ret i1 %R
-; NODL-LABEL: @test7(
-; NODL-NEXT: add i32 %X, -1
-; NODL-NEXT: %R = icmp ugt i32 {{.*}}, 2
-; NODL-NEXT: ret i1 %R
-
-; P32-LABEL: @test7(
-; P32-NEXT: add i32 %X, -1
-; P32-NEXT: %R = icmp ugt i32 {{.*}}, 2
-; P32-NEXT: ret i1 %R
+; CHECK-LABEL: @test7(
+; CHECK-NEXT: add i32 %X, -1
+; CHECK-NEXT: %R = icmp ugt i32 {{.*}}, 2
+; CHECK-NEXT: ret i1 %R
}
define i1 @test8(i32 %X) {
%R = and i16 %Q, 3
%S = icmp eq i16 %R, 0
ret i1 %S
-; NODL-LABEL: @test8(
-; NODL-NEXT: and i32 %X, -2
-; NODL-NEXT: icmp eq i32 {{.*}}, 8
-; NODL-NEXT: ret i1
-
-; P32-LABEL: @test8(
-; P32-NEXT: and i32 %X, -2
-; P32-NEXT: icmp eq i32 {{.*}}, 8
-; P32-NEXT: ret i1
+; CHECK-LABEL: @test8(
+; CHECK-NEXT: and i32 %X, -2
+; CHECK-NEXT: icmp eq i32 {{.*}}, 8
+; CHECK-NEXT: ret i1
}
@GA = internal constant [4 x { i32, i32 } ] [
%Q = load i32, i32* %P
%R = icmp eq i32 %Q, 1
ret i1 %R
-; NODL-LABEL: @test9(
-; NODL-NEXT: add i32 %X, -1
-; NODL-NEXT: icmp ult i32 {{.*}}, 2
-; NODL-NEXT: ret i1
-
-; P32-LABEL: @test9(
-; P32-NEXT: add i32 %X, -1
-; P32-NEXT: icmp ult i32 {{.*}}, 2
-; P32-NEXT: ret i1
+; CHECK-LABEL: @test9(
+; CHECK-NEXT: add i32 %X, -1
+; CHECK-NEXT: icmp ult i32 {{.*}}, 2
+; CHECK-NEXT: ret i1
}
define i1 @test10_struct(i32 %x) {
-; NODL-LABEL: @test10_struct(
-; NODL: getelementptr inbounds %Foo, %Foo* @GS, i32 %x, i32 0
-
-; P32-LABEL: @test10_struct(
-; P32: ret i1 false
+; CHECK-LABEL: @test10_struct(
+; CHECK: ret i1 false
%p = getelementptr inbounds %Foo, %Foo* @GS, i32 %x, i32 0
%q = load i32, i32* %p
%r = icmp eq i32 %q, 9
}
define i1 @test10_struct_noinbounds(i32 %x) {
-; NODL-LABEL: @test10_struct_noinbounds(
-; NODL: getelementptr %Foo, %Foo* @GS, i32 %x, i32 0
-
-; P32-LABEL: @test10_struct_noinbounds(
-; P32: getelementptr %Foo, %Foo* @GS, i32 %x, i32 0
+; CHECK-LABEL: @test10_struct_noinbounds(
+; CHECK: getelementptr %Foo, %Foo* @GS, i32 %x, i32 0
%p = getelementptr %Foo, %Foo* @GS, i32 %x, i32 0
%q = load i32, i32* %p
%r = icmp eq i32 %q, 9
; Test that the GEP indices are converted before we ever get here
; Index < ptr size
define i1 @test10_struct_i16(i16 %x){
-; NODL-LABEL: @test10_struct_i16(
-; NODL: getelementptr inbounds %Foo, %Foo* @GS, i16 %x, i32 0
-
-; P32-LABEL: @test10_struct_i16(
-; P32: ret i1 false
+; CHECK-LABEL: @test10_struct_i16(
+; CHECK: ret i1 false
%p = getelementptr inbounds %Foo, %Foo* @GS, i16 %x, i32 0
%q = load i32, i32* %p
%r = icmp eq i32 %q, 0
; Test that the GEP indices are converted before we ever get here
; Index > ptr size
define i1 @test10_struct_i64(i64 %x){
-; NODL-LABEL: @test10_struct_i64(
-; NODL: getelementptr inbounds %Foo, %Foo* @GS, i64 %x, i32 0
-
-; P32-LABEL: @test10_struct_i64(
-; P32: ret i1 false
+; CHECK-LABEL: @test10_struct_i64(
+; CHECK: ret i1 false
%p = getelementptr inbounds %Foo, %Foo* @GS, i64 %x, i32 0
%q = load i32, i32* %p
%r = icmp eq i32 %q, 0
}
define i1 @test10_struct_noinbounds_i16(i16 %x) {
-; NODL-LABEL: @test10_struct_noinbounds_i16(
-; NODL: getelementptr %Foo, %Foo* @GS, i16 %x, i32 0
-
-; P32-LABEL: @test10_struct_noinbounds_i16(
-; P32: %1 = sext i16 %x to i32
-; P32: getelementptr %Foo, %Foo* @GS, i32 %1, i32 0
+; CHECK-LABEL: @test10_struct_noinbounds_i16(
+; CHECK: %1 = sext i16 %x to i32
+; CHECK: getelementptr %Foo, %Foo* @GS, i32 %1, i32 0
%p = getelementptr %Foo, %Foo* @GS, i16 %x, i32 0
%q = load i32, i32* %p
%r = icmp eq i32 %q, 0
}
define i1 @test10_struct_arr(i32 %x) {
-; NODL-LABEL: @test10_struct_arr(
-; NODL-NEXT: %r = icmp ne i32 %x, 1
-; NODL-NEXT: ret i1 %r
-
-; P32-LABEL: @test10_struct_arr(
-; P32-NEXT: %r = icmp ne i32 %x, 1
-; P32-NEXT: ret i1 %r
+; CHECK-LABEL: @test10_struct_arr(
+; CHECK-NEXT: %r = icmp ne i32 %x, 1
+; CHECK-NEXT: ret i1 %r
%p = getelementptr inbounds [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i32 %x, i32 2
%q = load i32, i32* %p
%r = icmp eq i32 %q, 9
}
define i1 @test10_struct_arr_noinbounds(i32 %x) {
-; NODL-LABEL: @test10_struct_arr_noinbounds(
-; NODL-NEXT %p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i32 %x, i32 2
-
-; P32-LABEL: @test10_struct_arr_noinbounds(
-; P32-NEXT %p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i32 %x, i32 2
+; CHECK-LABEL: @test10_struct_arr_noinbounds(
+; CHECK-NEXT: %p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i32 %x, i32 2
%p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i32 %x, i32 2
%q = load i32, i32* %p
%r = icmp eq i32 %q, 9
}
define i1 @test10_struct_arr_i16(i16 %x) {
-; NODL-LABEL: @test10_struct_arr_i16(
-; NODL-NEXT: %r = icmp ne i16 %x, 1
-; NODL-NEXT: ret i1 %r
-
-; P32-LABEL: @test10_struct_arr_i16(
-; P32-NEXT: %r = icmp ne i16 %x, 1
-; P32-NEXT: ret i1 %r
+; CHECK-LABEL: @test10_struct_arr_i16(
+; CHECK-NEXT: %r = icmp ne i16 %x, 1
+; CHECK-NEXT: ret i1 %r
%p = getelementptr inbounds [4 x %Foo], [4 x %Foo]* @GStructArr, i16 0, i16 %x, i32 2
%q = load i32, i32* %p
%r = icmp eq i32 %q, 9
}
define i1 @test10_struct_arr_i64(i64 %x) {
-; NODL-LABEL: @test10_struct_arr_i64(
-; NODL-NEXT: %r = icmp ne i64 %x, 1
-; NODL-NEXT: ret i1 %r
-
-; P32-LABEL: @test10_struct_arr_i64(
-; P32-NEXT: trunc i64 %x to i32
-; P32-NEXT: %r = icmp ne i32 %1, 1
-; P32-NEXT: ret i1 %r
+; CHECK-LABEL: @test10_struct_arr_i64(
+; CHECK-NEXT: trunc i64 %x to i32
+; CHECK-NEXT: %r = icmp ne i32 %1, 1
+; CHECK-NEXT: ret i1 %r
%p = getelementptr inbounds [4 x %Foo], [4 x %Foo]* @GStructArr, i64 0, i64 %x, i32 2
%q = load i32, i32* %p
%r = icmp eq i32 %q, 9
}
define i1 @test10_struct_arr_noinbounds_i16(i16 %x) {
-; NODL-LABEL: @test10_struct_arr_noinbounds_i16(
-; NODL-NEXT: %p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i16 %x, i32 2
-
-; P32-LABEL: @test10_struct_arr_noinbounds_i16(
-; P32-NEXT: %r = icmp ne i16 %x, 1
+; CHECK-LABEL: @test10_struct_arr_noinbounds_i16(
+; CHECK-NEXT: %r = icmp ne i16 %x, 1
%p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i16 %x, i32 2
%q = load i32, i32* %p
%r = icmp eq i32 %q, 9
}
define i1 @test10_struct_arr_noinbounds_i64(i64 %x) {
-; FIXME: Should be no trunc?
-; NODL-LABEL: @test10_struct_arr_noinbounds_i64(
-; NODL-NEXT: %p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i64 %x, i32 2
-
-; P32-LABEL: @test10_struct_arr_noinbounds_i64(
-; P32: %r = icmp ne i32 %1, 1
-; P32-NEXT: ret i1 %r
+; CHECK-LABEL: @test10_struct_arr_noinbounds_i64(
+; CHECK: %r = icmp ne i32 %1, 1
+; CHECK-NEXT: ret i1 %r
%p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i64 %x, i32 2
%q = load i32, i32* %p
%r = icmp eq i32 %q, 9
ret <4 x i32> %vcgez.i
}
+
+; The last test needs this weird datalayout.
+target datalayout = "i32:8:8"
+; Without it, InstCombine would align the pointee to 4 bytes.
+; The known zero bits that result from that alignment would allow it to
+; turn:
+; and i32 %mul, 255
+; into:
+; and i32 %mul, 252
+; The mask would then no longer be of the form 2^n-1, which prevents the
+; transformation.
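+; (For reference: 255 = 0b11111111 = 2^8-1, while 252 = 0b11111100 has the
+; two alignment bits cleared and is not of the form 2^n-1.)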
+
@pr21445_data = external global i32
define i1 @pr21445(i8 %a) {
; CHECK-LABEL: @pr21445(
; RUN: opt < %s -instcombine -S | FileCheck %s
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
define void @PR21651() {
switch i2 0, label %out [
i2 0, label %out
; RUN: opt -S < %s -instcombine | FileCheck %s
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f80:128:128-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S32"
@G = constant [3 x i8] c"%s\00" ; <[3 x i8]*> [#uses=1]
ret i8* %tmp3
; CHECK-LABEL: @test1(
-; CHECK: ret i8* getelementptr inbounds ([5 x i8]* @str, i32 0, i64 3)
+; CHECK: ret i8* getelementptr inbounds ([5 x i8]* @str, i32 0, i32 3)
}
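+; (With the 32-bit datalayout above, constant GEP indices are canonicalized
+; to the pointer width, hence i32 rather than i64 here and in @test2.)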
declare i8* @strchr(i8*, i32)
ret i8* %tmp3
; CHECK-LABEL: @test2(
-; CHECK: ret i8* getelementptr inbounds ([8 x i8]* @str1, i32 0, i64 7)
+; CHECK: ret i8* getelementptr inbounds ([8 x i8]* @str1, i32 0, i32 7)
}
define i8* @test3() {
; CHECK-LABEL: @test3(
; CHECK-NOT: alloca
; CHECK: Cont:
-; CHECK-NEXT: %storemerge = phi i32 [ 47, %Cond2 ], [ -987654321, %Cond ]
+; CHECK-NEXT: %storemerge = phi i32 [ -987654321, %Cond ], [ 47, %Cond2 ]
; CHECK-NEXT: ret i32 %storemerge
}
; cleaning up the alloca/store/GEP/load.
+; Provide pointer size information.
+target datalayout = "p:32:32"
+
+
; Extracting the zeroth element in an i32 array.
define i32 @type_pun_zeroth(<16 x i8> %in) {
; CHECK-LABEL: @type_pun_zeroth(
; RUN: opt -S < %s -indvars | opt -analyze -iv-users | grep "%cmp = icmp slt i32" | grep "= {%\.ph,+,1}<%for.cond>"
; PR8079
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
; LoopSimplify should invalidate indvars when splitting out the
; inner loop.
; RUN: opt < %s -loop-reduce -S | grep add | count 2
; PR 2662
+
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
@g_3 = common global i16 0 ; <i16*> [#uses=2]
@"\01LC" = internal constant [4 x i8] c"%d\0A\00" ; <[4 x i8]*> [#uses=1]
; RUN: opt < %s -loop-reduce -S | grep phi | count 2
; PR 2779
+
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
@g_19 = common global i32 0 ; <i32*> [#uses=3]
@"\01LC" = internal constant [4 x i8] c"%d\0A\00" ; <[4 x i8]*> [#uses=1]
target triple = "x86-apple-darwin"
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
; Verify that identical edges are merged. rdar://problem/6453893
; CHECK-LABEL: @test1(
; CHECK: bb89:
target triple = "x86-apple-darwin"
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
; CHECK-LABEL: @test(
; multiplies are hoisted out of the loop
; CHECK: while.body.lr.ph:
; CHECK: phi
; CHECK: phi
; CHECK-NOT: phi
-; CHECK: bitcast float* {{.*}} to i8*
-; CHECK: bitcast float* {{.*}} to i8*
-; CHECK: getelementptr i8, i8*
-; CHECK: getelementptr i8, i8*
+; CHECK: bitcast float* {{.*}} to i1*
+; CHECK: bitcast float* {{.*}} to i1*
+; CHECK: getelementptr i1, i1*
+; CHECK: getelementptr i1, i1*
define float @test(float* nocapture %A, float* nocapture %B, i32 %N, i32 %IA, i32 %IB) nounwind uwtable readonly ssp {
entry:
-; RUN: opt -loop-reduce -disable-output -debug-only=loop-reduce < %s 2> %t
-; RUN: FileCheck %s < %t
+; RUN: opt -loop-reduce -disable-output -debug-only=loop-reduce < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
;
; PR13361: LSR + SCEV "hangs" on reasonably sized test with sequence of loops
; CHECK-NOT:reg
; CHECK: Filtering for use
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
%struct.snork = type { %struct.fuga, i32, i32, i32, i32, i32, i32 }
%struct.fuga = type { %struct.gork, i64 }
%struct.gork = type { i8*, i32, i32, %struct.noot* }
; RUN: opt < %s -loop-reduce -S -mtriple=x86_64-unknown-unknown | grep "phi double" | count 1
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
define void @foobar(i32 %n) nounwind {
entry:
icmp eq i32 %n, 0 ; <i1>:0 [#uses=2]
; nonzero initial value.
; rdar://9786536
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
+
; First, make sure LSR doesn't crash on an empty IVUsers list.
; CHECK-LABEL: @dummyIV(
; CHECK-NOT: phi
; RUN: opt < %s -loop-reduce -S | FileCheck %s
; rdar://7382068
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
define void @t(i32 %c) nounwind optsize {
entry:
br label %bb6
; Don't reverse the iteration if the rhs of the compare is defined
; inside the loop.
+; Provide legal integer types.
+; Declare i2 as legal so that IVUsers will consider %indvar3451
+target datalayout = "n2:8:16:32:64"
+
define void @Fill_Buffer(i2* %p) nounwind {
entry:
br label %bb8
; A sign extend feeds an IVUser and cannot be hoisted into the AddRec.
; CollectIVChains should bailout on this case.
+
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
%struct = type { i8*, i8*, i16, i64, i16, i16, i16, i64, i64, i16, i8*, i64, i64, i64 }
; CHECK-LABEL: @test(
; RUN: opt < %s -loop-reduce -S | not grep mul
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
; Make sure we don't get a multiply by 6 in this loop.
define i32 @foo(i32 %A, i32 %B, i32 %C, i32 %D) {
; RUN: opt < %s -loop-reduce -S | FileCheck %s
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
@d = common global i32 0, align 4
define void @fn2(i32 %x) nounwind uwtable {
; RUN: opt < %s -analyze -iv-users | FileCheck %s
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
; The value of %r is dependent on a polynomial iteration expression.
;
; CHECK-LABEL: IV Users for loop %foo.loop
; Check that this test makes INDVAR and related stuff dead.
; RUN: opt < %s -loop-reduce -S | not grep INDVAR
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
declare i1 @pred()
define void @test(i32* %P) {
; Check that variable strides are reduced to adds instead of multiplies.
; RUN: opt < %s -loop-reduce -S | not grep mul
+; Provide legal integer types.
+target datalayout = "n8:16:32:64"
+
declare i1 @pred(i32)
define void @test([10000 x i32]* %P, i32 %STRIDE) {
; CHECK: test1
; The loop body contains two increments by %div.
; Make sure that 2*%div is recognizable, and not expressed as a bit mask of %d.
-; CHECK: --> {%p,+,(2 * (%d /u 4) * sizeof(i32))}
+; CHECK: --> {%p,+,(8 * (%d /u 4))}
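+; (With a datalayout, sizeof(i32) is the constant 4, so the stride of two
+; 4-byte increments folds to 2 * 4 = 8; the same folding appears in test1a.)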
define void @test1(i64 %d, i32* %p) nounwind uwtable ssp {
entry:
%div = udiv i64 %d, 4
; CHECK: test1a
; Same thing as test1, but it is even more tempting to fold 2 * (%d /u 2)
-; CHECK: --> {%p,+,(2 * (%d /u 2) * sizeof(i32))}
+; CHECK: --> {%p,+,(8 * (%d /u 2))}
define void @test1a(i64 %d, i32* %p) nounwind uwtable ssp {
entry:
%div = udiv i64 %d, 2
; RUN: opt < %s -scalarrepl -S | not grep shr
+; FIXME: I think this test is no longer valid.
+; It only passed because SROA aborted when no datalayout
+; was supplied.
+; XFAIL: *
+
+
%struct.S = type { i16 }
define zeroext i1 @f(i16 signext %b) {
+++ /dev/null
-; RUN: opt %s -scalarizer -scalarize-load-store -S | FileCheck %s
-
-; Test the handling of loads and stores when no data layout is available.
-define void @f1(<4 x float> *%dest, <4 x float> *%src) {
-; CHECK: @f1(
-; CHECK: %val = load <4 x float>, <4 x float>* %src, align 4
-; CHECK: %val.i0 = extractelement <4 x float> %val, i32 0
-; CHECK: %add.i0 = fadd float %val.i0, %val.i0
-; CHECK: %val.i1 = extractelement <4 x float> %val, i32 1
-; CHECK: %add.i1 = fadd float %val.i1, %val.i1
-; CHECK: %val.i2 = extractelement <4 x float> %val, i32 2
-; CHECK: %add.i2 = fadd float %val.i2, %val.i2
-; CHECK: %val.i3 = extractelement <4 x float> %val, i32 3
-; CHECK: %add.i3 = fadd float %val.i3, %val.i3
-; CHECK: %add.upto0 = insertelement <4 x float> undef, float %add.i0, i32 0
-; CHECK: %add.upto1 = insertelement <4 x float> %add.upto0, float %add.i1, i32 1
-; CHECK: %add.upto2 = insertelement <4 x float> %add.upto1, float %add.i2, i32 2
-; CHECK: %add = insertelement <4 x float> %add.upto2, float %add.i3, i32 3
-; CHECK: store <4 x float> %add, <4 x float>* %dest, align 8
-; CHECK: ret void
- %val = load <4 x float> , <4 x float> *%src, align 4
- %add = fadd <4 x float> %val, %val
- store <4 x float> %add, <4 x float> *%dest, align 8
- ret void
-}
// Add the target data from the target machine, if it exists, or the module.
if (const DataLayout *DL = Target->getDataLayout())
- M->setDataLayout(DL);
- PM.add(new DataLayoutPass());
+ M->setDataLayout(*DL);
if (RelaxAll.getNumOccurrences() > 0 &&
FileType != TargetMachine::CGFT_ObjectFile)
// In addition to deleting all other functions, we also want to spiff it
// up a little bit. Do this now.
legacy::PassManager Passes;
- Passes.add(new DataLayoutPass()); // Use correct DataLayout
std::vector<GlobalValue*> Gvs(GVs.begin(), GVs.end());
Passes.add(new TargetLibraryInfoWrapperPass(TLII));
// Add an appropriate DataLayout instance for this module.
- const DataLayout *DL = M->getDataLayout();
- if (!DL && !DefaultDataLayout.empty()) {
+ const DataLayout &DL = M->getDataLayout();
+ if (DL.isDefault() && !DefaultDataLayout.empty()) {
M->setDataLayout(DefaultDataLayout);
- DL = M->getDataLayout();
}
- if (DL)
- Passes.add(new DataLayoutPass());
-
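+ // The DataLayout is owned by the Module and queried by reference above,
+ // so no DataLayoutPass needs to be scheduled here.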
// Add internal analysis passes from the target machine.
Passes.add(createTargetTransformInfoWrapperPass(TM ? TM->getTargetIRAnalysis()
: TargetIRAnalysis()));
std::unique_ptr<legacy::FunctionPassManager> FPasses;
if (OptLevelO1 || OptLevelO2 || OptLevelOs || OptLevelOz || OptLevelO3) {
FPasses.reset(new legacy::FunctionPassManager(M.get()));
- if (DL)
- FPasses->add(new DataLayoutPass());
FPasses->add(createTargetTransformInfoWrapperPass(
TM ? TM->getTargetIRAnalysis() : TargetIRAnalysis()));
}
TEST_F(IRBuilderTest, DataLayout) {
std::unique_ptr<Module> M(new Module("test", Ctx));
M->setDataLayout("e-n32");
- EXPECT_TRUE(M->getDataLayout()->isLegalInteger(32));
+ EXPECT_TRUE(M->getDataLayout().isLegalInteger(32));
M->setDataLayout("e");
- EXPECT_FALSE(M->getDataLayout()->isLegalInteger(32));
+ EXPECT_FALSE(M->getDataLayout().isLegalInteger(32));
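+ // ("e-n32" is little-endian with i32 declared native; plain "e" declares
+ // no native integer widths, so isLegalInteger(32) becomes false.)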
}
TEST_F(IRBuilderTest, GetIntTy) {
initializeModuleNDMPass(*PassRegistry::getPassRegistry());
}
bool runOnModule(Module &M) override {
- EXPECT_TRUE(getAnalysisIfAvailable<DataLayoutPass>());
run++;
return false;
}
initializeCGPassPass(*PassRegistry::getPassRegistry());
}
bool runOnSCC(CallGraphSCC &SCMM) override {
- EXPECT_TRUE(getAnalysisIfAvailable<DataLayoutPass>());
run();
return false;
}
return false;
}
bool runOnLoop(Loop *L, LPPassManager &LPM) override {
- EXPECT_TRUE(getAnalysisIfAvailable<DataLayoutPass>());
run();
return false;
}
return false;
}
bool runOnBasicBlock(BasicBlock &BB) override {
- EXPECT_TRUE(getAnalysisIfAvailable<DataLayoutPass>());
run();
return false;
}
initializeFPassPass(*PassRegistry::getPassRegistry());
}
bool runOnModule(Module &M) override {
- EXPECT_TRUE(getAnalysisIfAvailable<DataLayoutPass>());
for (Module::iterator I=M.begin(),E=M.end(); I != E; ++I) {
Function &F = *I;
{
mNDM->run = mNDNM->run = mDNM->run = mNDM2->run = 0;
legacy::PassManager Passes;
- Passes.add(new DataLayoutPass());
Passes.add(mNDM2);
Passes.add(mNDM);
Passes.add(mNDNM);
mNDM->run = mNDNM->run = mDNM->run = mNDM2->run = 0;
legacy::PassManager Passes;
- Passes.add(new DataLayoutPass());
Passes.add(mNDM);
Passes.add(mNDNM);
Passes.add(mNDM2);// invalidates mNDM needed by mDNM
std::unique_ptr<Module> M(makeLLVMModule());
T *P = new T();
legacy::PassManager Passes;
- Passes.add(new DataLayoutPass());
Passes.add(P);
Passes.run(*M);
T::finishedOK(run);
Module *M = makeLLVMModule();
T *P = new T();
legacy::PassManager Passes;
- Passes.add(new DataLayoutPass());
Passes.add(P);
Passes.run(*M);
T::finishedOK(run, N);
SCOPED_TRACE("Running OnTheFlyTest");
struct OnTheFlyTest *O = new OnTheFlyTest();
legacy::PassManager Passes;
- Passes.add(new DataLayoutPass());
Passes.add(O);
Passes.run(*M);