path: root/lib/Transforms/Scalar
author     Micah Villmow <villmow@gmail.com>   2012-10-08 16:38:25 +0000
committer  Micah Villmow <villmow@gmail.com>   2012-10-08 16:38:25 +0000
commit     3574eca1b02600bac4e625297f4ecf745f4c4f32 (patch)
tree       197d30c8bd3a1505b260b9d2ead2b4d778ecbe9e /lib/Transforms/Scalar
parent     2b4b44e0d2e95fc695eafcc4d192fe1ae261e01e (diff)
Move TargetData to DataLayout.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@165402 91177308-0d34-0410-b5e6-96231b3b80d8
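
The change is mechanical but wide: the TargetData class is renamed to DataLayout and its header moves from llvm/Target/TargetData.h to llvm/DataLayout.h, so every pass that includes the header, declares a TargetData pointer, or queries the analysis is touched. A minimal sketch of the before/after pattern, assuming a hypothetical ExamplePass (the include, the type name, and the getAnalysisIfAvailable<> query mirror the hunks below):

    // Sketch only, not part of the patch; "ExamplePass" is hypothetical.
    #include "llvm/DataLayout.h"   // was: #include "llvm/Target/TargetData.h"
    #include "llvm/Function.h"
    #include "llvm/Pass.h"
    using namespace llvm;

    namespace {
    struct ExamplePass : public FunctionPass {
      static char ID;
      const DataLayout *TD;        // was: const TargetData *TD;
      ExamplePass() : FunctionPass(ID), TD(0) {}

      virtual bool runOnFunction(Function &F) {
        // DataLayout, like TargetData before it, is an optional immutable
        // pass; it may be absent, so passes query it and tolerate null.
        TD = getAnalysisIfAvailable<DataLayout>();  // was: <TargetData>
        if (!TD)
          return false;  // layout-dependent folds are skipped without it
        // ... use TD->getTypeSizeInBits(...), TD->getIntPtrType(...), etc.
        return false;    // this sketch modifies no IR
      }
    };
    }
    char ExamplePass::ID = 0;
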
Diffstat (limited to 'lib/Transforms/Scalar')
-rw-r--r--  lib/Transforms/Scalar/CodeGenPrepare.cpp        10
-rw-r--r--  lib/Transforms/Scalar/ConstantProp.cpp           4
-rw-r--r--  lib/Transforms/Scalar/DeadStoreElimination.cpp  16
-rw-r--r--  lib/Transforms/Scalar/EarlyCSE.cpp               6
-rw-r--r--  lib/Transforms/Scalar/GVN.cpp                   30
-rw-r--r--  lib/Transforms/Scalar/GlobalMerge.cpp           10
-rw-r--r--  lib/Transforms/Scalar/IndVarSimplify.cpp        12
-rw-r--r--  lib/Transforms/Scalar/JumpThreading.cpp          6
-rw-r--r--  lib/Transforms/Scalar/LICM.cpp                   6
-rw-r--r--  lib/Transforms/Scalar/LoopIdiomRecognize.cpp     8
-rw-r--r--  lib/Transforms/Scalar/LoopInstSimplify.cpp       4
-rw-r--r--  lib/Transforms/Scalar/LoopUnrollPass.cpp         6
-rw-r--r--  lib/Transforms/Scalar/MemCpyOptimizer.cpp       18
-rw-r--r--  lib/Transforms/Scalar/SCCP.cpp                  10
-rw-r--r--  lib/Transforms/Scalar/SROA.cpp                  46
-rw-r--r--  lib/Transforms/Scalar/Scalar.cpp                 2
-rw-r--r--  lib/Transforms/Scalar/ScalarReplAggregates.cpp  20
-rw-r--r--  lib/Transforms/Scalar/SimplifyCFGPass.cpp        6
-rw-r--r--  lib/Transforms/Scalar/SimplifyLibCalls.cpp      36
19 files changed, 128 insertions, 128 deletions
diff --git a/lib/Transforms/Scalar/CodeGenPrepare.cpp b/lib/Transforms/Scalar/CodeGenPrepare.cpp
index 6d0186f..ca06118 100644
--- a/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -38,7 +38,7 @@
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Transforms/Utils/AddrModeMatcher.h"
@@ -623,7 +623,7 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
// happens.
WeakVH IterHandle(CurInstIterator);
- replaceAndRecursivelySimplify(CI, RetVal, TLI ? TLI->getTargetData() : 0,
+ replaceAndRecursivelySimplify(CI, RetVal, TLI ? TLI->getDataLayout() : 0,
TLInfo, ModifiedDT ? 0 : DT);
// If the iterator instruction was recursively deleted, start over at the
@@ -647,8 +647,8 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
// From here on out we're working with named functions.
if (CI->getCalledFunction() == 0) return false;
- // We'll need TargetData from here on out.
- const TargetData *TD = TLI ? TLI->getTargetData() : 0;
+ // We'll need DataLayout from here on out.
+ const DataLayout *TD = TLI ? TLI->getDataLayout() : 0;
if (!TD) return false;
// Lower all default uses of _chk calls. This is very similar
@@ -930,7 +930,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
<< *MemoryInst);
Type *IntPtrTy =
- TLI->getTargetData()->getIntPtrType(AccessTy->getContext());
+ TLI->getDataLayout()->getIntPtrType(AccessTy->getContext());
Value *Result = 0;
diff --git a/lib/Transforms/Scalar/ConstantProp.cpp b/lib/Transforms/Scalar/ConstantProp.cpp
index 5430f62..369720b 100644
--- a/lib/Transforms/Scalar/ConstantProp.cpp
+++ b/lib/Transforms/Scalar/ConstantProp.cpp
@@ -24,7 +24,7 @@
#include "llvm/Constant.h"
#include "llvm/Instruction.h"
#include "llvm/Pass.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/ADT/Statistic.h"
@@ -67,7 +67,7 @@ bool ConstantPropagation::runOnFunction(Function &F) {
WorkList.insert(&*i);
}
bool Changed = false;
- TargetData *TD = getAnalysisIfAvailable<TargetData>();
+ DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
while (!WorkList.empty()) {
diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 301ee2f..602e5a4 100644
--- a/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -29,7 +29,7 @@
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/Debug.h"
@@ -199,7 +199,7 @@ getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
// If we don't have target data around, an unknown size in Location means
// that we should use the size of the pointee type. This isn't valid for
// memset/memcpy, which writes more than an i8.
- if (Loc.Size == AliasAnalysis::UnknownSize && AA.getTargetData() == 0)
+ if (Loc.Size == AliasAnalysis::UnknownSize && AA.getDataLayout() == 0)
return AliasAnalysis::Location();
return Loc;
}
@@ -213,7 +213,7 @@ getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
// If we don't have target data around, an unknown size in Location means
// that we should use the size of the pointee type. This isn't valid for
// init.trampoline, which writes more than an i8.
- if (AA.getTargetData() == 0) return AliasAnalysis::Location();
+ if (AA.getDataLayout() == 0) return AliasAnalysis::Location();
// FIXME: We don't know the size of the trampoline, so we can't really
// handle it here.
@@ -318,7 +318,7 @@ static Value *getStoredPointerOperand(Instruction *I) {
static uint64_t getPointerSize(const Value *V, AliasAnalysis &AA) {
uint64_t Size;
- if (getObjectSize(V, Size, AA.getTargetData(), AA.getTargetLibraryInfo()))
+ if (getObjectSize(V, Size, AA.getDataLayout(), AA.getTargetLibraryInfo()))
return Size;
return AliasAnalysis::UnknownSize;
}
@@ -351,10 +351,10 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
// comparison.
if (Later.Size == AliasAnalysis::UnknownSize ||
Earlier.Size == AliasAnalysis::UnknownSize) {
- // If we have no TargetData information around, then the size of the store
+ // If we have no DataLayout information around, then the size of the store
// is inferrable from the pointee type. If they are the same type, then
// we know that the store is safe.
- if (AA.getTargetData() == 0 &&
+ if (AA.getDataLayout() == 0 &&
Later.Ptr->getType() == Earlier.Ptr->getType())
return OverwriteComplete;
@@ -370,13 +370,13 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
// larger than the earlier one.
if (Later.Size == AliasAnalysis::UnknownSize ||
Earlier.Size == AliasAnalysis::UnknownSize ||
- AA.getTargetData() == 0)
+ AA.getDataLayout() == 0)
return OverwriteUnknown;
// Check to see if the later store is to the entire object (either a global,
// an alloca, or a byval argument). If so, then it clearly overwrites any
// other store to the same object.
- const TargetData &TD = *AA.getTargetData();
+ const DataLayout &TD = *AA.getDataLayout();
const Value *UO1 = GetUnderlyingObject(P1, &TD),
*UO2 = GetUnderlyingObject(P2, &TD);
diff --git a/lib/Transforms/Scalar/EarlyCSE.cpp b/lib/Transforms/Scalar/EarlyCSE.cpp
index 258a961..e57c8c5 100644
--- a/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -18,7 +18,7 @@
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/Debug.h"
@@ -216,7 +216,7 @@ namespace {
/// cases.
class EarlyCSE : public FunctionPass {
public:
- const TargetData *TD;
+ const DataLayout *TD;
const TargetLibraryInfo *TLI;
DominatorTree *DT;
typedef RecyclingAllocator<BumpPtrAllocator,
@@ -508,7 +508,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
bool EarlyCSE::runOnFunction(Function &F) {
std::deque<StackNode *> nodesToProcess;
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
DT = &getAnalysis<DominatorTree>();
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index e67d8af..eb0da20 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -41,7 +41,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/PatternMatch.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
@@ -503,7 +503,7 @@ namespace {
bool NoLoads;
MemoryDependenceAnalysis *MD;
DominatorTree *DT;
- const TargetData *TD;
+ const DataLayout *TD;
const TargetLibraryInfo *TLI;
ValueTable VN;
@@ -535,7 +535,7 @@ namespace {
InstrsToErase.push_back(I);
}
- const TargetData *getTargetData() const { return TD; }
+ const DataLayout *getDataLayout() const { return TD; }
DominatorTree &getDominatorTree() const { return *DT; }
AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
MemoryDependenceAnalysis &getMemDep() const { return *MD; }
@@ -730,7 +730,7 @@ SpeculationFailure:
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
Type *LoadTy,
- const TargetData &TD) {
+ const DataLayout &TD) {
// If the loaded or stored value is a first class array or struct, don't try
// to transform them. We need to be able to bitcast to integer.
if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
@@ -756,7 +756,7 @@ static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
Type *LoadedTy,
Instruction *InsertPt,
- const TargetData &TD) {
+ const DataLayout &TD) {
if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
return 0;
@@ -842,7 +842,7 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
Value *WritePtr,
uint64_t WriteSizeInBits,
- const TargetData &TD) {
+ const DataLayout &TD) {
// If the loaded or stored value is a first class array or struct, don't try
// to transform them. We need to be able to bitcast to integer.
if (LoadTy->isStructTy() || LoadTy->isArrayTy())
@@ -915,7 +915,7 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
StoreInst *DepSI,
- const TargetData &TD) {
+ const DataLayout &TD) {
// Cannot handle reading from store of first-class aggregate yet.
if (DepSI->getValueOperand()->getType()->isStructTy() ||
DepSI->getValueOperand()->getType()->isArrayTy())
@@ -931,7 +931,7 @@ static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
/// memdep query of a load that ends up being clobbered by another load. See if
/// the other load can feed into the second load.
static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,
- LoadInst *DepLI, const TargetData &TD){
+ LoadInst *DepLI, const DataLayout &TD){
// Cannot handle reading from store of first-class aggregate yet.
if (DepLI->getType()->isStructTy() || DepLI->getType()->isArrayTy())
return -1;
@@ -959,7 +959,7 @@ static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,
static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
MemIntrinsic *MI,
- const TargetData &TD) {
+ const DataLayout &TD) {
// If the mem operation is a non-constant size, we can't handle it.
ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
if (SizeCst == 0) return -1;
@@ -1009,7 +1009,7 @@ static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
/// before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
Type *LoadTy,
- Instruction *InsertPt, const TargetData &TD){
+ Instruction *InsertPt, const DataLayout &TD){
LLVMContext &Ctx = SrcVal->getType()->getContext();
uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
@@ -1048,7 +1048,7 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
Type *LoadTy, Instruction *InsertPt,
GVN &gvn) {
- const TargetData &TD = *gvn.getTargetData();
+ const DataLayout &TD = *gvn.getDataLayout();
// If Offset+LoadTy exceeds the size of SrcVal, then we must be wanting to
// widen SrcVal out to a larger load.
unsigned SrcValSize = TD.getTypeStoreSize(SrcVal->getType());
@@ -1107,7 +1107,7 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
Type *LoadTy, Instruction *InsertPt,
- const TargetData &TD){
+ const DataLayout &TD){
LLVMContext &Ctx = LoadTy->getContext();
uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;
@@ -1231,7 +1231,7 @@ struct AvailableValueInBlock {
if (isSimpleValue()) {
Res = getSimpleValue();
if (Res->getType() != LoadTy) {
- const TargetData *TD = gvn.getTargetData();
+ const DataLayout *TD = gvn.getDataLayout();
assert(TD && "Need target data to handle type mismatch case");
Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
*TD);
@@ -1253,7 +1253,7 @@ struct AvailableValueInBlock {
<< *Res << '\n' << "\n\n\n");
}
} else {
- const TargetData *TD = gvn.getTargetData();
+ const DataLayout *TD = gvn.getDataLayout();
assert(TD && "Need target data to handle type mismatch case");
Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
LoadTy, BB->getTerminator(), *TD);
@@ -2295,7 +2295,7 @@ bool GVN::runOnFunction(Function& F) {
if (!NoLoads)
MD = &getAnalysis<MemoryDependenceAnalysis>();
DT = &getAnalysis<DominatorTree>();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
VN.setMemDep(MD);
diff --git a/lib/Transforms/Scalar/GlobalMerge.cpp b/lib/Transforms/Scalar/GlobalMerge.cpp
index b36a3cb..6301aad 100644
--- a/lib/Transforms/Scalar/GlobalMerge.cpp
+++ b/lib/Transforms/Scalar/GlobalMerge.cpp
@@ -62,7 +62,7 @@
#include "llvm/Intrinsics.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/ADT/Statistic.h"
@@ -98,9 +98,9 @@ namespace {
}
struct GlobalCmp {
- const TargetData *TD;
+ const DataLayout *TD;
- GlobalCmp(const TargetData *td) : TD(td) { }
+ GlobalCmp(const DataLayout *td) : TD(td) { }
bool operator()(const GlobalVariable *GV1, const GlobalVariable *GV2) {
Type *Ty1 = cast<PointerType>(GV1->getType())->getElementType();
@@ -119,7 +119,7 @@ INITIALIZE_PASS(GlobalMerge, "global-merge",
bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
Module &M, bool isConst) const {
- const TargetData *TD = TLI->getTargetData();
+ const DataLayout *TD = TLI->getDataLayout();
// FIXME: Infer the maximum possible offset depending on the actual users
// (these max offsets are different for the users inside Thumb or ARM
@@ -170,7 +170,7 @@ bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
bool GlobalMerge::doInitialization(Module &M) {
SmallVector<GlobalVariable*, 16> Globals, ConstGlobals, BSSGlobals;
- const TargetData *TD = TLI->getTargetData();
+ const DataLayout *TD = TLI->getDataLayout();
unsigned MaxOffset = TLI->getMaximalGlobalOffset();
bool Changed = false;
diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp
index af0ee34..78630b2 100644
--- a/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -43,7 +43,7 @@
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/SimplifyIndVar.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
@@ -68,7 +68,7 @@ namespace {
LoopInfo *LI;
ScalarEvolution *SE;
DominatorTree *DT;
- TargetData *TD;
+ DataLayout *TD;
TargetLibraryInfo *TLI;
SmallVector<WeakVH, 16> DeadInsts;
@@ -597,13 +597,13 @@ namespace {
class WideIVVisitor : public IVVisitor {
ScalarEvolution *SE;
- const TargetData *TD;
+ const DataLayout *TD;
public:
WideIVInfo WI;
WideIVVisitor(PHINode *NarrowIV, ScalarEvolution *SCEV,
- const TargetData *TData) :
+ const DataLayout *TData) :
SE(SCEV), TD(TData) { WI.NarrowIV = NarrowIV; }
// Implement the interface used by simplifyUsersOfIV.
@@ -1346,7 +1346,7 @@ static bool AlmostDeadIV(PHINode *Phi, BasicBlock *LatchBlock, Value *Cond) {
/// could at least handle constant BECounts.
static PHINode *
FindLoopCounter(Loop *L, const SCEV *BECount,
- ScalarEvolution *SE, DominatorTree *DT, const TargetData *TD) {
+ ScalarEvolution *SE, DominatorTree *DT, const DataLayout *TD) {
uint64_t BCWidth = SE->getTypeSizeInBits(BECount->getType());
Value *Cond =
@@ -1703,7 +1703,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
LI = &getAnalysis<LoopInfo>();
SE = &getAnalysis<ScalarEvolution>();
DT = &getAnalysis<DominatorTree>();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
DeadInsts.clear();
diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp
index 20844c6..e7ffa09 100644
--- a/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/lib/Transforms/Scalar/JumpThreading.cpp
@@ -23,7 +23,7 @@
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
@@ -75,7 +75,7 @@ namespace {
/// revectored to the false side of the second if.
///
class JumpThreading : public FunctionPass {
- TargetData *TD;
+ DataLayout *TD;
TargetLibraryInfo *TLI;
LazyValueInfo *LVI;
#ifdef NDEBUG
@@ -147,7 +147,7 @@ FunctionPass *llvm::createJumpThreadingPass() { return new JumpThreading(); }
///
bool JumpThreading::runOnFunction(Function &F) {
DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n");
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
LVI = &getAnalysis<LazyValueInfo>();
diff --git a/lib/Transforms/Scalar/LICM.cpp b/lib/Transforms/Scalar/LICM.cpp
index 99bedce..4818437 100644
--- a/lib/Transforms/Scalar/LICM.cpp
+++ b/lib/Transforms/Scalar/LICM.cpp
@@ -46,7 +46,7 @@
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
@@ -100,7 +100,7 @@ namespace {
LoopInfo *LI; // Current LoopInfo
DominatorTree *DT; // Dominator Tree for the current Loop.
- TargetData *TD; // TargetData for constant folding.
+ DataLayout *TD; // DataLayout for constant folding.
TargetLibraryInfo *TLI; // TargetLibraryInfo for constant folding.
// State that is updated as we process loops.
@@ -207,7 +207,7 @@ bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) {
AA = &getAnalysis<AliasAnalysis>();
DT = &getAnalysis<DominatorTree>();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
CurAST = new AliasSetTracker(*AA);
diff --git a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 1553328..a44e798 100644
--- a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -54,7 +54,7 @@
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
@@ -65,7 +65,7 @@ STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores");
namespace {
class LoopIdiomRecognize : public LoopPass {
Loop *CurLoop;
- const TargetData *TD;
+ const DataLayout *TD;
DominatorTree *DT;
ScalarEvolution *SE;
TargetLibraryInfo *TLI;
@@ -199,7 +199,7 @@ bool LoopIdiomRecognize::runOnLoop(Loop *L, LPPassManager &LPM) {
return false;
// We require target data for now.
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
if (TD == 0) return false;
DT = &getAnalysis<DominatorTree>();
@@ -408,7 +408,7 @@ static bool mayLoopAccessLocation(Value *Ptr,AliasAnalysis::ModRefResult Access,
///
/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
/// just replicate their input array and then pass on to memset_pattern16.
-static Constant *getMemSetPatternValue(Value *V, const TargetData &TD) {
+static Constant *getMemSetPatternValue(Value *V, const DataLayout &TD) {
// If the value isn't a constant, we can't promote it to being in a constant
// array. We could theoretically do a store to an alloca or something, but
// that doesn't seem worthwhile.
diff --git a/lib/Transforms/Scalar/LoopInstSimplify.cpp b/lib/Transforms/Scalar/LoopInstSimplify.cpp
index f5daa7b..558f62e 100644
--- a/lib/Transforms/Scalar/LoopInstSimplify.cpp
+++ b/lib/Transforms/Scalar/LoopInstSimplify.cpp
@@ -18,7 +18,7 @@
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
@@ -66,7 +66,7 @@ Pass *llvm::createLoopInstSimplifyPass() {
bool LoopInstSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>();
LoopInfo *LI = &getAnalysis<LoopInfo>();
- const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+ const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
SmallVector<BasicBlock*, 8> ExitBlocks;
diff --git a/lib/Transforms/Scalar/LoopUnrollPass.cpp b/lib/Transforms/Scalar/LoopUnrollPass.cpp
index f8709a5..f412c38 100644
--- a/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -22,7 +22,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include <climits>
using namespace llvm;
@@ -113,7 +113,7 @@ Pass *llvm::createLoopUnrollPass(int Threshold, int Count, int AllowPartial) {
/// ApproximateLoopSize - Approximate the size of the loop.
static unsigned ApproximateLoopSize(const Loop *L, unsigned &NumCalls,
- const TargetData *TD) {
+ const DataLayout *TD) {
CodeMetrics Metrics;
for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
I != E; ++I)
@@ -178,7 +178,7 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
// Enforce the threshold.
if (Threshold != NoThreshold) {
- const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+ const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
unsigned NumInlineCandidates;
unsigned LoopSize = ApproximateLoopSize(L, NumInlineCandidates, TD);
DEBUG(dbgs() << " Loop Size = " << LoopSize << "\n");
diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index fba4aae..517657c 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -27,7 +27,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include <list>
@@ -39,7 +39,7 @@ STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");
static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
- bool &VariableIdxFound, const TargetData &TD){
+ bool &VariableIdxFound, const DataLayout &TD){
// Skip over the first indices.
gep_type_iterator GTI = gep_type_begin(GEP);
for (unsigned i = 1; i != Idx; ++i, ++GTI)
@@ -72,7 +72,7 @@ static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
/// constant offset, and return that constant offset. For example, Ptr1 might
/// be &A[42], and Ptr2 might be &A[40]. In this case offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
- const TargetData &TD) {
+ const DataLayout &TD) {
Ptr1 = Ptr1->stripPointerCasts();
Ptr2 = Ptr2->stripPointerCasts();
GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
@@ -141,12 +141,12 @@ struct MemsetRange {
/// TheStores - The actual stores that make up this range.
SmallVector<Instruction*, 16> TheStores;
- bool isProfitableToUseMemset(const TargetData &TD) const;
+ bool isProfitableToUseMemset(const DataLayout &TD) const;
};
} // end anon namespace
-bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const {
+bool MemsetRange::isProfitableToUseMemset(const DataLayout &TD) const {
// If we found more than 4 stores to merge or 16 bytes, use memset.
if (TheStores.size() >= 4 || End-Start >= 16) return true;
@@ -192,9 +192,9 @@ class MemsetRanges {
/// because each element is relatively large and expensive to copy.
std::list<MemsetRange> Ranges;
typedef std::list<MemsetRange>::iterator range_iterator;
- const TargetData &TD;
+ const DataLayout &TD;
public:
- MemsetRanges(const TargetData &td) : TD(td) {}
+ MemsetRanges(const DataLayout &td) : TD(td) {}
typedef std::list<MemsetRange>::const_iterator const_iterator;
const_iterator begin() const { return Ranges.begin(); }
@@ -302,7 +302,7 @@ namespace {
class MemCpyOpt : public FunctionPass {
MemoryDependenceAnalysis *MD;
TargetLibraryInfo *TLI;
- const TargetData *TD;
+ const DataLayout *TD;
public:
static char ID; // Pass identification, replacement for typeid
MemCpyOpt() : FunctionPass(ID) {
@@ -1000,7 +1000,7 @@ bool MemCpyOpt::iterateOnFunction(Function &F) {
bool MemCpyOpt::runOnFunction(Function &F) {
bool MadeChange = false;
MD = &getAnalysis<MemoryDependenceAnalysis>();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
// If we don't have at least memset and memcpy, there is little point of doing
diff --git a/lib/Transforms/Scalar/SCCP.cpp b/lib/Transforms/Scalar/SCCP.cpp
index 2c39aab..686520e 100644
--- a/lib/Transforms/Scalar/SCCP.cpp
+++ b/lib/Transforms/Scalar/SCCP.cpp
@@ -26,7 +26,7 @@
#include "llvm/Pass.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
@@ -153,7 +153,7 @@ namespace {
/// Constant Propagation.
///
class SCCPSolver : public InstVisitor<SCCPSolver> {
- const TargetData *TD;
+ const DataLayout *TD;
const TargetLibraryInfo *TLI;
SmallPtrSet<BasicBlock*, 8> BBExecutable; // The BBs that are executable.
DenseMap<Value*, LatticeVal> ValueState; // The state each value is in.
@@ -205,7 +205,7 @@ class SCCPSolver : public InstVisitor<SCCPSolver> {
typedef std::pair<BasicBlock*, BasicBlock*> Edge;
DenseSet<Edge> KnownFeasibleEdges;
public:
- SCCPSolver(const TargetData *td, const TargetLibraryInfo *tli)
+ SCCPSolver(const DataLayout *td, const TargetLibraryInfo *tli)
: TD(td), TLI(tli) {}
/// MarkBlockExecutable - This method can be used by clients to mark all of
@@ -1564,7 +1564,7 @@ static void DeleteInstructionInBlock(BasicBlock *BB) {
//
bool SCCP::runOnFunction(Function &F) {
DEBUG(dbgs() << "SCCP on function '" << F.getName() << "'\n");
- const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+ const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
SCCPSolver Solver(TD, TLI);
@@ -1693,7 +1693,7 @@ static bool AddressIsTaken(const GlobalValue *GV) {
}
bool IPSCCP::runOnModule(Module &M) {
- const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+ const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
SCCPSolver Solver(TD, TLI);
diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index 380555d..b279389 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -51,7 +51,7 @@
#include "llvm/Support/InstVisitor.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
@@ -183,7 +183,7 @@ public:
///
/// Construction does most of the work for partitioning the alloca. This
/// performs the necessary walks of users and builds a partitioning from it.
- AllocaPartitioning(const TargetData &TD, AllocaInst &AI);
+ AllocaPartitioning(const DataLayout &TD, AllocaInst &AI);
/// \brief Test whether a pointer to the allocation escapes our analysis.
///
@@ -411,7 +411,7 @@ template <typename DerivedT, typename RetT>
class AllocaPartitioning::BuilderBase
: public InstVisitor<DerivedT, RetT> {
public:
- BuilderBase(const TargetData &TD, AllocaInst &AI, AllocaPartitioning &P)
+ BuilderBase(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P)
: TD(TD),
AllocSize(TD.getTypeAllocSize(AI.getAllocatedType())),
P(P) {
@@ -419,7 +419,7 @@ public:
}
protected:
- const TargetData &TD;
+ const DataLayout &TD;
const uint64_t AllocSize;
AllocaPartitioning &P;
@@ -520,7 +520,7 @@ class AllocaPartitioning::PartitionBuilder
SmallDenseMap<Instruction *, unsigned> MemTransferPartitionMap;
public:
- PartitionBuilder(const TargetData &TD, AllocaInst &AI, AllocaPartitioning &P)
+ PartitionBuilder(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P)
: BuilderBase<PartitionBuilder, bool>(TD, AI, P) {}
/// \brief Run the builder over the allocation.
@@ -849,7 +849,7 @@ class AllocaPartitioning::UseBuilder : public BuilderBase<UseBuilder> {
SmallPtrSet<Instruction *, 4> VisitedDeadInsts;
public:
- UseBuilder(const TargetData &TD, AllocaInst &AI, AllocaPartitioning &P)
+ UseBuilder(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P)
: BuilderBase<UseBuilder>(TD, AI, P) {}
/// \brief Run the builder over the allocation.
@@ -1115,7 +1115,7 @@ void AllocaPartitioning::splitAndMergePartitions() {
Partitions.erase(Partitions.end() - NumDeadPartitions, Partitions.end());
}
-AllocaPartitioning::AllocaPartitioning(const TargetData &TD, AllocaInst &AI)
+AllocaPartitioning::AllocaPartitioning(const DataLayout &TD, AllocaInst &AI)
:
#ifndef NDEBUG
AI(AI),
@@ -1347,7 +1347,7 @@ class SROA : public FunctionPass {
const bool RequiresDomTree;
LLVMContext *C;
- const TargetData *TD;
+ const DataLayout *TD;
DominatorTree *DT;
/// \brief Worklist of alloca instructions to simplify.
@@ -1426,12 +1426,12 @@ class PHIOrSelectSpeculator : public InstVisitor<PHIOrSelectSpeculator> {
// Befriend the base class so it can delegate to private visit methods.
friend class llvm::InstVisitor<PHIOrSelectSpeculator>;
- const TargetData &TD;
+ const DataLayout &TD;
AllocaPartitioning &P;
SROA &Pass;
public:
- PHIOrSelectSpeculator(const TargetData &TD, AllocaPartitioning &P, SROA &Pass)
+ PHIOrSelectSpeculator(const DataLayout &TD, AllocaPartitioning &P, SROA &Pass)
: TD(TD), P(P), Pass(Pass) {}
/// \brief Visit the users of an alloca partition and rewrite them.
@@ -1708,7 +1708,7 @@ private:
/// If the provided GEP is all-constant, the total byte offset formed by the
/// GEP is computed and Offset is set to it. If the GEP has any non-constant
/// operands, the function returns false and the value of Offset is unmodified.
-static bool accumulateGEPOffsets(const TargetData &TD, GEPOperator &GEP,
+static bool accumulateGEPOffsets(const DataLayout &TD, GEPOperator &GEP,
APInt &Offset) {
APInt GEPOffset(Offset.getBitWidth(), 0);
for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
@@ -1768,7 +1768,7 @@ static Value *buildGEP(IRBuilder<> &IRB, Value *BasePtr,
/// TargetTy. If we can't find one with the same type, we at least try to use
/// one with the same size. If none of that works, we just produce the GEP as
/// indicated by Indices to have the correct offset.
-static Value *getNaturalGEPWithType(IRBuilder<> &IRB, const TargetData &TD,
+static Value *getNaturalGEPWithType(IRBuilder<> &IRB, const DataLayout &TD,
Value *BasePtr, Type *Ty, Type *TargetTy,
SmallVectorImpl<Value *> &Indices,
const Twine &Prefix) {
@@ -1803,7 +1803,7 @@ static Value *getNaturalGEPWithType(IRBuilder<> &IRB, const TargetData &TD,
///
/// This is the recursive step for getNaturalGEPWithOffset that walks down the
/// element types adding appropriate indices for the GEP.
-static Value *getNaturalGEPRecursively(IRBuilder<> &IRB, const TargetData &TD,
+static Value *getNaturalGEPRecursively(IRBuilder<> &IRB, const DataLayout &TD,
Value *Ptr, Type *Ty, APInt &Offset,
Type *TargetTy,
SmallVectorImpl<Value *> &Indices,
@@ -1874,7 +1874,7 @@ static Value *getNaturalGEPRecursively(IRBuilder<> &IRB, const TargetData &TD,
/// Indices, and setting Ty to the result subtype.
///
/// If no natural GEP can be constructed, this function returns null.
-static Value *getNaturalGEPWithOffset(IRBuilder<> &IRB, const TargetData &TD,
+static Value *getNaturalGEPWithOffset(IRBuilder<> &IRB, const DataLayout &TD,
Value *Ptr, APInt Offset, Type *TargetTy,
SmallVectorImpl<Value *> &Indices,
const Twine &Prefix) {
@@ -1914,7 +1914,7 @@ static Value *getNaturalGEPWithOffset(IRBuilder<> &IRB, const TargetData &TD,
/// properties. The algorithm tries to fold as many constant indices into
/// a single GEP as possible, thus making each GEP more independent of the
/// surrounding code.
-static Value *getAdjustedPtr(IRBuilder<> &IRB, const TargetData &TD,
+static Value *getAdjustedPtr(IRBuilder<> &IRB, const DataLayout &TD,
Value *Ptr, APInt Offset, Type *PointerTy,
const Twine &Prefix) {
// Even though we don't look through PHI nodes, we could be called on an
@@ -2010,7 +2010,7 @@ static Value *getAdjustedPtr(IRBuilder<> &IRB, const TargetData &TD,
/// SSA value. We only can ensure this for a limited set of operations, and we
/// don't want to do the rewrites unless we are confident that the result will
/// be promotable, so we have an early test here.
-static bool isVectorPromotionViable(const TargetData &TD,
+static bool isVectorPromotionViable(const DataLayout &TD,
Type *AllocaTy,
AllocaPartitioning &P,
uint64_t PartitionBeginOffset,
@@ -2080,7 +2080,7 @@ static bool isVectorPromotionViable(const TargetData &TD,
/// promotion to an SSA value. We only can ensure this for a limited set of
/// operations, and we don't want to do the rewrites unless we are confident
/// that the result will be promotable, so we have an early test here.
-static bool isIntegerPromotionViable(const TargetData &TD,
+static bool isIntegerPromotionViable(const DataLayout &TD,
Type *AllocaTy,
uint64_t AllocBeginOffset,
AllocaPartitioning &P,
@@ -2142,7 +2142,7 @@ class AllocaPartitionRewriter : public InstVisitor<AllocaPartitionRewriter,
// Befriend the base class so it can delegate to private visit methods.
friend class llvm::InstVisitor<AllocaPartitionRewriter, bool>;
- const TargetData &TD;
+ const DataLayout &TD;
AllocaPartitioning &P;
SROA &Pass;
AllocaInst &OldAI, &NewAI;
@@ -2176,7 +2176,7 @@ class AllocaPartitionRewriter : public InstVisitor<AllocaPartitionRewriter,
std::string NamePrefix;
public:
- AllocaPartitionRewriter(const TargetData &TD, AllocaPartitioning &P,
+ AllocaPartitionRewriter(const DataLayout &TD, AllocaPartitioning &P,
AllocaPartitioning::iterator PI,
SROA &Pass, AllocaInst &OldAI, AllocaInst &NewAI,
uint64_t NewBeginOffset, uint64_t NewEndOffset)
@@ -2791,7 +2791,7 @@ class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> {
// Befriend the base class so it can delegate to private visit methods.
friend class llvm::InstVisitor<AggLoadStoreRewriter, bool>;
- const TargetData &TD;
+ const DataLayout &TD;
/// Queue of pointer uses to analyze and potentially rewrite.
SmallVector<Use *, 8> Queue;
@@ -2804,7 +2804,7 @@ class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> {
Use *U;
public:
- AggLoadStoreRewriter(const TargetData &TD) : TD(TD) {}
+ AggLoadStoreRewriter(const DataLayout &TD) : TD(TD) {}
/// Rewrite loads and stores through a pointer and all pointers derived from
/// it.
@@ -3004,7 +3004,7 @@ private:
/// when the size or offset cause either end of type-based partition to be off.
/// Also, this is a best-effort routine. It is reasonable to give up and not
/// return a type if necessary.
-static Type *getTypePartition(const TargetData &TD, Type *Ty,
+static Type *getTypePartition(const DataLayout &TD, Type *Ty,
uint64_t Offset, uint64_t Size) {
if (Offset == 0 && TD.getTypeAllocSize(Ty) == Size)
return Ty;
@@ -3396,7 +3396,7 @@ namespace {
bool SROA::runOnFunction(Function &F) {
DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
C = &F.getContext();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
if (!TD) {
DEBUG(dbgs() << " Skipping SROA -- no target data!\n");
return false;
diff --git a/lib/Transforms/Scalar/Scalar.cpp b/lib/Transforms/Scalar/Scalar.cpp
index 95b6fa7..39630fd 100644
--- a/lib/Transforms/Scalar/Scalar.cpp
+++ b/lib/Transforms/Scalar/Scalar.cpp
@@ -19,7 +19,7 @@
#include "llvm/PassManager.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/Verifier.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Transforms/Scalar.h"
using namespace llvm;
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index 8090fdf..a46d09c 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -46,7 +46,7 @@
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
@@ -87,7 +87,7 @@ namespace {
private:
bool HasDomTree;
- TargetData *TD;
+ DataLayout *TD;
/// DeadInsts - Keep track of instructions we have made dead, so that
/// we can remove them after we are done working.
@@ -258,7 +258,7 @@ namespace {
class ConvertToScalarInfo {
/// AllocaSize - The size of the alloca being considered in bytes.
unsigned AllocaSize;
- const TargetData &TD;
+ const DataLayout &TD;
unsigned ScalarLoadThreshold;
/// IsNotTrivial - This is set to true if there is some access to the object
@@ -301,7 +301,7 @@ class ConvertToScalarInfo {
bool HadDynamicAccess;
public:
- explicit ConvertToScalarInfo(unsigned Size, const TargetData &td,
+ explicit ConvertToScalarInfo(unsigned Size, const DataLayout &td,
unsigned SLT)
: AllocaSize(Size), TD(td), ScalarLoadThreshold(SLT), IsNotTrivial(false),
ScalarKind(Unknown), VectorTy(0), HadNonMemTransferAccess(false),
@@ -1020,11 +1020,11 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
bool SROA::runOnFunction(Function &F) {
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
bool Changed = performPromotion(F);
- // FIXME: ScalarRepl currently depends on TargetData more than it
+ // FIXME: ScalarRepl currently depends on DataLayout more than it
// theoretically needs to. It should be refactored in order to support
// target-independent IR. Until this is done, just skip the actual
// scalar-replacement portion of this pass.
@@ -1134,7 +1134,7 @@ public:
///
/// We can do this to a select if its only uses are loads and if the operand to
/// the select can be loaded unconditionally.
-static bool isSafeSelectToSpeculate(SelectInst *SI, const TargetData *TD) {
+static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *TD) {
bool TDerefable = SI->getTrueValue()->isDereferenceablePointer();
bool FDerefable = SI->getFalseValue()->isDereferenceablePointer();
@@ -1172,7 +1172,7 @@ static bool isSafeSelectToSpeculate(SelectInst *SI, const TargetData *TD) {
///
/// We can do this to a select if its only uses are loads and if the operand to
/// the select can be loaded unconditionally.
-static bool isSafePHIToSpeculate(PHINode *PN, const TargetData *TD) {
+static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *TD) {
// For now, we can only do this promotion if the load is in the same block as
// the PHI, and if there are no stores between the phi and load.
// TODO: Allow recursive phi users.
@@ -1236,7 +1236,7 @@ static bool isSafePHIToSpeculate(PHINode *PN, const TargetData *TD) {
/// direct (non-volatile) loads and stores to it. If the alloca is close but
/// not quite there, this will transform the code to allow promotion. As such,
/// it is a non-pure predicate.
-static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
+static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *TD) {
SetVector<Instruction*, SmallVector<Instruction*, 4>,
SmallPtrSet<Instruction*, 4> > InstsToRewrite;
@@ -2537,7 +2537,7 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
/// HasPadding - Return true if the specified type has any structure or
/// alignment padding in between the elements that would be split apart
/// by SROA; return false otherwise.
-static bool HasPadding(Type *Ty, const TargetData &TD) {
+static bool HasPadding(Type *Ty, const DataLayout &TD) {
if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
Ty = ATy->getElementType();
return TD.getTypeSizeInBits(Ty) != TD.getTypeAllocSizeInBits(Ty);
diff --git a/lib/Transforms/Scalar/SimplifyCFGPass.cpp b/lib/Transforms/Scalar/SimplifyCFGPass.cpp
index 6d27db1..84b820b 100644
--- a/lib/Transforms/Scalar/SimplifyCFGPass.cpp
+++ b/lib/Transforms/Scalar/SimplifyCFGPass.cpp
@@ -31,7 +31,7 @@
#include "llvm/Attributes.h"
#include "llvm/Support/CFG.h"
#include "llvm/Pass.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
@@ -293,7 +293,7 @@ static bool mergeEmptyReturnBlocks(Function &F) {
/// iterativelySimplifyCFG - Call SimplifyCFG on all the blocks in the function,
/// iterating until no more changes are made.
-static bool iterativelySimplifyCFG(Function &F, const TargetData *TD) {
+static bool iterativelySimplifyCFG(Function &F, const DataLayout *TD) {
bool Changed = false;
bool LocalChange = true;
while (LocalChange) {
@@ -316,7 +316,7 @@ static bool iterativelySimplifyCFG(Function &F, const TargetData *TD) {
// simplify the CFG.
//
bool CFGSimplifyPass::runOnFunction(Function &F) {
- const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+ const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
bool EverChanged = removeUnreachableBlocksFromFn(F);
EverChanged |= mergeEmptyReturnBlocks(F);
EverChanged |= iterativelySimplifyCFG(F, TD);
diff --git a/lib/Transforms/Scalar/SimplifyLibCalls.cpp b/lib/Transforms/Scalar/SimplifyLibCalls.cpp
index 65311fe..15625f9 100644
--- a/lib/Transforms/Scalar/SimplifyLibCalls.cpp
+++ b/lib/Transforms/Scalar/SimplifyLibCalls.cpp
@@ -31,7 +31,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Config/config.h" // FIXME: Shouldn't depend on host!
using namespace llvm;
@@ -53,7 +53,7 @@ namespace {
class LibCallOptimization {
protected:
Function *Caller;
- const TargetData *TD;
+ const DataLayout *TD;
const TargetLibraryInfo *TLI;
LLVMContext* Context;
public:
@@ -68,7 +68,7 @@ public:
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B)
=0;
- Value *OptimizeCall(CallInst *CI, const TargetData *TD,
+ Value *OptimizeCall(CallInst *CI, const DataLayout *TD,
const TargetLibraryInfo *TLI, IRBuilder<> &B) {
Caller = CI->getParent()->getParent();
this->TD = TD;
@@ -159,7 +159,7 @@ struct StrCatOpt : public LibCallOptimization {
if (Len == 0)
return Dst;
- // These optimizations require TargetData.
+ // These optimizations require DataLayout.
if (!TD) return 0;
return EmitStrLenMemCpy(Src, Dst, Len, B);
@@ -220,7 +220,7 @@ struct StrNCatOpt : public StrCatOpt {
// strncat(x, c, 0) -> x
if (SrcLen == 0 || Len == 0) return Dst;
- // These optimizations require TargetData.
+ // These optimizations require DataLayout.
if (!TD) return 0;
// We don't optimize this case
@@ -251,7 +251,7 @@ struct StrChrOpt : public LibCallOptimization {
// of the input string and turn this into memchr.
ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
if (CharC == 0) {
- // These optimizations require TargetData.
+ // These optimizations require DataLayout.
if (!TD) return 0;
uint64_t Len = GetStringLength(SrcStr);
@@ -356,7 +356,7 @@ struct StrCmpOpt : public LibCallOptimization {
uint64_t Len1 = GetStringLength(Str1P);
uint64_t Len2 = GetStringLength(Str2P);
if (Len1 && Len2) {
- // These optimizations require TargetData.
+ // These optimizations require DataLayout.
if (!TD) return 0;
return EmitMemCmp(Str1P, Str2P,
@@ -444,7 +444,7 @@ struct StrCpyOpt : public LibCallOptimization {
if (Dst == Src) // strcpy(x,x) -> x
return Src;
- // These optimizations require TargetData.
+ // These optimizations require DataLayout.
if (!TD) return 0;
// See if we can get the length of the input string.
@@ -481,7 +481,7 @@ struct StpCpyOpt: public LibCallOptimization {
FT->getParamType(0) != B.getInt8PtrTy())
return 0;
- // These optimizations require TargetData.
+ // These optimizations require DataLayout.
if (!TD) return 0;
Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
@@ -543,7 +543,7 @@ struct StrNCpyOpt : public LibCallOptimization {
if (Len == 0) return Dst; // strncpy(x, y, 0) -> x
- // These optimizations require TargetData.
+ // These optimizations require DataLayout.
if (!TD) return 0;
// Let strncpy handle the zero padding
@@ -832,7 +832,7 @@ struct MemCmpOpt : public LibCallOptimization {
struct MemCpyOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- // These optimizations require TargetData.
+ // These optimizations require DataLayout.
if (!TD) return 0;
FunctionType *FT = Callee->getFunctionType();
@@ -854,7 +854,7 @@ struct MemCpyOpt : public LibCallOptimization {
struct MemMoveOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- // These optimizations require TargetData.
+ // These optimizations require DataLayout.
if (!TD) return 0;
FunctionType *FT = Callee->getFunctionType();
@@ -876,7 +876,7 @@ struct MemMoveOpt : public LibCallOptimization {
struct MemSetOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- // These optimizations require TargetData.
+ // These optimizations require DataLayout.
if (!TD) return 0;
FunctionType *FT = Callee->getFunctionType();
@@ -1304,7 +1304,7 @@ struct SPrintFOpt : public LibCallOptimization {
if (FormatStr[i] == '%')
return 0; // we found a format specifier, bail out.
- // These optimizations require TargetData.
+ // These optimizations require DataLayout.
if (!TD) return 0;
// sprintf(str, fmt) -> llvm.memcpy(str, fmt, strlen(fmt)+1, 1)
@@ -1334,7 +1334,7 @@ struct SPrintFOpt : public LibCallOptimization {
}
if (FormatStr[1] == 's') {
- // These optimizations require TargetData.
+ // These optimizations require DataLayout.
if (!TD) return 0;
// sprintf(dest, "%s", str) -> llvm.memcpy(dest, str, strlen(str)+1, 1)
@@ -1422,7 +1422,7 @@ struct FWriteOpt : public LibCallOptimization {
struct FPutsOpt : public LibCallOptimization {
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
- // These optimizations require TargetData.
+ // These optimizations require DataLayout.
if (!TD) return 0;
// Require two pointers. Also, we can't optimize if return value is used.
@@ -1459,7 +1459,7 @@ struct FPrintFOpt : public LibCallOptimization {
if (FormatStr[i] == '%') // Could handle %% -> % if we cared.
return 0; // We found a format specifier.
- // These optimizations require TargetData.
+ // These optimizations require DataLayout.
if (!TD) return 0;
Value *NewCI = EmitFWrite(CI->getArgOperand(1),
@@ -1749,7 +1749,7 @@ bool SimplifyLibCalls::runOnFunction(Function &F) {
if (Optimizations.empty())
InitOptimizations();
- const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+ const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
IRBuilder<> Builder(F.getContext());