author     Chandler Carruth <chandlerc@gmail.com>   2012-11-01 09:14:31 +0000
committer  Chandler Carruth <chandlerc@gmail.com>   2012-11-01 09:14:31 +0000
commit     426c2bf5cdd2173e4a33aea8cb92cf684a724f4b
tree       1a088a5b297552151151ef13270417c23b057268
parent     ece6c6bb6329748b92403c06ac87f45c43485911
Revert the majority of the next patch in the address space series:
r165941: Resubmit the changes to llvm core to update the functions to
support different pointer sizes on a per address space basis.
Despite that commit log, the change primarily modified code outside of
VMCore, and those changes carry no tests for correctness (or even
plausibility); we have consistently found questionable or flat-out
incorrect cases among them. Most of them are probably correct, but we
need to devise a system that makes it clearer when we have handled the
address space concerns correctly, and ideally each pass that gets
updated would receive an accompanying test case that exercises that
pass specifically w.r.t. alternate address spaces.
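To make the concern concrete, the call-site pattern the series
introduced looks roughly like the sketch below: derive the address
space from the instruction's pointer operand and thread it into the
DataLayout query. The helper name and the store-width framing are
illustrative, not from the patch; only the two query styles come from
the diff further down. (In the 3.2-era tree the header lived at
"llvm/DataLayout.h".)

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Illustrative helper (not from the patch): the width of a
// pointer-sized store through this store's pointer operand.
static unsigned pointerStoreWidth(const DataLayout &TD,
                                  const StoreInst &SI) {
  // Reverted style: ask DataLayout about the store's own address
  // space, which may use a different pointer size than address space 0.
  unsigned AS = SI.getPointerAddressSpace();
  return TD.getPointerSize(AS);
  // Restored style: TD.getPointerSize(), which silently assumes every
  // address space shares address space 0's pointer size.
}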
However, I have retained the new C API entry points from that commit.
They were an orthogonal change that should probably have been split
out, but they seem entirely good.
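The commit message does not name the retained entry points; presumably
they are the per-address-space DataLayout queries that r165941 added to
llvm-c/Target.h. A minimal sketch, assuming LLVMPointerSizeForAS is
among them and assuming a data layout string that gives address space 1
a 16-bit pointer:

#include "llvm-c/Target.h"
#include <cstdio>

int main() {
  // Assumed layout: 64-bit pointers in address space 0, 16-bit in 1.
  LLVMTargetDataRef TD =
      LLVMCreateTargetData("e-p:64:64:64-p1:16:16:16");
  std::printf("AS0: %u bytes\n", LLVMPointerSize(TD));         // 8
  std::printf("AS1: %u bytes\n", LLVMPointerSizeForAS(TD, 1)); // 2
  LLVMDisposeTargetData(TD);
  return 0;
}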
In several places the changes were obvious cleanups that added no
actual multiple-address-space code; where I spotted these, I have left
them unreverted.
In a few other places there were merge conflicts because a cleaner
solution was implemented later, often without using address spaces at
all. In those cases, I've preserved the newer code, which isn't
address space dependent.
This is part of my ongoing effort to clean out the partial address
space code, which carries high risk and low test coverage and is not
likely to be finished as the 3.2 release looms closer. Duncan and
I would both like to see the above issues addressed before we return
to these changes.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@167222 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Transforms/Scalar')
-rw-r--r--  lib/Transforms/Scalar/MemCpyOptimizer.cpp |  5
-rw-r--r--  lib/Transforms/Scalar/SROA.cpp            | 10
2 files changed, 5 insertions, 10 deletions
diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 97fff9e..517657c 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -174,11 +174,10 @@ bool MemsetRange::isProfitableToUseMemset(const DataLayout &TD) const {
   // this width can be stored. If so, check to see whether we will end up
   // actually reducing the number of stores used.
   unsigned Bytes = unsigned(End-Start);
-  unsigned AS = cast<StoreInst>(TheStores[0])->getPointerAddressSpace();
-  unsigned NumPointerStores = Bytes/TD.getPointerSize(AS);
+  unsigned NumPointerStores = Bytes/TD.getPointerSize();
 
   // Assume the remaining bytes if any are done a byte at a time.
-  unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize(AS);
+  unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize();
 
   // If we will reduce the # stores (according to this heuristic), do the
   // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index 26d0712..d95c855 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -444,7 +444,6 @@ protected:
 
   bool computeConstantGEPOffset(GetElementPtrInst &GEPI, int64_t &GEPOffset) {
     GEPOffset = Offset;
-    unsigned int AS = GEPI.getPointerAddressSpace();
     for (gep_type_iterator GTI = gep_type_begin(GEPI), GTE = gep_type_end(GEPI);
          GTI != GTE; ++GTI) {
       ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
@@ -474,7 +473,7 @@ protected:
         continue;
       }
 
-      APInt Index = OpC->getValue().sextOrTrunc(TD.getPointerSizeInBits(AS));
+      APInt Index = OpC->getValue().sextOrTrunc(TD.getPointerSizeInBits());
       Index *= APInt(Index.getBitWidth(),
                      TD.getTypeAllocSize(GTI.getIndexedType()));
       Index += APInt(Index.getBitWidth(), (uint64_t)GEPOffset,
@@ -2395,8 +2394,7 @@ private:
 
   Value *getAdjustedAllocaPtr(IRBuilder<> &IRB, Type *PointerTy) {
     assert(BeginOffset >= NewAllocaBeginOffset);
-    unsigned AS = cast<PointerType>(PointerTy)->getAddressSpace();
-    APInt Offset(TD.getPointerSizeInBits(AS), BeginOffset - NewAllocaBeginOffset);
+    APInt Offset(TD.getPointerSizeInBits(), BeginOffset - NewAllocaBeginOffset);
     return getAdjustedPtr(IRB, TD, &NewAI, Offset, PointerTy, getName(""));
   }
 
@@ -2793,10 +2791,8 @@ private:
     const AllocaPartitioning::MemTransferOffsets &MTO
       = P.getMemTransferOffsets(II);
 
-    assert(OldPtr->getType()->isPointerTy() && "Must be a pointer type!");
-    unsigned AS = cast<PointerType>(OldPtr->getType())->getAddressSpace();
     // Compute the relative offset within the transfer.
-    unsigned IntPtrWidth = TD.getPointerSizeInBits(AS);
+    unsigned IntPtrWidth = TD.getPointerSizeInBits();
     APInt RelOffset(IntPtrWidth, BeginOffset -
                     (IsDest ? MTO.DestBegin : MTO.SourceBegin));
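For concreteness, the MemCpyOptimizer hunk feeds a simple cost model:
cover the byte range with pointer-width stores plus trailing byte
stores, then (per the trailing comment in the hunk) compare that
estimate against the number of stores actually collected before forming
a memset. A worked sketch of that arithmetic, assuming an 8-byte flat
pointer size and a range covered by 18 individual i8 stores; the SROA
hunks make the analogous flat substitution for getPointerSizeInBits()
when widening GEP offsets into APInt:

#include <cassert>

int main() {
  unsigned PointerSize = 8; // assumed TD.getPointerSize(), 64-bit target
  unsigned Bytes = 18;      // covered range: unsigned(End - Start)

  // Estimated cost of rewriting the range without a memset.
  unsigned NumPointerStores = Bytes / PointerSize;                 // 2
  unsigned NumByteStores = Bytes - NumPointerStores * PointerSize; // 2

  // 18 collected stores > 4 estimated stores, so the memset pays off.
  unsigned CollectedStores = 18; // stands in for TheStores.size()
  assert(CollectedStores > NumPointerStores + NumByteStores);
  return 0;
}

Had the stores targeted an address space with 2-byte pointers, the
per-address-space getPointerSize(AS) would estimate 9 pointer stores
and 0 byte stores for the same range; that divergence in the estimate
is exactly what the reverted series set out to model, and what the
flat query restored here cannot see.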