path: root/lib/Analysis/ConstantFolding.cpp
author    Dan Gohman <gohman@apple.com>    2010-02-01 18:27:38 +0000
committer Dan Gohman <gohman@apple.com>    2010-02-01 18:27:38 +0000
commit  4f8eea82d8967cffa85b9df6c9255717b059009e (patch)
tree    30c0ce1f31994e690d1e06b942a93fd36103449a /lib/Analysis/ConstantFolding.cpp
parent  d569561835b0fa4dbbb0fca1b1f0a8de6c01439a (diff)
Generalize target-independent folding rules for sizeof to handle more
cases, and implement target-independent folding rules for alignof and
offsetof. Also, reassociate reassociative operators when it leads to
more folding.

Generalize ScalarEvolution's isOffsetOf to recognize offsetof on arrays.

Rename getAllocSizeExpr to getSizeOfExpr, and getFieldOffsetExpr to
getOffsetOfExpr, for consistency with the analogous ConstantExpr
routines.

Make the target-dependent folder promote GEP array indices to
pointer-sized integers, to make implicit casting explicit and exposed
to subsequent folding.

And add a bunch of testcases for this new functionality, and for a
bunch of related existing functionality.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@94987 91177308-0d34-0410-b5e6-96231b3b80d8
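For context, the ConstantExpr forms these folds recognize are the IR
counterparts of the classic C idioms for sizeof and offsetof. Below is a
minimal standalone C++ sketch of those idioms; it is illustrative only,
not LLVM API code, and the null-pointer forms are technically undefined
behavior in source C++, even though the corresponding IR constant
expressions are well defined:

    #include <cstddef>
    #include <cstdio>

    struct S { char C; int A[4]; };

    int main() {
      // sizeof as pointer arithmetic: the byte distance from a null S*
      // to element 1; this is the gep-on-null form the sizeof fold matches.
      size_t Size = (size_t)((char *)((S *)0 + 1) - (char *)0);
      // offsetof as the address of a member of a null S*, here an array
      // element; this is the form the offsetof fold matches.
      size_t Off = (size_t)&((S *)0)->A[2];
      printf("sizeof(S)=%zu offsetof(S,A[2])=%zu\n", Size, Off);
      return 0;
    }

The &((S *)0)->A[2] form, indexing into an array member, is the
"offsetof on arrays" case this patch teaches ScalarEvolution's
isOffsetOf to recognize.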
Diffstat (limited to 'lib/Analysis/ConstantFolding.cpp')
-rw-r--r--  lib/Analysis/ConstantFolding.cpp  38
1 file changed, 38 insertions, 0 deletions
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index 4ae8859..b8e8401 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -517,6 +517,42 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
return 0;
}
+/// CastGEPIndices - If array indices are not pointer-sized integers,
+/// explicitly cast them so that they aren't implicitly cast by the
+/// getelementptr.
+static Constant *CastGEPIndices(Constant *const *Ops, unsigned NumOps,
+                                const Type *ResultTy,
+                                const TargetData *TD) {
+  if (!TD) return 0;
+  const Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());
+
+  bool Any = false;
+  SmallVector<Constant*, 32> NewIdxs;
+  for (unsigned i = 1; i != NumOps; ++i) {
+    if ((i == 1 ||
+         !isa<StructType>(GetElementPtrInst::getIndexedType(Ops[0]->getType(),
+                                        reinterpret_cast<Value *const *>(Ops+1),
+                                                            i-1))) &&
+        Ops[i]->getType() != IntPtrTy) {
+      Any = true;
+      NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
+                                                                      true,
+                                                                      IntPtrTy,
+                                                                      true),
+                                              Ops[i], IntPtrTy));
+    } else
+      NewIdxs.push_back(Ops[i]);
+  }
+  if (!Any) return 0;
+
+  Constant *C =
+    ConstantExpr::getGetElementPtr(Ops[0], &NewIdxs[0], NewIdxs.size());
+  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
+    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
+      C = Folded;
+  return C;
+}
+
/// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP
/// constant expression, do so.
static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
@@ -810,6 +846,8 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
case Instruction::ShuffleVector:
return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
case Instruction::GetElementPtr:
+ if (Constant *C = CastGEPIndices(Ops, NumOps, DestTy, TD))
+ return C;
if (Constant *C = SymbolicallyEvaluateGEP(Ops, NumOps, DestTy, TD))
return C;
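To make the effect of CastGEPIndices concrete, here is a minimal
standalone C++ sketch, not LLVM API code, of the promotion it performs:
an array index narrower than the pointer width is sign-extended to the
pointer-sized integer type before the address arithmetic, so the cast
that getelementptr would otherwise perform implicitly becomes an
explicit operation that later folds can see. The helper name here is
hypothetical:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical helper: compute the address of Base[Idx] for
    // EltSize-byte elements, with the i32 -> i64 index promotion
    // written out explicitly.
    static uint64_t indexedAddress(uint64_t Base, uint64_t EltSize,
                                   int32_t Idx) {
      int64_t Wide = (int64_t)Idx; // explicit sext, as CastGEPIndices emits
      return Base + (uint64_t)(Wide * (int64_t)EltSize);
    }

    int main() {
      // A negative 32-bit index must be sign-extended, not zero-extended;
      // making the promotion explicit keeps that visible to later folding.
      printf("%llu\n", (unsigned long long)indexedAddress(1000, 4, -2)); // 992
      return 0;
    }

Note that CastGEPIndices deliberately skips indices into struct types,
which must remain i32; only the leading pointer-level index and array
indices are promoted, which is what the i == 1 || !isa<StructType>
check in the loop enforces.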
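A note on the wiring in ConstantFoldInstOperands: CastGEPIndices runs
before SymbolicallyEvaluateGEP, and when it does rewrite any index it
re-folds the result through ConstantFoldConstantExpression, so the
symbolic evaluator only ever sees GEPs whose indices are already
canonical pointer-sized integers.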