| field | value | date |
|---|---|---|
| author | Owen Anderson <resistor@mac.com> | 2009-07-22 00:24:57 +0000 |
| committer | Owen Anderson <resistor@mac.com> | 2009-07-22 00:24:57 +0000 |
| commit | e922c0201916e0b980ab3cfe91e1413e68d55647 (patch) | |
| tree | 663be741b84470d97945f01da459a3627af683fd /lib/Transforms/Scalar | |
| parent | 7cf12c7efd37dc12c3ed536a3f4c373dddac2b85 (diff) | |
Get rid of the Pass+Context magic.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@76702 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Transforms/Scalar')
21 files changed, 210 insertions, 158 deletions
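The change is mechanical but worth spelling out: rather than reading an `LLVMContext*` member that the pass machinery injected behind the scenes, each pass and helper now obtains the context from the IR objects it already holds (`F.getContext()`, `I->getContext()`, `BB->getContext()`), and free functions take `LLVMContext&` by reference instead of by pointer. Below is a minimal sketch of both sides of that migration, written against the 2009-era C++ API used in this diff; the pass and helper names are hypothetical, not part of the patch, and the exact headers may differ by revision.

```cpp
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/LLVMContext.h"
#include "llvm/Pass.h"
using namespace llvm;

// Helper style after the patch: take the context by reference and call
// 'Context.' methods, rather than going through an injected pointer with
// 'Context->'.
static Constant *getOne(const Type *Ty, LLVMContext &Context) {
  return Context.getConstantInt(Ty, 1);
}

namespace {
  struct ExamplePass : public FunctionPass {   // hypothetical pass
    static char ID;  // Pass identification, replacement for typeid
    ExamplePass() : FunctionPass(&ID) {}

    virtual bool runOnFunction(Function &F) {
      // Before: the pass framework magically populated a 'Context' member.
      // After: ask the IR itself for its context.
      LLVMContext &Context = F.getContext();
      Constant *Zero = Context.getNullValue(Type::Int32Ty);
      (void)Zero; (void)getOne(Type::Int32Ty, Context);
      return false;
    }
  };
  char ExamplePass::ID = 0;
}
```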
diff --git a/lib/Transforms/Scalar/CodeGenPrepare.cpp b/lib/Transforms/Scalar/CodeGenPrepare.cpp index e7cde0a..8c8d261 100644 --- a/lib/Transforms/Scalar/CodeGenPrepare.cpp +++ b/lib/Transforms/Scalar/CodeGenPrepare.cpp @@ -518,7 +518,7 @@ static bool OptimizeCmpExpression(CmpInst *CI) { BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI(); InsertedCmp = - CmpInst::Create(*DefBB->getContext(), CI->getOpcode(), + CmpInst::Create(DefBB->getContext(), CI->getOpcode(), CI->getPredicate(), CI->getOperand(0), CI->getOperand(1), "", InsertPt); MadeChange = true; @@ -559,6 +559,8 @@ static bool IsNonLocalValue(Value *V, BasicBlock *BB) { bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr, const Type *AccessTy, DenseMap<Value*,Value*> &SunkAddrs) { + LLVMContext &Context = MemoryInst->getContext(); + // Figure out what addressing mode will be built up for this operation. SmallVector<Instruction*, 16> AddrModeInsts; ExtAddrMode AddrMode = AddressingModeMatcher::Match(Addr, AccessTy,MemoryInst, @@ -615,7 +617,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr, V = new SExtInst(V, IntPtrTy, "sunkaddr", InsertPt); } if (AddrMode.Scale != 1) - V = BinaryOperator::CreateMul(V, Context->getConstantInt(IntPtrTy, + V = BinaryOperator::CreateMul(V, Context.getConstantInt(IntPtrTy, AddrMode.Scale), "sunkaddr", InsertPt); Result = V; @@ -647,7 +649,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr, // Add in the Base Offset if present. if (AddrMode.BaseOffs) { - Value *V = Context->getConstantInt(IntPtrTy, AddrMode.BaseOffs); + Value *V = Context.getConstantInt(IntPtrTy, AddrMode.BaseOffs); if (Result) Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt); else @@ -655,7 +657,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr, } if (Result == 0) - SunkAddr = Context->getNullValue(Addr->getType()); + SunkAddr = Context.getNullValue(Addr->getType()); else SunkAddr = new IntToPtrInst(Result, Addr->getType(), "sunkaddr",InsertPt); } diff --git a/lib/Transforms/Scalar/ConstantProp.cpp b/lib/Transforms/Scalar/ConstantProp.cpp index d92ebda..2d9d2de 100644 --- a/lib/Transforms/Scalar/ConstantProp.cpp +++ b/lib/Transforms/Scalar/ConstantProp.cpp @@ -67,7 +67,7 @@ bool ConstantPropagation::runOnFunction(Function &F) { WorkList.erase(WorkList.begin()); // Get an element from the worklist... if (!I->use_empty()) // Don't muck with dead instructions... - if (Constant *C = ConstantFoldInstruction(I, Context)) { + if (Constant *C = ConstantFoldInstruction(I, F.getContext())) { // Add all of the users of this instruction to the worklist, they might // be constant propagatable now... for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp index bdb571d..2a50103 100644 --- a/lib/Transforms/Scalar/GVN.cpp +++ b/lib/Transforms/Scalar/GVN.cpp @@ -797,7 +797,7 @@ Value *GVN::GetValueForBlock(BasicBlock *BB, Instruction* orig, // If the block is unreachable, just return undef, since this path // can't actually occur at runtime. if (!DT->isReachableFromEntry(BB)) - return Phis[BB] = Context->getUndef(orig->getType()); + return Phis[BB] = BB->getContext().getUndef(orig->getType()); if (BasicBlock *Pred = BB->getSinglePredecessor()) { Value *ret = GetValueForBlock(Pred, orig, Phis); @@ -985,7 +985,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI, // Loading the allocation -> undef. 
if (isa<AllocationInst>(DepInst)) { ValuesPerBlock.push_back(std::make_pair(DepBB, - Context->getUndef(LI->getType()))); + DepBB->getContext().getUndef(LI->getType()))); continue; } @@ -1272,7 +1272,7 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) { // undef value. This can happen when loading for a fresh allocation with no // intervening stores, for example. if (isa<AllocationInst>(DepInst)) { - L->replaceAllUsesWith(Context->getUndef(L->getType())); + L->replaceAllUsesWith(DepInst->getContext().getUndef(L->getType())); toErase.push_back(L); NumGVNLoad++; return true; @@ -1384,9 +1384,9 @@ bool GVN::processInstruction(Instruction *I, BasicBlock* falseSucc = BI->getSuccessor(1); if (trueSucc->getSinglePredecessor()) - localAvail[trueSucc]->table[condVN] = Context->getTrue(); + localAvail[trueSucc]->table[condVN] = trueSucc->getContext().getTrue(); if (falseSucc->getSinglePredecessor()) - localAvail[falseSucc]->table[condVN] = Context->getFalse(); + localAvail[falseSucc]->table[condVN] = trueSucc->getContext().getFalse(); return false; @@ -1628,7 +1628,7 @@ bool GVN::performPRE(Function& F) { // will be available in the predecessor by the time we need them. Any // that weren't original present will have been instantiated earlier // in this loop. - Instruction* PREInstr = CurInst->clone(*Context); + Instruction* PREInstr = CurInst->clone(CurInst->getContext()); bool success = true; for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) { Value *Op = PREInstr->getOperand(i); diff --git a/lib/Transforms/Scalar/GVNPRE.cpp b/lib/Transforms/Scalar/GVNPRE.cpp index 3f66131..85c272d 100644 --- a/lib/Transforms/Scalar/GVNPRE.cpp +++ b/lib/Transforms/Scalar/GVNPRE.cpp @@ -800,6 +800,8 @@ void GVNPRE::val_replace(ValueNumberedSet& s, Value* v) { Value* GVNPRE::phi_translate(Value* V, BasicBlock* pred, BasicBlock* succ) { if (V == 0) return 0; + + LLVMContext &Context = V->getContext(); // Unary Operations if (CastInst* U = dyn_cast<CastInst>(V)) { @@ -862,7 +864,7 @@ Value* GVNPRE::phi_translate(Value* V, BasicBlock* pred, BasicBlock* succ) { newOp1, newOp2, BO->getName()+".expr"); else if (CmpInst* C = dyn_cast<CmpInst>(U)) - newVal = CmpInst::Create(*Context, C->getOpcode(), + newVal = CmpInst::Create(Context, C->getOpcode(), C->getPredicate(), newOp1, newOp2, C->getName()+".expr"); @@ -1594,6 +1596,7 @@ void GVNPRE::buildsets(Function& F) { void GVNPRE::insertion_pre(Value* e, BasicBlock* BB, DenseMap<BasicBlock*, Value*>& avail, std::map<BasicBlock*, ValueNumberedSet>& new_sets) { + LLVMContext &Context = e->getContext(); for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) { Value* e2 = avail[*PI]; if (!availableOut[*PI].test(VN.lookup(e2))) { @@ -1680,7 +1683,7 @@ void GVNPRE::insertion_pre(Value* e, BasicBlock* BB, BO->getName()+".gvnpre", (*PI)->getTerminator()); else if (CmpInst* C = dyn_cast<CmpInst>(U)) - newVal = CmpInst::Create(*Context, C->getOpcode(), + newVal = CmpInst::Create(Context, C->getOpcode(), C->getPredicate(), s1, s2, C->getName()+".gvnpre", (*PI)->getTerminator()); diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp index 0ac58f1..66a7862 100644 --- a/lib/Transforms/Scalar/IndVarSimplify.cpp +++ b/lib/Transforms/Scalar/IndVarSimplify.cpp @@ -292,7 +292,7 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L, if (NumPreds != 1) { // Clone the PHI and delete the original one. This lets IVUsers and // any other maps purge the original user from their records. 
- PHINode *NewPN = PN->clone(*Context); + PHINode *NewPN = PN->clone(PN->getContext()); NewPN->takeName(PN); NewPN->insertBefore(PN); PN->replaceAllUsesWith(NewPN); @@ -713,21 +713,23 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PH) { } if (NewPred == CmpInst::BAD_ICMP_PREDICATE) return; + LLVMContext &Context = PH->getContext(); + // Insert new integer induction variable. PHINode *NewPHI = PHINode::Create(Type::Int32Ty, PH->getName()+".int", PH); - NewPHI->addIncoming(Context->getConstantInt(Type::Int32Ty, newInitValue), + NewPHI->addIncoming(Context.getConstantInt(Type::Int32Ty, newInitValue), PH->getIncomingBlock(IncomingEdge)); Value *NewAdd = BinaryOperator::CreateAdd(NewPHI, - Context->getConstantInt(Type::Int32Ty, + Context.getConstantInt(Type::Int32Ty, newIncrValue), Incr->getName()+".int", Incr); NewPHI->addIncoming(NewAdd, PH->getIncomingBlock(BackEdge)); // The back edge is edge 1 of newPHI, whatever it may have been in the // original PHI. - ConstantInt *NewEV = Context->getConstantInt(Type::Int32Ty, intEV); + ConstantInt *NewEV = Context.getConstantInt(Type::Int32Ty, intEV); Value *LHS = (EVIndex == 1 ? NewPHI->getIncomingValue(1) : NewEV); Value *RHS = (EVIndex == 1 ? NewEV : NewPHI->getIncomingValue(1)); ICmpInst *NewEC = new ICmpInst(EC->getParent()->getTerminator(), @@ -743,7 +745,7 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PH) { RecursivelyDeleteTriviallyDeadInstructions(EC); // Delete old, floating point, increment instruction. - Incr->replaceAllUsesWith(Context->getUndef(Incr->getType())); + Incr->replaceAllUsesWith(Context.getUndef(Incr->getType())); RecursivelyDeleteTriviallyDeadInstructions(Incr); // Replace floating induction variable, if it isn't already deleted. diff --git a/lib/Transforms/Scalar/InstructionCombining.cpp b/lib/Transforms/Scalar/InstructionCombining.cpp index f52bb86..01fa68b 100644 --- a/lib/Transforms/Scalar/InstructionCombining.cpp +++ b/lib/Transforms/Scalar/InstructionCombining.cpp @@ -85,7 +85,8 @@ namespace { static char ID; // Pass identification, replacement for typeid InstCombiner() : FunctionPass(&ID) {} - LLVMContext *getContext() { return Context; } + LLVMContext *Context; + LLVMContext *getContext() const { return Context; } /// AddToWorkList - Add the specified instruction to the worklist if it /// isn't already in it. 
@@ -11557,7 +11558,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) { if (GV->isConstant() && GV->hasDefinitiveInitializer()) if (Constant *V = ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE, - Context)) + *Context)) return ReplaceInstUsesWith(LI, V); if (CE->getOperand(0)->isNullValue()) { // Insert a new store to null instruction before the load to indicate @@ -13082,6 +13083,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) { bool InstCombiner::runOnFunction(Function &F) { MustPreserveLCSSA = mustPreserveAnalysisID(LCSSAID); + Context = &F.getContext(); bool EverMadeChange = false; diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp index 1ce823a..6b665cc 100644 --- a/lib/Transforms/Scalar/JumpThreading.cpp +++ b/lib/Transforms/Scalar/JumpThreading.cpp @@ -435,7 +435,8 @@ bool JumpThreading::ProcessBranchOnDuplicateCond(BasicBlock *PredBB, << "' folding condition to '" << BranchDir << "': " << *BB->getTerminator(); ++NumFolds; - DestBI->setCondition(Context->getConstantInt(Type::Int1Ty, BranchDir)); + DestBI->setCondition(BB->getContext().getConstantInt(Type::Int1Ty, + BranchDir)); ConstantFoldTerminator(BB); return true; } @@ -564,7 +565,8 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) { // If the returned value is the load itself, replace with an undef. This can // only happen in dead loops. - if (AvailableVal == LI) AvailableVal = Context->getUndef(LI->getType()); + if (AvailableVal == LI) AvailableVal = + AvailableVal->getContext().getUndef(LI->getType()); LI->replaceAllUsesWith(AvailableVal); LI->eraseFromParent(); return true; @@ -718,7 +720,7 @@ bool JumpThreading::ProcessJumpOnPHI(PHINode *PN) { // Next, figure out which successor we are threading to. BasicBlock *SuccBB; if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) - SuccBB = BI->getSuccessor(PredCst == Context->getFalse()); + SuccBB = BI->getSuccessor(PredCst == PredBB->getContext().getFalse()); else { SwitchInst *SI = cast<SwitchInst>(BB->getTerminator()); SuccBB = SI->getSuccessor(SI->findCaseValue(PredCst)); @@ -756,7 +758,7 @@ bool JumpThreading::ProcessBranchOnLogical(Value *V, BasicBlock *BB, // We can only do the simplification for phi nodes of 'false' with AND or // 'true' with OR. See if we have any entries in the phi for this. unsigned PredNo = ~0U; - ConstantInt *PredCst = Context->getConstantInt(Type::Int1Ty, !isAnd); + ConstantInt *PredCst = V->getContext().getConstantInt(Type::Int1Ty, !isAnd); for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { if (PN->getIncomingValue(i) == PredCst) { PredNo = i; @@ -795,15 +797,15 @@ bool JumpThreading::ProcessBranchOnLogical(Value *V, BasicBlock *BB, /// result can not be determined, a null pointer is returned. static Constant *GetResultOfComparison(CmpInst::Predicate pred, Value *LHS, Value *RHS, - LLVMContext *Context) { + LLVMContext &Context) { if (Constant *CLHS = dyn_cast<Constant>(LHS)) if (Constant *CRHS = dyn_cast<Constant>(RHS)) - return Context->getConstantExprCompare(pred, CLHS, CRHS); + return Context.getConstantExprCompare(pred, CLHS, CRHS); if (LHS == RHS) if (isa<IntegerType>(LHS->getType()) || isa<PointerType>(LHS->getType())) return ICmpInst::isTrueWhenEqual(pred) ? 
- Context->getTrue() : Context->getFalse(); + Context.getTrue() : Context.getFalse(); return 0; } @@ -829,7 +831,7 @@ bool JumpThreading::ProcessBranchOnCompare(CmpInst *Cmp, BasicBlock *BB) { PredVal = PN->getIncomingValue(i); Constant *Res = GetResultOfComparison(Cmp->getPredicate(), PredVal, - RHS, Context); + RHS, Cmp->getContext()); if (!Res) { PredVal = 0; continue; @@ -931,7 +933,7 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB, BasicBlock *PredBB, // Clone the non-phi instructions of BB into NewBB, keeping track of the // mapping and using it to remap operands in the cloned instructions. for (; !isa<TerminatorInst>(BI); ++BI) { - Instruction *New = BI->clone(*Context); + Instruction *New = BI->clone(BI->getContext()); New->setName(BI->getNameStart()); NewBB->getInstList().push_back(New); ValueMapping[BI] = New; diff --git a/lib/Transforms/Scalar/LICM.cpp b/lib/Transforms/Scalar/LICM.cpp index bdf7dee..e8b543b 100644 --- a/lib/Transforms/Scalar/LICM.cpp +++ b/lib/Transforms/Scalar/LICM.cpp @@ -475,6 +475,8 @@ void LICM::sink(Instruction &I) { ++NumSunk; Changed = true; + LLVMContext &Context = I.getContext(); + // The case where there is only a single exit node of this loop is common // enough that we handle it as a special (more efficient) case. It is more // efficient to handle because there are no PHI nodes that need to be placed. @@ -483,7 +485,7 @@ void LICM::sink(Instruction &I) { // Instruction is not used, just delete it. CurAST->deleteValue(&I); if (!I.use_empty()) // If I has users in unreachable blocks, eliminate. - I.replaceAllUsesWith(Context->getUndef(I.getType())); + I.replaceAllUsesWith(Context.getUndef(I.getType())); I.eraseFromParent(); } else { // Move the instruction to the start of the exit block, after any PHI @@ -497,7 +499,7 @@ void LICM::sink(Instruction &I) { // The instruction is actually dead if there ARE NO exit blocks. CurAST->deleteValue(&I); if (!I.use_empty()) // If I has users in unreachable blocks, eliminate. 
- I.replaceAllUsesWith(Context->getUndef(I.getType())); + I.replaceAllUsesWith(Context.getUndef(I.getType())); I.eraseFromParent(); } else { // Otherwise, if we have multiple exits, use the PromoteMem2Reg function to @@ -570,7 +572,7 @@ void LICM::sink(Instruction &I) { ExitBlock->getInstList().insert(InsertPt, &I); New = &I; } else { - New = I.clone(*Context); + New = I.clone(Context); CurAST->copyValue(&I, New); if (!I.getName().empty()) New->setName(I.getName()+".le"); @@ -768,7 +770,7 @@ void LICM::PromoteValuesInLoop() { PromotedAllocas.reserve(PromotedValues.size()); for (unsigned i = 0, e = PromotedValues.size(); i != e; ++i) PromotedAllocas.push_back(PromotedValues[i].first); - PromoteMemToReg(PromotedAllocas, *DT, *DF, Context, CurAST); + PromoteMemToReg(PromotedAllocas, *DT, *DF, Preheader->getContext(), CurAST); } /// FindPromotableValuesInLoop - Check the current loop for stores to definite diff --git a/lib/Transforms/Scalar/LoopIndexSplit.cpp b/lib/Transforms/Scalar/LoopIndexSplit.cpp index 4c6689d..f20511e 100644 --- a/lib/Transforms/Scalar/LoopIndexSplit.cpp +++ b/lib/Transforms/Scalar/LoopIndexSplit.cpp @@ -294,15 +294,15 @@ static bool isUsedOutsideLoop(Value *V, Loop *L) { // Return V+1 static Value *getPlusOne(Value *V, bool Sign, Instruction *InsertPt, - LLVMContext *Context) { - Constant *One = Context->getConstantInt(V->getType(), 1, Sign); + LLVMContext &Context) { + Constant *One = Context.getConstantInt(V->getType(), 1, Sign); return BinaryOperator::CreateAdd(V, One, "lsp", InsertPt); } // Return V-1 static Value *getMinusOne(Value *V, bool Sign, Instruction *InsertPt, - LLVMContext *Context) { - Constant *One = Context->getConstantInt(V->getType(), 1, Sign); + LLVMContext &Context) { + Constant *One = Context.getConstantInt(V->getType(), 1, Sign); return BinaryOperator::CreateSub(V, One, "lsp", InsertPt); } @@ -493,6 +493,8 @@ bool LoopIndexSplit::restrictLoopBound(ICmpInst &Op) { EBR->setSuccessor(1, T); } + LLVMContext &Context = Op.getContext(); + // New upper and lower bounds. Value *NLB = NULL; Value *NUB = NULL; @@ -879,6 +881,8 @@ bool LoopIndexSplit::splitLoop() { BasicBlock *ExitingBlock = ExitCondition->getParent(); if (!cleanBlock(ExitingBlock)) return false; + LLVMContext &Context = Header->getContext(); + for (Loop::block_iterator I = L->block_begin(), E = L->block_end(); I != E; ++I) { BranchInst *BR = dyn_cast<BranchInst>((*I)->getTerminator()); diff --git a/lib/Transforms/Scalar/LoopRotation.cpp b/lib/Transforms/Scalar/LoopRotation.cpp index 6d80365..d2b20fa 100644 --- a/lib/Transforms/Scalar/LoopRotation.cpp +++ b/lib/Transforms/Scalar/LoopRotation.cpp @@ -238,7 +238,7 @@ bool LoopRotate::rotateLoop(Loop *Lp, LPPassManager &LPM) { // This is not a PHI instruction. Insert its clone into original pre-header. // If this instruction is using a value from same basic block then // update it to use value from cloned instruction. 
- Instruction *C = In->clone(*Context); + Instruction *C = In->clone(In->getContext()); C->setName(In->getName()); OrigPreHeader->getInstList().push_back(C); diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp index a1781c9..22717c2 100644 --- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -1576,7 +1576,9 @@ void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEV *const &Stride, BasicBlock *LatchBlock = L->getLoopLatch(); Instruction *IVIncInsertPt = LatchBlock->getTerminator(); - Value *CommonBaseV = Context->getNullValue(ReplacedTy); + LLVMContext &Context = Preheader->getContext(); + + Value *CommonBaseV = Context.getNullValue(ReplacedTy); const SCEV *RewriteFactor = SE->getIntegerSCEV(0, ReplacedTy); IVExpr ReuseIV(SE->getIntegerSCEV(0, Type::Int32Ty), @@ -1859,6 +1861,8 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond, const SCEVConstant *SC = dyn_cast<SCEVConstant>(*CondStride); if (!SC) return Cond; + LLVMContext &Context = Cond->getContext(); + ICmpInst::Predicate Predicate = Cond->getPredicate(); int64_t CmpSSInt = SC->getValue()->getSExtValue(); unsigned BitWidth = SE->getTypeSizeInBits((*CondStride)->getType()); @@ -1942,7 +1946,7 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond, NewCmpTy = NewCmpLHS->getType(); NewTyBits = SE->getTypeSizeInBits(NewCmpTy); - const Type *NewCmpIntTy = Context->getIntegerType(NewTyBits); + const Type *NewCmpIntTy = Context.getIntegerType(NewTyBits); if (RequiresTypeConversion(NewCmpTy, CmpTy)) { // Check if it is possible to rewrite it using // an iv / stride of a smaller integer type. @@ -1987,10 +1991,10 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond, NewStride = &IU->StrideOrder[i]; if (!isa<PointerType>(NewCmpTy)) - NewCmpRHS = Context->getConstantInt(NewCmpTy, NewCmpVal); + NewCmpRHS = Context.getConstantInt(NewCmpTy, NewCmpVal); else { - Constant *CI = Context->getConstantInt(NewCmpIntTy, NewCmpVal); - NewCmpRHS = Context->getConstantExprIntToPtr(CI, NewCmpTy); + Constant *CI = Context.getConstantInt(NewCmpIntTy, NewCmpVal); + NewCmpRHS = Context.getConstantExprIntToPtr(CI, NewCmpTy); } NewOffset = TyBits == NewTyBits ? SE->getMulExpr(CondUse->getOffset(), @@ -2171,6 +2175,8 @@ void LoopStrengthReduce::OptimizeShadowIV(Loop *L) { const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L); if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) return; + + LLVMContext &Context = L->getHeader()->getContext(); for (unsigned Stride = 0, e = IU->StrideOrder.size(); Stride != e; ++Stride) { @@ -2233,7 +2239,7 @@ void LoopStrengthReduce::OptimizeShadowIV(Loop *L) { ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry)); if (!Init) continue; - Constant *NewInit = Context->getConstantFP(DestTy, Init->getZExtValue()); + Constant *NewInit = Context.getConstantFP(DestTy, Init->getZExtValue()); BinaryOperator *Incr = dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch)); @@ -2257,7 +2263,7 @@ void LoopStrengthReduce::OptimizeShadowIV(Loop *L) { PHINode *NewPH = PHINode::Create(DestTy, "IV.S.", PH); /* create new increment. '++d' in above example. */ - Constant *CFP = Context->getConstantFP(DestTy, C->getZExtValue()); + Constant *CFP = Context.getConstantFP(DestTy, C->getZExtValue()); BinaryOperator *NewIncr = BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ? 
Instruction::FAdd : Instruction::FSub, @@ -2293,6 +2299,8 @@ void LoopStrengthReduce::OptimizeLoopTermCond(Loop *L) { // one register value. BasicBlock *LatchBlock = L->getLoopLatch(); BasicBlock *ExitingBlock = L->getExitingBlock(); + LLVMContext &Context = LatchBlock->getContext(); + if (!ExitingBlock) // Multiple exits, just look at the exit in the latch block if there is one. ExitingBlock = LatchBlock; @@ -2382,7 +2390,7 @@ void LoopStrengthReduce::OptimizeLoopTermCond(Loop *L) { Cond->moveBefore(TermBr); } else { // Otherwise, clone the terminating condition and insert into the loopend. - Cond = cast<ICmpInst>(Cond->clone(*Context)); + Cond = cast<ICmpInst>(Cond->clone(Context)); Cond->setName(L->getHeader()->getName() + ".termcond"); LatchBlock->getInstList().insert(TermBr, Cond); @@ -2424,6 +2432,8 @@ void LoopStrengthReduce::OptimizeLoopCountIV(Loop *L) { if (!ExitingBlock) return; // More than one block exiting! + LLVMContext &Context = ExitingBlock->getContext(); + // Okay, we've computed the exiting block. See what condition causes us to // exit. // @@ -2496,7 +2506,7 @@ void LoopStrengthReduce::OptimizeLoopCountIV(Loop *L) { Value *startVal = phi->getIncomingValue(inBlock); Value *endVal = Cond->getOperand(1); // FIXME check for case where both are constant - Constant* Zero = Context->getConstantInt(Cond->getOperand(1)->getType(), 0); + Constant* Zero = Context.getConstantInt(Cond->getOperand(1)->getType(), 0); BinaryOperator *NewStartVal = BinaryOperator::Create(Instruction::Sub, endVal, startVal, "tmp", PreInsertPt); diff --git a/lib/Transforms/Scalar/LoopUnswitch.cpp b/lib/Transforms/Scalar/LoopUnswitch.cpp index 16e04b3..100b8c7 100644 --- a/lib/Transforms/Scalar/LoopUnswitch.cpp +++ b/lib/Transforms/Scalar/LoopUnswitch.cpp @@ -216,6 +216,7 @@ bool LoopUnswitch::runOnLoop(Loop *L, LPPassManager &LPM_Ref) { /// and profitable. bool LoopUnswitch::processCurrentLoop() { bool Changed = false; + LLVMContext &Context = currentLoop->getHeader()->getContext(); // Loop over all of the basic blocks in the loop. If we find an interior // block that is branching on a loop-invariant condition, we can unswitch this @@ -233,7 +234,7 @@ bool LoopUnswitch::processCurrentLoop() { Value *LoopCond = FindLIVLoopCondition(BI->getCondition(), currentLoop, Changed); if (LoopCond && UnswitchIfProfitable(LoopCond, - Context->getTrue())) { + Context.getTrue())) { ++NumBranches; return true; } @@ -263,7 +264,7 @@ bool LoopUnswitch::processCurrentLoop() { Value *LoopCond = FindLIVLoopCondition(SI->getCondition(), currentLoop, Changed); if (LoopCond && UnswitchIfProfitable(LoopCond, - Context->getTrue())) { + Context.getTrue())) { ++NumSelects; return true; } @@ -337,6 +338,7 @@ bool LoopUnswitch::IsTrivialUnswitchCondition(Value *Cond, Constant **Val, BasicBlock **LoopExit) { BasicBlock *Header = currentLoop->getHeader(); TerminatorInst *HeaderTerm = Header->getTerminator(); + LLVMContext &Context = Header->getContext(); BasicBlock *LoopExitBB = 0; if (BranchInst *BI = dyn_cast<BranchInst>(HeaderTerm)) { @@ -351,10 +353,10 @@ bool LoopUnswitch::IsTrivialUnswitchCondition(Value *Cond, Constant **Val, // this. 
if ((LoopExitBB = isTrivialLoopExitBlock(currentLoop, BI->getSuccessor(0)))) { - if (Val) *Val = Context->getTrue(); + if (Val) *Val = Context.getTrue(); } else if ((LoopExitBB = isTrivialLoopExitBlock(currentLoop, BI->getSuccessor(1)))) { - if (Val) *Val = Context->getFalse(); + if (Val) *Val = Context.getFalse(); } } else if (SwitchInst *SI = dyn_cast<SwitchInst>(HeaderTerm)) { // If this isn't a switch on Cond, we can't handle it. @@ -510,7 +512,7 @@ void LoopUnswitch::EmitPreheaderBranchOnCondition(Value *LIC, Constant *Val, Value *BranchVal = LIC; if (!isa<ConstantInt>(Val) || Val->getType() != Type::Int1Ty) BranchVal = new ICmpInst(InsertPt, ICmpInst::ICMP_EQ, LIC, Val, "tmp"); - else if (Val != Context->getTrue()) + else if (Val != Val->getContext().getTrue()) // We want to enter the new loop when the condition is true. std::swap(TrueDest, FalseDest); @@ -818,7 +820,7 @@ void LoopUnswitch::RemoveBlockIfDead(BasicBlock *BB, // Anything that uses the instructions in this basic block should have their // uses replaced with undefs. if (!I->use_empty()) - I->replaceAllUsesWith(Context->getUndef(I->getType())); + I->replaceAllUsesWith(I->getContext().getUndef(I->getType())); } // If this is the edge to the header block for a loop, remove the loop and @@ -899,6 +901,8 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC, // selects, switches. std::vector<User*> Users(LIC->use_begin(), LIC->use_end()); std::vector<Instruction*> Worklist; + LLVMContext &Context = Val->getContext(); + // If we know that LIC == Val, or that LIC == NotVal, just replace uses of LIC // in the loop with the appropriate one directly. @@ -907,7 +911,7 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC, if (IsEqual) Replacement = Val; else - Replacement = Context->getConstantInt(Type::Int1Ty, + Replacement = Context.getConstantInt(Type::Int1Ty, !cast<ConstantInt>(Val)->getZExtValue()); for (unsigned i = 0, e = Users.size(); i != e; ++i) @@ -947,7 +951,7 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC, Instruction* OldTerm = Old->getTerminator(); BranchInst::Create(Split, SISucc, - Context->getTrue(), OldTerm); + Context.getTrue(), OldTerm); LPM->deleteSimpleAnalysisValue(Old->getTerminator(), L); Old->getTerminator()->eraseFromParent(); @@ -988,7 +992,7 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) { Worklist.pop_back(); // Simple constant folding. - if (Constant *C = ConstantFoldInstruction(I, Context)) { + if (Constant *C = ConstantFoldInstruction(I, I->getContext())) { ReplaceUsesOfWith(I, C, Worklist, L, LPM); continue; } diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp index 3c7a5ab..8937e1c 100644 --- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp +++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp @@ -36,7 +36,7 @@ STATISTIC(NumMemSetInfer, "Number of memsets inferred"); /// true for all i8 values obviously, but is also true for i32 0, i32 -1, /// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated /// byte store (e.g. i16 0x1234), return null. -static Value *isBytewiseValue(Value *V, LLVMContext* Context) { +static Value *isBytewiseValue(Value *V, LLVMContext& Context) { // All byte-wide stores are splatable, even of arbitrary variables. if (V->getType() == Type::Int8Ty) return V; @@ -44,9 +44,9 @@ static Value *isBytewiseValue(Value *V, LLVMContext* Context) { // corresponding integer value is "byteable". 
An important case is 0.0. if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) { if (CFP->getType() == Type::FloatTy) - V = Context->getConstantExprBitCast(CFP, Type::Int32Ty); + V = Context.getConstantExprBitCast(CFP, Type::Int32Ty); if (CFP->getType() == Type::DoubleTy) - V = Context->getConstantExprBitCast(CFP, Type::Int64Ty); + V = Context.getConstantExprBitCast(CFP, Type::Int64Ty); // Don't handle long double formats, which have strange constraints. } @@ -69,7 +69,7 @@ static Value *isBytewiseValue(Value *V, LLVMContext* Context) { if (Val != Val2) return 0; } - return Context->getConstantInt(Val); + return Context.getConstantInt(Val); } } @@ -346,7 +346,7 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator& BBI) { // Ensure that the value being stored is something that can be memset'able a // byte at a time like "0" or "-1" or any width, as well as things like // 0xA0A0A0A0 and 0.0. - Value *ByteVal = isBytewiseValue(SI->getOperand(0), Context); + Value *ByteVal = isBytewiseValue(SI->getOperand(0), SI->getContext()); if (!ByteVal) return false; @@ -385,7 +385,8 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator& BBI) { if (NextStore->isVolatile()) break; // Check to see if this stored value is of the same byte-splattable value. - if (ByteVal != isBytewiseValue(NextStore->getOperand(0), Context)) + if (ByteVal != isBytewiseValue(NextStore->getOperand(0), + NextStore->getContext())) break; // Check to see if this store is to a constant offset from the start ptr. @@ -439,15 +440,17 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator& BBI) { StartPtr = Range.StartPtr; // Cast the start ptr to be i8* as memset requires. - const Type *i8Ptr = Context->getPointerTypeUnqual(Type::Int8Ty); + const Type *i8Ptr = SI->getContext().getPointerTypeUnqual(Type::Int8Ty); if (StartPtr->getType() != i8Ptr) StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getNameStart(), InsertPt); Value *Ops[] = { StartPtr, ByteVal, // Start, value - Context->getConstantInt(Type::Int64Ty, Range.End-Range.Start), // size - Context->getConstantInt(Type::Int32Ty, Range.Alignment) // align + // size + SI->getContext().getConstantInt(Type::Int64Ty, Range.End-Range.Start), + // align + SI->getContext().getConstantInt(Type::Int32Ty, Range.Alignment) }; Value *C = CallInst::Create(MemSetF, Ops, Ops+4, "", InsertPt); DEBUG(cerr << "Replace stores:\n"; diff --git a/lib/Transforms/Scalar/PredicateSimplifier.cpp b/lib/Transforms/Scalar/PredicateSimplifier.cpp index 9928c68..4ee19f7 100644 --- a/lib/Transforms/Scalar/PredicateSimplifier.cpp +++ b/lib/Transforms/Scalar/PredicateSimplifier.cpp @@ -1664,7 +1664,7 @@ namespace { TopBB(TopBB), TopInst(NULL), modified(modified), - Context(TopBB->getContext()) + Context(&TopBB->getContext()) { assert(Top && "VRPSolver created for unreachable basic block."); } @@ -1681,7 +1681,7 @@ namespace { TopBB(TopInst->getParent()), TopInst(TopInst), modified(modified), - Context(TopInst->getParent()->getContext()) + Context(&TopInst->getContext()) { assert(Top && "VRPSolver created for unreachable basic block."); assert(Top->getBlock() == TopInst->getParent() && "Context mismatch."); @@ -2267,6 +2267,7 @@ namespace { std::vector<DomTreeDFS::Node *> WorkList; + LLVMContext *Context; public: static char ID; // Pass identification, replacement for typeid PredicateSimplifier() : FunctionPass(&ID) {} @@ -2402,6 +2403,7 @@ namespace { DominatorTree *DT = &getAnalysis<DominatorTree>(); DTDFS = new DomTreeDFS(DT); TargetData *TD = &getAnalysis<TargetData>(); + 
Context = &F.getContext(); DOUT << "Entering Function: " << F.getName() << "\n"; @@ -2447,7 +2449,7 @@ namespace { return; } - LLVMContext *Context = BI.getParent()->getContext(); + LLVMContext *Context = &BI.getContext(); for (DomTreeDFS::Node::iterator I = DTNode->begin(), E = DTNode->end(); I != E; ++I) { @@ -2505,7 +2507,7 @@ namespace { void PredicateSimplifier::Forwards::visitAllocaInst(AllocaInst &AI) { VRPSolver VRP(VN, IG, UB, VR, PS->DTDFS, PS->modified, &AI); - VRP.add(AI.getParent()->getContext()->getNullValue(AI.getType()), + VRP.add(AI.getContext().getNullValue(AI.getType()), &AI, ICmpInst::ICMP_NE); VRP.solve(); } @@ -2516,7 +2518,7 @@ namespace { if (isa<Constant>(Ptr)) return; VRPSolver VRP(VN, IG, UB, VR, PS->DTDFS, PS->modified, &LI); - VRP.add(LI.getParent()->getContext()->getNullValue(Ptr->getType()), + VRP.add(LI.getContext().getNullValue(Ptr->getType()), Ptr, ICmpInst::ICMP_NE); VRP.solve(); } @@ -2526,14 +2528,14 @@ namespace { if (isa<Constant>(Ptr)) return; VRPSolver VRP(VN, IG, UB, VR, PS->DTDFS, PS->modified, &SI); - VRP.add(SI.getParent()->getContext()->getNullValue(Ptr->getType()), + VRP.add(SI.getContext().getNullValue(Ptr->getType()), Ptr, ICmpInst::ICMP_NE); VRP.solve(); } void PredicateSimplifier::Forwards::visitSExtInst(SExtInst &SI) { VRPSolver VRP(VN, IG, UB, VR, PS->DTDFS, PS->modified, &SI); - LLVMContext *Context = SI.getParent()->getContext(); + LLVMContext *Context = &SI.getContext(); uint32_t SrcBitWidth = cast<IntegerType>(SI.getSrcTy())->getBitWidth(); uint32_t DstBitWidth = cast<IntegerType>(SI.getDestTy())->getBitWidth(); APInt Min(APInt::getHighBitsSet(DstBitWidth, DstBitWidth-SrcBitWidth+1)); @@ -2545,7 +2547,7 @@ namespace { void PredicateSimplifier::Forwards::visitZExtInst(ZExtInst &ZI) { VRPSolver VRP(VN, IG, UB, VR, PS->DTDFS, PS->modified, &ZI); - LLVMContext *Context = ZI.getParent()->getContext(); + LLVMContext *Context = &ZI.getContext(); uint32_t SrcBitWidth = cast<IntegerType>(ZI.getSrcTy())->getBitWidth(); uint32_t DstBitWidth = cast<IntegerType>(ZI.getDestTy())->getBitWidth(); APInt Max(APInt::getLowBitsSet(DstBitWidth, SrcBitWidth)); @@ -2564,7 +2566,7 @@ namespace { case Instruction::SDiv: { Value *Divisor = BO.getOperand(1); VRPSolver VRP(VN, IG, UB, VR, PS->DTDFS, PS->modified, &BO); - VRP.add(BO.getParent()->getContext()->getNullValue(Divisor->getType()), + VRP.add(BO.getContext().getNullValue(Divisor->getType()), Divisor, ICmpInst::ICMP_NE); VRP.solve(); break; @@ -2638,7 +2640,7 @@ namespace { Pred = IC.getPredicate(); - LLVMContext *Context = IC.getParent()->getContext(); + LLVMContext *Context = &IC.getContext(); if (ConstantInt *Op1 = dyn_cast<ConstantInt>(IC.getOperand(1))) { ConstantInt *NextVal = 0; diff --git a/lib/Transforms/Scalar/Reassociate.cpp b/lib/Transforms/Scalar/Reassociate.cpp index 2877852..9456940 100644 --- a/lib/Transforms/Scalar/Reassociate.cpp +++ b/lib/Transforms/Scalar/Reassociate.cpp @@ -200,8 +200,8 @@ static BinaryOperator *isReassociableOp(Value *V, unsigned Opcode) { /// static Instruction *LowerNegateToMultiply(Instruction *Neg, std::map<AssertingVH<>, unsigned> &ValueRankMap, - LLVMContext *Context) { - Constant *Cst = Context->getAllOnesValue(Neg->getType()); + LLVMContext &Context) { + Constant *Cst = Neg->getContext().getAllOnesValue(Neg->getType()); Instruction *Res = BinaryOperator::CreateMul(Neg->getOperand(1), Cst, "",Neg); ValueRankMap.erase(Neg); @@ -256,6 +256,7 @@ void Reassociate::LinearizeExprTree(BinaryOperator *I, std::vector<ValueEntry> &Ops) { Value *LHS = I->getOperand(0), 
*RHS = I->getOperand(1); unsigned Opcode = I->getOpcode(); + LLVMContext &Context = I->getContext(); // First step, linearize the expression if it is in ((A+B)+(C+D)) form. BinaryOperator *LHSBO = isReassociableOp(LHS, Opcode); @@ -284,8 +285,8 @@ void Reassociate::LinearizeExprTree(BinaryOperator *I, Ops.push_back(ValueEntry(getRank(RHS), RHS)); // Clear the leaves out. - I->setOperand(0, Context->getUndef(I->getType())); - I->setOperand(1, Context->getUndef(I->getType())); + I->setOperand(0, Context.getUndef(I->getType())); + I->setOperand(1, Context.getUndef(I->getType())); return; } else { // Turn X+(Y+Z) -> (Y+Z)+X @@ -320,7 +321,7 @@ void Reassociate::LinearizeExprTree(BinaryOperator *I, Ops.push_back(ValueEntry(getRank(RHS), RHS)); // Clear the RHS leaf out. - I->setOperand(1, Context->getUndef(I->getType())); + I->setOperand(1, Context.getUndef(I->getType())); } // RewriteExprTree - Now that the operands for this expression tree are @@ -373,7 +374,7 @@ void Reassociate::RewriteExprTree(BinaryOperator *I, // version of the value is returned, and BI is left pointing at the instruction // that should be processed next by the reassociation pass. // -static Value *NegateValue(LLVMContext *Context, Value *V, Instruction *BI) { +static Value *NegateValue(LLVMContext &Context, Value *V, Instruction *BI) { // We are trying to expose opportunity for reassociation. One of the things // that we want to do to achieve this is to push a negation as deep into an // expression chain as possible, to expose the add instructions. In practice, @@ -402,12 +403,12 @@ static Value *NegateValue(LLVMContext *Context, Value *V, Instruction *BI) { // Insert a 'neg' instruction that subtracts the value from zero to get the // negation. // - return BinaryOperator::CreateNeg(*Context, V, V->getName() + ".neg", BI); + return BinaryOperator::CreateNeg(Context, V, V->getName() + ".neg", BI); } /// ShouldBreakUpSubtract - Return true if we should break up this subtract of /// X-Y into (X + -Y). -static bool ShouldBreakUpSubtract(LLVMContext *Context, Instruction *Sub) { +static bool ShouldBreakUpSubtract(LLVMContext &Context, Instruction *Sub) { // If this is a negation, we can't split it up! if (BinaryOperator::isNeg(Sub)) return false; @@ -431,7 +432,7 @@ static bool ShouldBreakUpSubtract(LLVMContext *Context, Instruction *Sub) { /// BreakUpSubtract - If we have (X-Y), and if either X is an add, or if this is /// only used by an add, transform this into (X+(0-Y)) to promote better /// reassociation. -static Instruction *BreakUpSubtract(LLVMContext *Context, Instruction *Sub, +static Instruction *BreakUpSubtract(LLVMContext &Context, Instruction *Sub, std::map<AssertingVH<>, unsigned> &ValueRankMap) { // Convert a subtract into an add and a neg instruction... so that sub // instructions can be commuted with other add instructions... @@ -458,16 +459,16 @@ static Instruction *BreakUpSubtract(LLVMContext *Context, Instruction *Sub, /// reassociation. static Instruction *ConvertShiftToMul(Instruction *Shl, std::map<AssertingVH<>, unsigned> &ValueRankMap, - LLVMContext *Context) { + LLVMContext &Context) { // If an operand of this shift is a reassociable multiply, or if the shift // is used by a reassociable multiply or add, turn into a multiply. 
if (isReassociableOp(Shl->getOperand(0), Instruction::Mul) || (Shl->hasOneUse() && (isReassociableOp(Shl->use_back(), Instruction::Mul) || isReassociableOp(Shl->use_back(), Instruction::Add)))) { - Constant *MulCst = Context->getConstantInt(Shl->getType(), 1); + Constant *MulCst = Context.getConstantInt(Shl->getType(), 1); MulCst = - Context->getConstantExprShl(MulCst, cast<Constant>(Shl->getOperand(1))); + Context.getConstantExprShl(MulCst, cast<Constant>(Shl->getOperand(1))); Instruction *Mul = BinaryOperator::CreateMul(Shl->getOperand(0), MulCst, "", Shl); @@ -562,12 +563,14 @@ Value *Reassociate::OptimizeExpression(BinaryOperator *I, bool IterateOptimization = false; if (Ops.size() == 1) return Ops[0].Op; + LLVMContext &Context = I->getContext(); + unsigned Opcode = I->getOpcode(); if (Constant *V1 = dyn_cast<Constant>(Ops[Ops.size()-2].Op)) if (Constant *V2 = dyn_cast<Constant>(Ops.back().Op)) { Ops.pop_back(); - Ops.back().Op = Context->getConstantExpr(Opcode, V1, V2); + Ops.back().Op = Context.getConstantExpr(Opcode, V1, V2); return OptimizeExpression(I, Ops); } @@ -623,10 +626,10 @@ Value *Reassociate::OptimizeExpression(BinaryOperator *I, if (FoundX != i) { if (Opcode == Instruction::And) { // ...&X&~X = 0 ++NumAnnihil; - return Context->getNullValue(X->getType()); + return Context.getNullValue(X->getType()); } else if (Opcode == Instruction::Or) { // ...|X|~X = -1 ++NumAnnihil; - return Context->getAllOnesValue(X->getType()); + return Context.getAllOnesValue(X->getType()); } } } @@ -645,7 +648,7 @@ Value *Reassociate::OptimizeExpression(BinaryOperator *I, assert(Opcode == Instruction::Xor); if (e == 2) { ++NumAnnihil; - return Context->getNullValue(Ops[0].Op->getType()); + return Context.getNullValue(Ops[0].Op->getType()); } // ... X^X -> ... Ops.erase(Ops.begin()+i, Ops.begin()+i+2); @@ -670,7 +673,7 @@ Value *Reassociate::OptimizeExpression(BinaryOperator *I, // Remove X and -X from the operand list. if (Ops.size() == 2) { ++NumAnnihil; - return Context->getNullValue(X->getType()); + return Context.getNullValue(X->getType()); } else { Ops.erase(Ops.begin()+i); if (i < FoundX) @@ -781,6 +784,8 @@ Value *Reassociate::OptimizeExpression(BinaryOperator *I, /// ReassociateBB - Inspect all of the instructions in this basic block, /// reassociating them as we go. void Reassociate::ReassociateBB(BasicBlock *BB) { + LLVMContext &Context = BB->getContext(); + for (BasicBlock::iterator BBI = BB->begin(); BBI != BB->end(); ) { Instruction *BI = BBI++; if (BI->getOpcode() == Instruction::Shl && diff --git a/lib/Transforms/Scalar/Reg2Mem.cpp b/lib/Transforms/Scalar/Reg2Mem.cpp index ac95d25..2c1569d 100644 --- a/lib/Transforms/Scalar/Reg2Mem.cpp +++ b/lib/Transforms/Scalar/Reg2Mem.cpp @@ -69,7 +69,7 @@ namespace { CastInst *AllocaInsertionPoint = CastInst::Create(Instruction::BitCast, - Context->getNullValue(Type::Int32Ty), Type::Int32Ty, + F.getContext().getNullValue(Type::Int32Ty), Type::Int32Ty, "reg2mem alloca point", I); // Find the escaped instructions. 
But don't create stack slots for diff --git a/lib/Transforms/Scalar/SCCP.cpp b/lib/Transforms/Scalar/SCCP.cpp index 5952e97..106428e 100644 --- a/lib/Transforms/Scalar/SCCP.cpp +++ b/lib/Transforms/Scalar/SCCP.cpp @@ -645,7 +645,7 @@ void SCCPSolver::visitReturnInst(ReturnInst &I) { DenseMap<std::pair<Function*, unsigned>, LatticeVal>::iterator It = TrackedMultipleRetVals.find(std::make_pair(F, i)); if (It == TrackedMultipleRetVals.end()) break; - if (Value *Val = FindInsertedValue(I.getOperand(0), i, Context)) + if (Value *Val = FindInsertedValue(I.getOperand(0), i, I.getContext())) mergeInValue(It->second, F, getValueState(Val)); } } @@ -1162,7 +1162,7 @@ void SCCPSolver::visitLoadInst(LoadInst &I) { if (GV->isConstant() && GV->hasDefinitiveInitializer()) if (Constant *V = ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE, - Context)) { + *Context)) { markConstant(IV, &I, V); return; } @@ -1537,7 +1537,7 @@ FunctionPass *llvm::createSCCPPass() { bool SCCP::runOnFunction(Function &F) { DOUT << "SCCP on function '" << F.getNameStart() << "'\n"; SCCPSolver Solver; - Solver.setContext(Context); + Solver.setContext(&F.getContext()); // Mark the first block of the function as being executable. Solver.MarkBlockExecutable(F.begin()); @@ -1577,7 +1577,7 @@ bool SCCP::runOnFunction(Function &F) { Instruction *I = Insts.back(); Insts.pop_back(); if (!I->use_empty()) - I->replaceAllUsesWith(Context->getUndef(I->getType())); + I->replaceAllUsesWith(F.getContext().getUndef(I->getType())); BB->getInstList().erase(I); MadeChanges = true; ++NumInstRemoved; @@ -1597,7 +1597,7 @@ bool SCCP::runOnFunction(Function &F) { continue; Constant *Const = IV.isConstant() - ? IV.getConstant() : Context->getUndef(Inst->getType()); + ? IV.getConstant() : F.getContext().getUndef(Inst->getType()); DOUT << " Constant: " << *Const << " = " << *Inst; // Replaces all of the uses of a variable with uses of the constant. @@ -1662,7 +1662,7 @@ static bool AddressIsTaken(GlobalValue *GV) { } bool IPSCCP::runOnModule(Module &M) { - Context = &M.getContext(); + LLVMContext *Context = &M.getContext(); SCCPSolver Solver; Solver.setContext(Context); diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp index 7df4eb6..b9044d1 100644 --- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp +++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp @@ -187,7 +187,7 @@ bool SROA::performPromotion(Function &F) { if (Allocas.empty()) break; - PromoteMemToReg(Allocas, DT, DF, Context); + PromoteMemToReg(Allocas, DT, DF, F.getContext()); NumPromoted += Allocas.size(); Changed = true; } @@ -243,7 +243,7 @@ bool SROA::performScalarRepl(Function &F) { DOUT << " memcpy = " << *TheCopy; Constant *TheSrc = cast<Constant>(TheCopy->getOperand(2)); AI->replaceAllUsesWith( - Context->getConstantExprBitCast(TheSrc, AI->getType())); + F.getContext().getConstantExprBitCast(TheSrc, AI->getType())); TheCopy->eraseFromParent(); // Don't mutate the global. AI->eraseFromParent(); ++NumGlobals; @@ -308,7 +308,7 @@ bool SROA::performScalarRepl(Function &F) { DOUT << "CONVERT TO SCALAR INTEGER: " << *AI << "\n"; // Create and insert the integer alloca. 
- const Type *NewTy = Context->getIntegerType(AllocaSize*8); + const Type *NewTy = F.getContext().getIntegerType(AllocaSize*8); NewAI = new AllocaInst(NewTy, 0, "", AI->getParent()->begin()); ConvertUsesToScalar(AI, NewAI, 0); } @@ -331,6 +331,7 @@ void SROA::DoScalarReplacement(AllocationInst *AI, std::vector<AllocationInst*> &WorkList) { DOUT << "Found inst to SROA: " << *AI; SmallVector<AllocaInst*, 32> ElementAllocas; + LLVMContext &Context = AI->getContext(); if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) { ElementAllocas.reserve(ST->getNumContainedTypes()); for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) { @@ -372,7 +373,7 @@ void SROA::DoScalarReplacement(AllocationInst *AI, // %insert = insertvalue { i32, i32 } %insert.0, i32 %load.1, 1 // (Also works for arrays instead of structs) if (LoadInst *LI = dyn_cast<LoadInst>(User)) { - Value *Insert = Context->getUndef(LI->getType()); + Value *Insert = Context.getUndef(LI->getType()); for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) { Value *Load = new LoadInst(ElementAllocas[i], "load", LI); Insert = InsertValueInst::Create(Insert, Load, i, "insert", LI); @@ -419,7 +420,7 @@ void SROA::DoScalarReplacement(AllocationInst *AI, // expanded itself once the worklist is rerun. // SmallVector<Value*, 8> NewArgs; - NewArgs.push_back(Context->getNullValue(Type::Int32Ty)); + NewArgs.push_back(Context.getNullValue(Type::Int32Ty)); NewArgs.append(GEPI->op_begin()+3, GEPI->op_end()); RepValue = GetElementPtrInst::Create(AllocaToUse, NewArgs.begin(), NewArgs.end(), "", GEPI); @@ -513,6 +514,7 @@ static bool AllUsersAreLoads(Value *Ptr) { /// void SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI, AllocaInfo &Info) { + LLVMContext &Context = User->getContext(); if (BitCastInst *C = dyn_cast<BitCastInst>(User)) return isSafeUseOfBitCastedAllocation(C, AI, Info); @@ -532,7 +534,7 @@ void SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI, // The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>". if (I == E || - I.getOperand() != Context->getNullValue(I.getOperand()->getType())) { + I.getOperand() != Context.getNullValue(I.getOperand()->getType())) { return MarkUnsafe(Info); } @@ -728,6 +730,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst, // that doesn't have anything to do with the alloca that we are promoting. For // memset, this Value* stays null. Value *OtherPtr = 0; + LLVMContext &Context = MI->getContext(); unsigned MemAlignment = MI->getAlignment(); if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { // memmove/memcopy if (BCInst == MTI->getRawDest()) @@ -765,7 +768,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst, const Type *BytePtrTy = MI->getRawDest()->getType(); bool SROADest = MI->getRawDest() == BCInst; - Constant *Zero = Context->getNullValue(Type::Int32Ty); + Constant *Zero = Context.getNullValue(Type::Int32Ty); for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { // If this is a memcpy/memmove, emit a GEP of the other element address. 
@@ -773,7 +776,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst, unsigned OtherEltAlign = MemAlignment; if (OtherPtr) { - Value *Idx[2] = { Zero, Context->getConstantInt(Type::Int32Ty, i) }; + Value *Idx[2] = { Zero, Context.getConstantInt(Type::Int32Ty, i) }; OtherElt = GetElementPtrInst::Create(OtherPtr, Idx, Idx + 2, OtherPtr->getNameStr()+"."+utostr(i), MI); @@ -820,7 +823,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst, Constant *StoreVal; if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) { if (CI->isZero()) { - StoreVal = Context->getNullValue(EltTy); // 0.0, null, 0, <0,0> + StoreVal = Context.getNullValue(EltTy); // 0.0, null, 0, <0,0> } else { // If EltTy is a vector type, get the element type. const Type *ValTy = EltTy->getScalarType(); @@ -836,18 +839,18 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst, } // Convert the integer value to the appropriate type. - StoreVal = Context->getConstantInt(TotalVal); + StoreVal = Context.getConstantInt(TotalVal); if (isa<PointerType>(ValTy)) - StoreVal = Context->getConstantExprIntToPtr(StoreVal, ValTy); + StoreVal = Context.getConstantExprIntToPtr(StoreVal, ValTy); else if (ValTy->isFloatingPoint()) - StoreVal = Context->getConstantExprBitCast(StoreVal, ValTy); + StoreVal = Context.getConstantExprBitCast(StoreVal, ValTy); assert(StoreVal->getType() == ValTy && "Type mismatch!"); // If the requested value was a vector constant, create it. if (EltTy != ValTy) { unsigned NumElts = cast<VectorType>(ValTy)->getNumElements(); SmallVector<Constant*, 16> Elts(NumElts, StoreVal); - StoreVal = Context->getConstantVector(&Elts[0], NumElts); + StoreVal = Context.getConstantVector(&Elts[0], NumElts); } } new StoreInst(StoreVal, EltPtr, MI); @@ -873,15 +876,15 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst, Value *Ops[] = { SROADest ? EltPtr : OtherElt, // Dest ptr SROADest ? OtherElt : EltPtr, // Src ptr - Context->getConstantInt(MI->getOperand(3)->getType(), EltSize), // Size - Context->getConstantInt(Type::Int32Ty, OtherEltAlign) // Align + Context.getConstantInt(MI->getOperand(3)->getType(), EltSize), // Size + Context.getConstantInt(Type::Int32Ty, OtherEltAlign) // Align }; CallInst::Create(TheFn, Ops, Ops + 4, "", MI); } else { assert(isa<MemSetInst>(MI)); Value *Ops[] = { EltPtr, MI->getOperand(2), // Dest, Value, - Context->getConstantInt(MI->getOperand(3)->getType(), EltSize), // Size + Context.getConstantInt(MI->getOperand(3)->getType(), EltSize), // Size Zero // Align }; CallInst::Create(TheFn, Ops, Ops + 4, "", MI); @@ -898,6 +901,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, SmallVector<AllocaInst*, 32> &NewElts){ // Extract each element out of the integer according to its structure offset // and store the element value to the individual alloca. 
+ LLVMContext &Context = SI->getContext(); Value *SrcVal = SI->getOperand(0); const Type *AllocaEltTy = AI->getType()->getElementType(); uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy); @@ -911,7 +915,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, // Handle tail padding by extending the operand if (TD->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits) SrcVal = new ZExtInst(SrcVal, - Context->getIntegerType(AllocaSizeBits), "", SI); + Context.getIntegerType(AllocaSizeBits), "", SI); DOUT << "PROMOTING STORE TO WHOLE ALLOCA: " << *AI << *SI; @@ -930,7 +934,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, Value *EltVal = SrcVal; if (Shift) { - Value *ShiftVal = Context->getConstantInt(EltVal->getType(), Shift); + Value *ShiftVal = Context.getConstantInt(EltVal->getType(), Shift); EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal, "sroa.store.elt", SI); } @@ -943,7 +947,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, if (FieldSizeBits != AllocaSizeBits) EltVal = new TruncInst(EltVal, - Context->getIntegerType(FieldSizeBits), "", SI); + Context.getIntegerType(FieldSizeBits), "", SI); Value *DestField = NewElts[i]; if (EltVal->getType() == FieldTy) { // Storing to an integer field of this size, just do it. @@ -953,7 +957,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, } else { // Otherwise, bitcast the dest pointer (for aggregates). DestField = new BitCastInst(DestField, - Context->getPointerTypeUnqual(EltVal->getType()), + Context.getPointerTypeUnqual(EltVal->getType()), "", SI); } new StoreInst(EltVal, DestField, SI); @@ -978,7 +982,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, Value *EltVal = SrcVal; if (Shift) { - Value *ShiftVal = Context->getConstantInt(EltVal->getType(), Shift); + Value *ShiftVal = Context.getConstantInt(EltVal->getType(), Shift); EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal, "sroa.store.elt", SI); } @@ -986,7 +990,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, // Truncate down to an integer of the right size. if (ElementSizeBits != AllocaSizeBits) EltVal = new TruncInst(EltVal, - Context->getIntegerType(ElementSizeBits),"",SI); + Context.getIntegerType(ElementSizeBits),"",SI); Value *DestField = NewElts[i]; if (EltVal->getType() == ArrayEltTy) { // Storing to an integer field of this size, just do it. @@ -996,7 +1000,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, } else { // Otherwise, bitcast the dest pointer (for aggregates). DestField = new BitCastInst(DestField, - Context->getPointerTypeUnqual(EltVal->getType()), + Context.getPointerTypeUnqual(EltVal->getType()), "", SI); } new StoreInst(EltVal, DestField, SI); @@ -1039,9 +1043,11 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI, const Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType(); ArrayEltBitOffset = TD->getTypeAllocSizeInBits(ArrayEltTy); } - - Value *ResultVal = - Context->getNullValue(Context->getIntegerType(AllocaSizeBits)); + + LLVMContext &Context = LI->getContext(); + + Value *ResultVal = + Context.getNullValue(Context.getIntegerType(AllocaSizeBits)); for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { // Load the value from the alloca. If the NewElt is an aggregate, cast @@ -1054,11 +1060,11 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI, // Ignore zero sized fields like {}, they obviously contain no data. 
if (FieldSizeBits == 0) continue; - const IntegerType *FieldIntTy = Context->getIntegerType(FieldSizeBits); + const IntegerType *FieldIntTy = Context.getIntegerType(FieldSizeBits); if (!isa<IntegerType>(FieldTy) && !FieldTy->isFloatingPoint() && !isa<VectorType>(FieldTy)) SrcField = new BitCastInst(SrcField, - Context->getPointerTypeUnqual(FieldIntTy), + Context.getPointerTypeUnqual(FieldIntTy), "", LI); SrcField = new LoadInst(SrcField, "sroa.load.elt", LI); @@ -1083,7 +1089,7 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI, Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth(); if (Shift) { - Value *ShiftVal = Context->getConstantInt(SrcField->getType(), Shift); + Value *ShiftVal = Context.getConstantInt(SrcField->getType(), Shift); SrcField = BinaryOperator::CreateShl(SrcField, ShiftVal, "", LI); } @@ -1186,8 +1192,10 @@ void SROA::CleanupGEP(GetElementPtrInst *GEPI) { if (isa<ConstantInt>(I.getOperand())) return; + LLVMContext &Context = GEPI->getContext(); + if (NumElements == 1) { - GEPI->setOperand(2, Context->getNullValue(Type::Int32Ty)); + GEPI->setOperand(2, Context.getNullValue(Type::Int32Ty)); return; } @@ -1195,16 +1203,16 @@ void SROA::CleanupGEP(GetElementPtrInst *GEPI) { // All users of the GEP must be loads. At each use of the GEP, insert // two loads of the appropriate indexed GEP and select between them. Value *IsOne = new ICmpInst(GEPI, ICmpInst::ICMP_NE, I.getOperand(), - Context->getNullValue(I.getOperand()->getType()), + Context.getNullValue(I.getOperand()->getType()), "isone"); // Insert the new GEP instructions, which are properly indexed. SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end()); - Indices[1] = Context->getNullValue(Type::Int32Ty); + Indices[1] = Context.getNullValue(Type::Int32Ty); Value *ZeroIdx = GetElementPtrInst::Create(GEPI->getOperand(0), Indices.begin(), Indices.end(), GEPI->getName()+".0", GEPI); - Indices[1] = Context->getConstantInt(Type::Int32Ty, 1); + Indices[1] = Context.getConstantInt(Type::Int32Ty, 1); Value *OneIdx = GetElementPtrInst::Create(GEPI->getOperand(0), Indices.begin(), Indices.end(), @@ -1262,7 +1270,7 @@ void SROA::CleanupAllocaUsers(AllocationInst *AI) { /// and stores would mutate the memory. static void MergeInType(const Type *In, uint64_t Offset, const Type *&VecTy, unsigned AllocaSize, const TargetData &TD, - LLVMContext *Context) { + LLVMContext &Context) { // If this could be contributing to a vector, analyze it. if (VecTy != Type::VoidTy) { // either null or a vector type. @@ -1290,7 +1298,7 @@ static void MergeInType(const Type *In, uint64_t Offset, const Type *&VecTy, cast<VectorType>(VecTy)->getElementType() ->getPrimitiveSizeInBits()/8 == EltSize)) { if (VecTy == 0) - VecTy = Context->getVectorType(In, AllocaSize/EltSize); + VecTy = In->getContext().getVectorType(In, AllocaSize/EltSize); return; } } @@ -1321,7 +1329,8 @@ bool SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy, // Don't break volatile loads. if (LI->isVolatile()) return false; - MergeInType(LI->getType(), Offset, VecTy, AllocaSize, *TD, Context); + MergeInType(LI->getType(), Offset, VecTy, + AllocaSize, *TD, V->getContext()); SawVec |= isa<VectorType>(LI->getType()); continue; } @@ -1330,7 +1339,7 @@ bool SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy, // Storing the pointer, not into the value? 
       if (SI->getOperand(0) == V || SI->isVolatile()) return 0;
       MergeInType(SI->getOperand(0)->getType(), Offset,
-                  VecTy, AllocaSize, *TD, Context);
+                  VecTy, AllocaSize, *TD, V->getContext());
       SawVec |= isa<VectorType>(SI->getOperand(0)->getType());
       continue;
     }
 
@@ -1459,7 +1468,8 @@ void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset) {
         APVal |= APVal << 8;
 
       Value *Old = Builder.CreateLoad(NewAI, (NewAI->getName()+".in").c_str());
-      Value *New = ConvertScalar_InsertValue(Context->getConstantInt(APVal),
+      Value *New = ConvertScalar_InsertValue(
+                                    User->getContext().getConstantInt(APVal),
                                              Old, Offset, Builder);
       Builder.CreateStore(New, NewAI);
     }
@@ -1531,6 +1541,8 @@ Value *SROA::ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
   if (FromVal->getType() == ToType && Offset == 0)
     return FromVal;
 
+  LLVMContext &Context = FromVal->getContext();
+
   // If the result alloca is a vector type, this is either an element
   // access or a bitcast to another vector type of the same size.
   if (const VectorType *VTy = dyn_cast<VectorType>(FromVal->getType())) {
@@ -1546,7 +1558,7 @@ Value *SROA::ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
     }
     // Return the element extracted out of it.
     Value *V = Builder.CreateExtractElement(FromVal,
-                                    Context->getConstantInt(Type::Int32Ty,Elt),
+                                    Context.getConstantInt(Type::Int32Ty,Elt),
                                             "tmp");
     if (V->getType() != ToType)
       V = Builder.CreateBitCast(V, ToType, "tmp");
@@ -1557,7 +1569,7 @@ Value *SROA::ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
   // use insertvalue's to form the FCA.
   if (const StructType *ST = dyn_cast<StructType>(ToType)) {
     const StructLayout &Layout = *TD->getStructLayout(ST);
-    Value *Res = Context->getUndef(ST);
+    Value *Res = Context.getUndef(ST);
     for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
       Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i),
                                         Offset+Layout.getElementOffsetInBits(i),
@@ -1569,7 +1581,7 @@ Value *SROA::ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
 
   if (const ArrayType *AT = dyn_cast<ArrayType>(ToType)) {
     uint64_t EltSize = TD->getTypeAllocSizeInBits(AT->getElementType());
-    Value *Res = Context->getUndef(AT);
+    Value *Res = Context.getUndef(AT);
     for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
       Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(),
                                               Offset+i*EltSize, Builder);
@@ -1599,21 +1611,21 @@ Value *SROA::ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
   // only some bits are used.
   if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
     FromVal = Builder.CreateLShr(FromVal,
-                                 Context->getConstantInt(FromVal->getType(),
+                                 Context.getConstantInt(FromVal->getType(),
                                                          ShAmt), "tmp");
   else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
     FromVal = Builder.CreateShl(FromVal,
-                                Context->getConstantInt(FromVal->getType(),
+                                Context.getConstantInt(FromVal->getType(),
                                                         -ShAmt), "tmp");
 
   // Finally, unconditionally truncate the integer to the right width.
   unsigned LIBitWidth = TD->getTypeSizeInBits(ToType);
   if (LIBitWidth < NTy->getBitWidth())
     FromVal =
-      Builder.CreateTrunc(FromVal, Context->getIntegerType(LIBitWidth), "tmp");
+      Builder.CreateTrunc(FromVal, Context.getIntegerType(LIBitWidth), "tmp");
   else if (LIBitWidth > NTy->getBitWidth())
     FromVal =
-      Builder.CreateZExt(FromVal, Context->getIntegerType(LIBitWidth), "tmp");
+      Builder.CreateZExt(FromVal, Context.getIntegerType(LIBitWidth), "tmp");
 
   // If the result is an integer, this is a trunc or bitcast.
   if (isa<IntegerType>(ToType)) {
@@ -1645,6 +1657,7 @@ Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
   // Convert the stored type to the actual type, shift it left to insert
   // then 'or' into place.
   const Type *AllocaType = Old->getType();
+  LLVMContext &Context = Old->getContext();
 
   if (const VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
     uint64_t VecSize = TD->getTypeAllocSizeInBits(VTy);
@@ -1664,7 +1677,7 @@ Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
       SV = Builder.CreateBitCast(SV, VTy->getElementType(), "tmp");
 
     SV = Builder.CreateInsertElement(Old, SV,
-                                   Context->getConstantInt(Type::Int32Ty, Elt),
+                                   Context.getConstantInt(Type::Int32Ty, Elt),
                                      "tmp");
     return SV;
   }
@@ -1697,7 +1710,7 @@ Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
   unsigned SrcStoreWidth = TD->getTypeStoreSizeInBits(SV->getType());
   unsigned DestStoreWidth = TD->getTypeStoreSizeInBits(AllocaType);
   if (SV->getType()->isFloatingPoint() || isa<VectorType>(SV->getType()))
-    SV = Builder.CreateBitCast(SV, Context->getIntegerType(SrcWidth), "tmp");
+    SV = Builder.CreateBitCast(SV, Context.getIntegerType(SrcWidth), "tmp");
   else if (isa<PointerType>(SV->getType()))
     SV = Builder.CreatePtrToInt(SV, TD->getIntPtrType(), "tmp");
 
@@ -1732,11 +1745,11 @@ Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
   // only some bits in the structure are set.
   APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
   if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
-    SV = Builder.CreateShl(SV, Context->getConstantInt(SV->getType(),
+    SV = Builder.CreateShl(SV, Context.getConstantInt(SV->getType(),
                            ShAmt), "tmp");
     Mask <<= ShAmt;
   } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
-    SV = Builder.CreateLShr(SV, Context->getConstantInt(SV->getType(),
+    SV = Builder.CreateLShr(SV, Context.getConstantInt(SV->getType(),
                             -ShAmt), "tmp");
     Mask = Mask.lshr(-ShAmt);
   }
@@ -1745,7 +1758,7 @@ Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
   // in the new bits.
   if (SrcWidth != DestWidth) {
     assert(DestWidth > SrcWidth);
-    Old = Builder.CreateAnd(Old, Context->getConstantInt(~Mask), "mask");
+    Old = Builder.CreateAnd(Old, Context.getConstantInt(~Mask), "mask");
     SV = Builder.CreateOr(Old, SV, "ins");
   }
   return SV;
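The SROA hunks above are one mechanical rewrite applied many times over: instead of reading an LLVMContext *Context member that the framework used to populate implicitly, each routine now takes the context by reference from an instruction or value it already has in hand. A minimal sketch of the before/after shape, assuming the era's LLVMContext factory methods that appear in these hunks (the helper name is hypothetical, not from the commit):

    #include "llvm/Instructions.h"   // StoreInst (2009 include paths)
    #include "llvm/DerivedTypes.h"   // IntegerType
    #include "llvm/LLVMContext.h"
    using namespace llvm;

    // Before: the pass carried "LLVMContext *Context;" and call sites wrote
    // Context->getIntegerType(Bits).  After: derive the context locally from
    // nearby IR, so there is nothing left for the framework to set up.
    static const IntegerType *storeIntType(StoreInst *SI, unsigned Bits) {
      LLVMContext &Context = SI->getContext(); // reference, not pointer
      return Context.getIntegerType(Bits);     // hence '.' instead of '->'
    }

The same substitution explains every '->' to '.' flip in the diff; only helpers with no handy IR object in scope, such as MergeInType, had their signatures changed to accept the reference explicitly.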
diff --git a/lib/Transforms/Scalar/SimplifyCFGPass.cpp b/lib/Transforms/Scalar/SimplifyCFGPass.cpp
index f6cffdd..3596ed6 100644
--- a/lib/Transforms/Scalar/SimplifyCFGPass.cpp
+++ b/lib/Transforms/Scalar/SimplifyCFGPass.cpp
@@ -58,7 +58,7 @@ FunctionPass *llvm::createCFGSimplificationPass() {
 
 /// ChangeToUnreachable - Insert an unreachable instruction before the specified
 /// instruction, making it and the rest of the code in the block dead.
-static void ChangeToUnreachable(Instruction *I, LLVMContext *Context) {
+static void ChangeToUnreachable(Instruction *I, LLVMContext &Context) {
   BasicBlock *BB = I->getParent();
   // Loop over all of the successors, removing BB's entry from any PHI
   // nodes.
@@ -71,7 +71,7 @@ static void ChangeToUnreachable(Instruction *I, LLVMContext *Context) {
   BasicBlock::iterator BBI = I, BBE = BB->end();
   while (BBI != BBE) {
     if (!BBI->use_empty())
-      BBI->replaceAllUsesWith(Context->getUndef(BBI->getType()));
+      BBI->replaceAllUsesWith(Context.getUndef(BBI->getType()));
     BB->getInstList().erase(BBI++);
   }
 }
@@ -97,7 +97,7 @@ static void ChangeToCall(InvokeInst *II) {
 
 static bool MarkAliveBlocks(BasicBlock *BB,
                             SmallPtrSet<BasicBlock*, 128> &Reachable,
-                            LLVMContext *Context) {
+                            LLVMContext &Context) {
 
   SmallVector<BasicBlock*, 128> Worklist;
   Worklist.push_back(BB);
diff --git a/lib/Transforms/Scalar/SimplifyLibCalls.cpp b/lib/Transforms/Scalar/SimplifyLibCalls.cpp
index 7e6b353..837024f 100644
--- a/lib/Transforms/Scalar/SimplifyLibCalls.cpp
+++ b/lib/Transforms/Scalar/SimplifyLibCalls.cpp
@@ -65,7 +65,7 @@ public:
     Caller = CI->getParent()->getParent();
     this->TD = &TD;
     if (CI->getCalledFunction())
-      Context = CI->getCalledFunction()->getContext();
+      Context = &CI->getCalledFunction()->getContext();
     return CallOptimizer(CI->getCalledFunction(), CI, B);
   }
 
@@ -1639,7 +1639,7 @@ bool SimplifyLibCalls::runOnFunction(Function &F) {
 
   const TargetData &TD = getAnalysis<TargetData>();
 
-  IRBuilder<> Builder(*Context);
+  IRBuilder<> Builder(F.getContext());
 
   bool Changed = false;
   for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
@@ -1730,8 +1730,6 @@ void SimplifyLibCalls::setDoesNotAlias(Function &F, unsigned n) {
 /// doInitialization - Add attributes to well-known functions.
 ///
 bool SimplifyLibCalls::doInitialization(Module &M) {
-  Context = &M.getContext();
-
   Modified = false;
   for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
     Function &F = *I;
diff --git a/lib/Transforms/Scalar/TailDuplication.cpp b/lib/Transforms/Scalar/TailDuplication.cpp
index 6d05fdf..b5409bb 100644
--- a/lib/Transforms/Scalar/TailDuplication.cpp
+++ b/lib/Transforms/Scalar/TailDuplication.cpp
@@ -305,7 +305,7 @@ void TailDup::eliminateUnconditionalBranch(BranchInst *Branch) {
   // keeping track of the mapping...
   //
   for (; BI != DestBlock->end(); ++BI) {
-    Instruction *New = BI->clone(*Context);
+    Instruction *New = BI->clone(BI->getContext());
     New->setName(BI->getName());
     SourceBlock->getInstList().push_back(New);
     ValueMapping[BI] = New;
@@ -359,7 +359,7 @@ void TailDup::eliminateUnconditionalBranch(BranchInst *Branch) {
     Instruction *Inst = BI++;
     if (isInstructionTriviallyDead(Inst))
       Inst->eraseFromParent();
-    else if (Constant *C = ConstantFoldInstruction(Inst, Context)) {
+    else if (Constant *C = ConstantFoldInstruction(Inst, BI->getContext())) {
      Inst->replaceAllUsesWith(C);
      Inst->eraseFromParent();
    }
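The last three files show the two idioms that replace the removed member. Code that runs per function asks the Function for its context on entry, as in the IRBuilder hunk; code that must cache the context across calls now stores an explicit pointer it manages itself, which is why the SimplifyLibCalls assignment gains a '&'. A short sketch of the per-function idiom under those assumptions; the function name here is made up for illustration:

    #include "llvm/Function.h"
    #include "llvm/Support/IRBuilder.h"  // IRBuilder's 2009 location
    using namespace llvm;

    // Hypothetical pass body mirroring the runOnFunction hunk above: the
    // context comes from the Function on every invocation, so no member
    // needs to be initialized by doInitialization or the pass framework.
    static bool runSketch(Function &F) {
      IRBuilder<> Builder(F.getContext());
      // ... walk F's blocks and rewrite instructions with Builder ...
      return false; // report whether the function was changed
    }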