path: root/include
author     Stephen Hines <srhines@google.com>    2014-05-29 02:49:00 -0700
committer  Stephen Hines <srhines@google.com>    2014-05-29 02:49:00 -0700
commit     dce4a407a24b04eebc6a376f8e62b41aaa7b071f (patch)
tree       dcebc53f2b182f145a2e659393bf9a0472cedf23 /include
parent     220b921aed042f9e520c26cffd8282a94c66c3d5 (diff)
Update LLVM for 3.5 rebase (r209712).
Change-Id: I149556c940fb7dc92d075273c87ff584f400941f
Diffstat (limited to 'include')
-rw-r--r--  include/llvm-c/Core.h | 54
-rw-r--r--  include/llvm-c/Object.h | 1
-rw-r--r--  include/llvm-c/Transforms/PassManagerBuilder.h | 1
-rw-r--r--  include/llvm-c/lto.h | 22
-rw-r--r--  include/llvm-c/module.modulemap | 5
-rw-r--r--  include/llvm/ADT/APFloat.h | 9
-rw-r--r--  include/llvm/ADT/ArrayRef.h | 18
-rw-r--r--  include/llvm/ADT/BitVector.h | 8
-rw-r--r--  include/llvm/ADT/DenseMap.h | 8
-rw-r--r--  include/llvm/ADT/DepthFirstIterator.h | 13
-rw-r--r--  include/llvm/ADT/EquivalenceClasses.h | 12
-rw-r--r--  include/llvm/ADT/FoldingSet.h | 4
-rw-r--r--  include/llvm/ADT/Hashing.h | 6
-rw-r--r--  include/llvm/ADT/ImmutableIntervalMap.h | 248
-rw-r--r--  include/llvm/ADT/ImmutableMap.h | 4
-rw-r--r--  include/llvm/ADT/ImmutableSet.h | 21
-rw-r--r--  include/llvm/ADT/IntervalMap.h | 6
-rw-r--r--  include/llvm/ADT/IntrusiveRefCntPtr.h | 8
-rw-r--r--  include/llvm/ADT/OwningPtr.h | 10
-rw-r--r--  include/llvm/ADT/PointerUnion.h | 46
-rw-r--r--  include/llvm/ADT/PostOrderIterator.h | 4
-rw-r--r--  include/llvm/ADT/SCCIterator.h | 225
-rw-r--r--  include/llvm/ADT/STLExtras.h | 155
-rw-r--r--  include/llvm/ADT/ScopedHashTable.h | 14
-rw-r--r--  include/llvm/ADT/SmallVector.h | 115
-rw-r--r--  include/llvm/ADT/SparseMultiSet.h | 2
-rw-r--r--  include/llvm/ADT/SparseSet.h | 2
-rw-r--r--  include/llvm/ADT/Statistic.h | 2
-rw-r--r--  include/llvm/ADT/StringExtras.h | 2
-rw-r--r--  include/llvm/ADT/StringMap.h | 66
-rw-r--r--  include/llvm/ADT/StringRef.h | 11
-rw-r--r--  include/llvm/ADT/StringSwitch.h | 2
-rw-r--r--  include/llvm/ADT/TinyPtrVector.h | 14
-rw-r--r--  include/llvm/ADT/Triple.h | 9
-rw-r--r--  include/llvm/ADT/Twine.h | 2
-rw-r--r--  include/llvm/ADT/edit_distance.h | 4
-rw-r--r--  include/llvm/ADT/ilist.h | 18
-rw-r--r--  include/llvm/ADT/ilist_node.h | 12
-rw-r--r--  include/llvm/ADT/iterator.h | 244
-rw-r--r--  include/llvm/ADT/iterator_range.h | 8
-rw-r--r--  include/llvm/Analysis/AliasAnalysis.h | 12
-rw-r--r--  include/llvm/Analysis/AliasSetTracker.h | 25
-rw-r--r--  include/llvm/Analysis/BlockFrequencyImpl.h | 379
-rw-r--r--  include/llvm/Analysis/BlockFrequencyInfo.h | 12
-rw-r--r--  include/llvm/Analysis/BlockFrequencyInfoImpl.h | 1859
-rw-r--r--  include/llvm/Analysis/BranchProbabilityInfo.h | 2
-rw-r--r--  include/llvm/Analysis/CFG.h | 8
-rw-r--r--  include/llvm/Analysis/CGSCCPassManager.h | 591
-rw-r--r--  include/llvm/Analysis/ConstantFolding.h | 22
-rw-r--r--  include/llvm/Analysis/DOTGraphTraitsPass.h | 1
-rw-r--r--  include/llvm/Analysis/DependenceAnalysis.h | 13
-rw-r--r--  include/llvm/Analysis/DominanceFrontier.h | 2
-rw-r--r--  include/llvm/Analysis/IVUsers.h | 2
-rw-r--r--  include/llvm/Analysis/InstructionSimplify.h | 192
-rw-r--r--  include/llvm/Analysis/IntervalPartition.h | 6
-rw-r--r--  include/llvm/Analysis/LazyCallGraph.h | 433
-rw-r--r--  include/llvm/Analysis/LazyValueInfo.h | 4
-rw-r--r--  include/llvm/Analysis/LibCallAliasAnalysis.h | 2
-rw-r--r--  include/llvm/Analysis/LibCallSemantics.h | 2
-rw-r--r--  include/llvm/Analysis/Loads.h | 7
-rw-r--r--  include/llvm/Analysis/LoopInfo.h | 49
-rw-r--r--  include/llvm/Analysis/LoopInfoImpl.h | 31
-rw-r--r--  include/llvm/Analysis/MemoryBuiltins.h | 2
-rw-r--r--  include/llvm/Analysis/MemoryDependenceAnalysis.h | 9
-rw-r--r--  include/llvm/Analysis/PHITransAddr.h | 3
-rw-r--r--  include/llvm/Analysis/PtrUseVisitor.h | 12
-rw-r--r--  include/llvm/Analysis/RegionInfo.h | 9
-rw-r--r--  include/llvm/Analysis/ScalarEvolution.h | 17
-rw-r--r--  include/llvm/Analysis/ScalarEvolutionExpander.h | 6
-rw-r--r--  include/llvm/Analysis/ScalarEvolutionExpressions.h | 88
-rw-r--r--  include/llvm/Analysis/SparsePropagation.h | 2
-rw-r--r--  include/llvm/Analysis/TargetTransformInfo.h | 6
-rw-r--r--  include/llvm/Analysis/ValueTracking.h | 37
-rw-r--r--  include/llvm/Bitcode/BitstreamReader.h | 12
-rw-r--r--  include/llvm/Bitcode/BitstreamWriter.h | 8
-rw-r--r--  include/llvm/Bitcode/LLVMBitCodes.h | 5
-rw-r--r--  include/llvm/Bitcode/ReaderWriter.h | 4
-rw-r--r--  include/llvm/CMakeLists.txt | 6
-rw-r--r--  include/llvm/CodeGen/Analysis.h | 2
-rw-r--r--  include/llvm/CodeGen/AsmPrinter.h | 957
-rw-r--r--  include/llvm/CodeGen/CallingConvLower.h | 56
-rw-r--r--  include/llvm/CodeGen/CommandFlags.h | 32
-rw-r--r--  include/llvm/CodeGen/EdgeBundles.h | 5
-rw-r--r--  include/llvm/CodeGen/FastISel.h | 6
-rw-r--r--  include/llvm/CodeGen/FunctionLoweringInfo.h | 4
-rw-r--r--  include/llvm/CodeGen/GCMetadata.h | 5
-rw-r--r--  include/llvm/CodeGen/GCStrategy.h | 4
-rw-r--r--  include/llvm/CodeGen/ISDOpcodes.h | 5
-rw-r--r--  include/llvm/CodeGen/JITCodeEmitter.h | 6
-rw-r--r--  include/llvm/CodeGen/LatencyPriorityQueue.h | 2
-rw-r--r--  include/llvm/CodeGen/LexicalScopes.h | 218
-rw-r--r--  include/llvm/CodeGen/LinkAllCodegenComponents.h | 15
-rw-r--r--  include/llvm/CodeGen/LiveInterval.h | 24
-rw-r--r--  include/llvm/CodeGen/LiveIntervalAnalysis.h | 6
-rw-r--r--  include/llvm/CodeGen/LiveIntervalUnion.h | 6
-rw-r--r--  include/llvm/CodeGen/LivePhysRegs.h | 2
-rw-r--r--  include/llvm/CodeGen/LiveRangeEdit.h | 4
-rw-r--r--  include/llvm/CodeGen/LiveRegMatrix.h | 3
-rw-r--r--  include/llvm/CodeGen/LiveStackAnalysis.h | 2
-rw-r--r--  include/llvm/CodeGen/MachineBasicBlock.h | 37
-rw-r--r--  include/llvm/CodeGen/MachineBlockFrequencyInfo.h | 12
-rw-r--r--  include/llvm/CodeGen/MachineCodeEmitter.h | 2
-rw-r--r--  include/llvm/CodeGen/MachineCodeInfo.h | 2
-rw-r--r--  include/llvm/CodeGen/MachineFrameInfo.h | 2
-rw-r--r--  include/llvm/CodeGen/MachineFunction.h | 17
-rw-r--r--  include/llvm/CodeGen/MachineInstr.h | 89
-rw-r--r--  include/llvm/CodeGen/MachineInstrBuilder.h | 2
-rw-r--r--  include/llvm/CodeGen/MachineInstrBundle.h | 2
-rw-r--r--  include/llvm/CodeGen/MachineMemOperand.h | 31
-rw-r--r--  include/llvm/CodeGen/MachineModuleInfo.h | 6
-rw-r--r--  include/llvm/CodeGen/MachineOperand.h | 20
-rw-r--r--  include/llvm/CodeGen/MachinePassRegistry.h | 4
-rw-r--r--  include/llvm/CodeGen/MachinePostDominators.h | 2
-rw-r--r--  include/llvm/CodeGen/MachineRegisterInfo.h | 66
-rw-r--r--  include/llvm/CodeGen/MachineSSAUpdater.h | 2
-rw-r--r--  include/llvm/CodeGen/MachineScheduler.h | 39
-rw-r--r--  include/llvm/CodeGen/MachineTraceMetrics.h | 2
-rw-r--r--  include/llvm/CodeGen/MachineValueType.h | 1
-rw-r--r--  include/llvm/CodeGen/PBQP/CostAllocator.h | 2
-rw-r--r--  include/llvm/CodeGen/PBQP/Graph.h | 8
-rw-r--r--  include/llvm/CodeGen/PBQP/RegAllocSolver.h | 4
-rw-r--r--  include/llvm/CodeGen/Passes.h | 21
-rw-r--r--  include/llvm/CodeGen/PseudoSourceValue.h | 31
-rw-r--r--  include/llvm/CodeGen/RegAllocPBQP.h | 2
-rw-r--r--  include/llvm/CodeGen/RegisterClassInfo.h | 9
-rw-r--r--  include/llvm/CodeGen/RegisterPressure.h | 15
-rw-r--r--  include/llvm/CodeGen/RegisterScavenging.h | 6
-rw-r--r--  include/llvm/CodeGen/ResourcePriorityQueue.h | 2
-rw-r--r--  include/llvm/CodeGen/ScheduleDAG.h | 71
-rw-r--r--  include/llvm/CodeGen/ScheduleDAGInstrs.h | 13
-rw-r--r--  include/llvm/CodeGen/ScoreboardHazardRecognizer.h | 4
-rw-r--r--  include/llvm/CodeGen/SelectionDAG.h | 134
-rw-r--r--  include/llvm/CodeGen/SelectionDAGISel.h | 4
-rw-r--r--  include/llvm/CodeGen/SelectionDAGNodes.h | 83
-rw-r--r--  include/llvm/CodeGen/SlotIndexes.h | 22
-rw-r--r--  include/llvm/CodeGen/StackMaps.h | 40
-rw-r--r--  include/llvm/CodeGen/StackProtector.h | 5
-rw-r--r--  include/llvm/CodeGen/TargetLoweringObjectFileImpl.h | 17
-rw-r--r--  include/llvm/CodeGen/TargetSchedule.h | 6
-rw-r--r--  include/llvm/CodeGen/ValueTypes.h | 6
-rw-r--r--  include/llvm/CodeGen/VirtRegMap.h | 2
-rw-r--r--  include/llvm/DebugInfo/DIContext.h | 50
-rw-r--r--  include/llvm/DebugInfo/DWARFFormValue.h | 4
-rw-r--r--  include/llvm/ExecutionEngine/ExecutionEngine.h | 55
-rw-r--r--  include/llvm/ExecutionEngine/JITEventListener.h | 8
-rw-r--r--  include/llvm/ExecutionEngine/ObjectImage.h | 8
-rw-r--r--  include/llvm/ExecutionEngine/RTDyldMemoryManager.h | 2
-rw-r--r--  include/llvm/ExecutionEngine/RuntimeDyld.h | 2
-rw-r--r--  include/llvm/ExecutionEngine/SectionMemoryManager.h | 2
-rw-r--r--  include/llvm/IR/Argument.h | 6
-rw-r--r--  include/llvm/IR/Attributes.h | 14
-rw-r--r--  include/llvm/IR/BasicBlock.h | 13
-rw-r--r--  include/llvm/IR/CallSite.h | 13
-rw-r--r--  include/llvm/IR/CallingConv.h | 8
-rw-r--r--  include/llvm/IR/ConstantRange.h | 4
-rw-r--r--  include/llvm/IR/Constants.h | 8
-rw-r--r--  include/llvm/IR/DIBuilder.h | 54
-rw-r--r--  include/llvm/IR/DataLayout.h | 17
-rw-r--r--  include/llvm/IR/DebugInfo.h | 51
-rw-r--r--  include/llvm/IR/DebugLoc.h | 5
-rw-r--r--  include/llvm/IR/DerivedTypes.h | 6
-rw-r--r--  include/llvm/IR/DiagnosticInfo.h | 192
-rw-r--r--  include/llvm/IR/Dominators.h | 2
-rw-r--r--  include/llvm/IR/Function.h | 18
-rw-r--r--  include/llvm/IR/GVMaterializer.h | 20
-rw-r--r--  include/llvm/IR/GetElementPtrTypeIterator.h | 4
-rw-r--r--  include/llvm/IR/GlobalAlias.h | 47
-rw-r--r--  include/llvm/IR/GlobalObject.h | 58
-rw-r--r--  include/llvm/IR/GlobalValue.h | 143
-rw-r--r--  include/llvm/IR/GlobalVariable.h | 8
-rw-r--r--  include/llvm/IR/IRBuilder.h | 70
-rw-r--r--  include/llvm/IR/InstrTypes.h | 36
-rw-r--r--  include/llvm/IR/Instruction.h | 6
-rw-r--r--  include/llvm/IR/Instructions.h | 271
-rw-r--r--  include/llvm/IR/Intrinsics.td | 5
-rw-r--r--  include/llvm/IR/IntrinsicsAArch64.td | 983
-rw-r--r--  include/llvm/IR/IntrinsicsARM.td | 8
-rw-r--r--  include/llvm/IR/IntrinsicsARM64.td | 628
-rw-r--r--  include/llvm/IR/IntrinsicsNVVM.td | 920
-rw-r--r--  include/llvm/IR/IntrinsicsX86.td | 197
-rw-r--r--  include/llvm/IR/LLVMContext.h | 36
-rw-r--r--  include/llvm/IR/LegacyPassManagers.h | 12
-rw-r--r--  include/llvm/IR/LegacyPassNameParser.h | 8
-rw-r--r--  include/llvm/IR/MDBuilder.h | 120
-rw-r--r--  include/llvm/IR/Metadata.h | 4
-rw-r--r--  include/llvm/IR/Module.h | 148
-rw-r--r--  include/llvm/IR/PassManager.h | 12
-rw-r--r--  include/llvm/IR/PredIteratorCache.h | 2
-rw-r--r--  include/llvm/IR/SymbolTableListTraits.h | 8
-rw-r--r--  include/llvm/IR/Type.h | 6
-rw-r--r--  include/llvm/IR/Use.h | 4
-rw-r--r--  include/llvm/IR/User.h | 37
-rw-r--r--  include/llvm/IR/Value.h | 68
-rw-r--r--  include/llvm/IR/ValueHandle.h | 10
-rw-r--r--  include/llvm/IR/ValueMap.h | 6
-rw-r--r--  include/llvm/IR/Verifier.h | 17
-rw-r--r--  include/llvm/InitializePasses.h | 3
-rw-r--r--  include/llvm/LTO/LTOCodeGenerator.h | 42
-rw-r--r--  include/llvm/LTO/LTOModule.h | 152
-rw-r--r--  include/llvm/LineEditor/LineEditor.h | 4
-rw-r--r--  include/llvm/LinkAllPasses.h | 17
-rw-r--r--  include/llvm/MC/MCAsmInfo.h | 2
-rw-r--r--  include/llvm/MC/MCAsmLayout.h | 8
-rw-r--r--  include/llvm/MC/MCAssembler.h | 81
-rw-r--r--  include/llvm/MC/MCContext.h | 37
-rw-r--r--  include/llvm/MC/MCDisassembler.h | 33
-rw-r--r--  include/llvm/MC/MCDwarf.h | 18
-rw-r--r--  include/llvm/MC/MCELFStreamer.h | 7
-rw-r--r--  include/llvm/MC/MCELFSymbolFlags.h | 8
-rw-r--r--  include/llvm/MC/MCExpr.h | 15
-rw-r--r--  include/llvm/MC/MCExternalSymbolizer.h | 2
-rw-r--r--  include/llvm/MC/MCFixup.h | 2
-rw-r--r--  include/llvm/MC/MCFunction.h | 12
-rw-r--r--  include/llvm/MC/MCInst.h | 8
-rw-r--r--  include/llvm/MC/MCInstPrinter.h | 5
-rw-r--r--  include/llvm/MC/MCInstrDesc.h | 6
-rw-r--r--  include/llvm/MC/MCInstrItineraries.h | 6
-rw-r--r--  include/llvm/MC/MCModule.h | 5
-rw-r--r--  include/llvm/MC/MCObjectFileInfo.h | 9
-rw-r--r--  include/llvm/MC/MCObjectStreamer.h | 15
-rw-r--r--  include/llvm/MC/MCParser/AsmLexer.h | 2
-rw-r--r--  include/llvm/MC/MCParser/MCAsmParser.h | 2
-rw-r--r--  include/llvm/MC/MCParser/MCParsedAsmOperand.h | 2
-rw-r--r--  include/llvm/MC/MCRegisterInfo.h | 8
-rw-r--r--  include/llvm/MC/MCSchedule.h | 21
-rw-r--r--  include/llvm/MC/MCSectionCOFF.h | 5
-rw-r--r--  include/llvm/MC/MCSectionELF.h | 5
-rw-r--r--  include/llvm/MC/MCStreamer.h | 49
-rw-r--r--  include/llvm/MC/MCSubtargetInfo.h | 13
-rw-r--r--  include/llvm/MC/MCSymbol.h | 8
-rw-r--r--  include/llvm/MC/MCTargetAsmParser.h | 16
-rw-r--r--  include/llvm/MC/MCTargetOptions.h | 54
-rw-r--r--  include/llvm/MC/MCTargetOptionsCommandFlags.h | 44
-rw-r--r--  include/llvm/MC/MCValue.h | 12
-rw-r--r--  include/llvm/MC/MCWin64EH.h | 10
-rw-r--r--  include/llvm/MC/MCWinCOFFObjectWriter.h | 1
-rw-r--r--  include/llvm/MC/MCWinCOFFStreamer.h | 75
-rw-r--r--  include/llvm/MC/SubtargetFeature.h | 13
-rw-r--r--  include/llvm/Object/Archive.h | 8
-rw-r--r--  include/llvm/Object/Binary.h | 3
-rw-r--r--  include/llvm/Object/COFF.h | 9
-rw-r--r--  include/llvm/Object/COFFYAML.h | 9
-rw-r--r--  include/llvm/Object/ELF.h | 112
-rw-r--r--  include/llvm/Object/ELFObjectFile.h | 25
-rw-r--r--  include/llvm/Object/ELFYAML.h | 47
-rw-r--r--  include/llvm/Object/MachO.h | 6
-rw-r--r--  include/llvm/Object/MachOUniversal.h | 7
-rw-r--r--  include/llvm/Object/ObjectFile.h | 55
-rw-r--r--  include/llvm/Object/StringTableBuilder.h | 59
-rw-r--r--  include/llvm/Object/SymbolicFile.h | 5
-rw-r--r--  include/llvm/Object/YAML.h | 1
-rw-r--r--  include/llvm/Option/Arg.h | 13
-rw-r--r--  include/llvm/Option/ArgList.h | 23
-rw-r--r--  include/llvm/Option/OptSpecifier.h | 2
-rw-r--r--  include/llvm/Option/Option.h | 2
-rw-r--r--  include/llvm/Pass.h | 3
-rw-r--r--  include/llvm/PassAnalysisSupport.h | 4
-rw-r--r--  include/llvm/PassRegistry.h | 2
-rw-r--r--  include/llvm/PassSupport.h | 8
-rw-r--r--  include/llvm/ProfileData/InstrProf.h | 1
-rw-r--r--  include/llvm/ProfileData/InstrProfReader.h | 107
-rw-r--r--  include/llvm/ProfileData/InstrProfWriter.h | 2
-rw-r--r--  include/llvm/Support/ARMBuildAttributes.h | 13
-rw-r--r--  include/llvm/Support/Allocator.h | 410
-rw-r--r--  include/llvm/Support/ArrayRecycler.h | 4
-rw-r--r--  include/llvm/Support/BlockFrequency.h | 10
-rw-r--r--  include/llvm/Support/BranchProbability.h | 34
-rw-r--r--  include/llvm/Support/COFF.h | 6
-rw-r--r--  include/llvm/Support/Casting.h | 10
-rw-r--r--  include/llvm/Support/CommandLine.h | 29
-rw-r--r--  include/llvm/Support/Compression.h | 7
-rw-r--r--  include/llvm/Support/CrashRecoveryContext.h | 35
-rw-r--r--  include/llvm/Support/Debug.h | 16
-rw-r--r--  include/llvm/Support/DynamicLibrary.h | 4
-rw-r--r--  include/llvm/Support/ELF.h | 17
-rw-r--r--  include/llvm/Support/ErrorHandling.h | 10
-rw-r--r--  include/llvm/Support/FileOutputBuffer.h | 4
-rw-r--r--  include/llvm/Support/FileSystem.h | 29
-rw-r--r--  include/llvm/Support/FileUtilities.h | 2
-rw-r--r--  include/llvm/Support/FormattedStream.h | 8
-rw-r--r--  include/llvm/Support/GCOV.h | 50
-rw-r--r--  include/llvm/Support/GenericDomTree.h | 45
-rw-r--r--  include/llvm/Support/GenericDomTreeConstruction.h | 12
-rw-r--r--  include/llvm/Support/GraphWriter.h | 9
-rw-r--r--  include/llvm/Support/LEB128.h | 2
-rw-r--r--  include/llvm/Support/LineIterator.h | 5
-rw-r--r--  include/llvm/Support/LockFileManager.h | 12
-rw-r--r--  include/llvm/Support/MachO.h | 48
-rw-r--r--  include/llvm/Support/ManagedStatic.h | 2
-rw-r--r--  include/llvm/Support/Memory.h | 10
-rw-r--r--  include/llvm/Support/MemoryBuffer.h | 36
-rw-r--r--  include/llvm/Support/OnDiskHashTable.h | 571
-rw-r--r--  include/llvm/Support/Path.h | 5
-rw-r--r--  include/llvm/Support/Program.h | 26
-rw-r--r--  include/llvm/Support/Regex.h | 7
-rw-r--r--  include/llvm/Support/Registry.h | 15
-rw-r--r--  include/llvm/Support/SMLoc.h | 4
-rw-r--r--  include/llvm/Support/SaveAndRestore.h | 48
-rw-r--r--  include/llvm/Support/Signals.h | 2
-rw-r--r--  include/llvm/Support/SourceMgr.h | 9
-rw-r--r--  include/llvm/Support/StreamableMemoryObject.h | 2
-rw-r--r--  include/llvm/Support/StringPool.h | 8
-rw-r--r--  include/llvm/Support/TargetRegistry.h | 85
-rw-r--r--  include/llvm/Support/Timer.h | 14
-rw-r--r--  include/llvm/Support/Unicode.h | 5
-rw-r--r--  include/llvm/Support/UnicodeCharRanges.h | 4
-rw-r--r--  include/llvm/Support/YAMLParser.h | 181
-rw-r--r--  include/llvm/Support/YAMLTraits.h | 157
-rw-r--r--  include/llvm/Support/circular_raw_ostream.h | 8
-rw-r--r--  include/llvm/Support/raw_ostream.h | 15
-rw-r--r--  include/llvm/Support/system_error.h | 4
-rw-r--r--  include/llvm/TableGen/Error.h | 1
-rw-r--r--  include/llvm/TableGen/Main.h | 1
-rw-r--r--  include/llvm/TableGen/Record.h | 184
-rw-r--r--  include/llvm/TableGen/StringMatcher.h | 12
-rw-r--r--  include/llvm/TableGen/StringToOffsetTable.h | 10
-rw-r--r--  include/llvm/Target/Target.td | 9
-rw-r--r--  include/llvm/Target/TargetCallingConv.h | 12
-rw-r--r--  include/llvm/Target/TargetCallingConv.td | 5
-rw-r--r--  include/llvm/Target/TargetFrameLowering.h | 6
-rw-r--r--  include/llvm/Target/TargetInstrInfo.h | 18
-rw-r--r--  include/llvm/Target/TargetIntrinsicInfo.h | 4
-rw-r--r--  include/llvm/Target/TargetLowering.h | 247
-rw-r--r--  include/llvm/Target/TargetLoweringObjectFile.h | 19
-rw-r--r--  include/llvm/Target/TargetMachine.h | 87
-rw-r--r--  include/llvm/Target/TargetOptions.h | 27
-rw-r--r--  include/llvm/Target/TargetRegisterInfo.h | 18
-rw-r--r--  include/llvm/Target/TargetSchedule.td | 2
-rw-r--r--  include/llvm/Target/TargetSubtargetInfo.h | 12
-rw-r--r--  include/llvm/Transforms/IPO.h | 3
-rw-r--r--  include/llvm/Transforms/IPO/PassManagerBuilder.h | 11
-rw-r--r--  include/llvm/Transforms/Instrumentation.h | 4
-rw-r--r--  include/llvm/Transforms/ObjCARC.h | 1
-rw-r--r--  include/llvm/Transforms/Scalar.h | 24
-rw-r--r--  include/llvm/Transforms/Utils/BasicBlockUtils.h | 24
-rw-r--r--  include/llvm/Transforms/Utils/BuildLibCalls.h | 3
-rw-r--r--  include/llvm/Transforms/Utils/Cloning.h | 38
-rw-r--r--  include/llvm/Transforms/Utils/CmpInstAnalysis.h | 1
-rw-r--r--  include/llvm/Transforms/Utils/CodeExtractor.h | 3
-rw-r--r--  include/llvm/Transforms/Utils/CtorUtils.h | 32
-rw-r--r--  include/llvm/Transforms/Utils/IntegerDivision.h | 8
-rw-r--r--  include/llvm/Transforms/Utils/Local.h | 34
-rw-r--r--  include/llvm/Transforms/Utils/LoopUtils.h | 8
-rw-r--r--  include/llvm/Transforms/Utils/PromoteMemToReg.h | 2
-rw-r--r--  include/llvm/Transforms/Utils/SSAUpdater.h | 12
-rw-r--r--  include/llvm/Transforms/Utils/SSAUpdaterImpl.h | 16
-rw-r--r--  include/llvm/Transforms/Utils/SimplifyIndVar.h | 5
-rw-r--r--  include/llvm/Transforms/Utils/SimplifyLibCalls.h | 1
-rw-r--r--  include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h | 3
-rw-r--r--  include/llvm/Transforms/Utils/UnrollLoop.h | 1
-rw-r--r--  include/llvm/Transforms/Utils/ValueMapper.h | 31
-rw-r--r--  include/llvm/Transforms/Utils/VectorUtils.h | 180
-rw-r--r--  include/llvm/Transforms/Vectorize.h | 3
-rw-r--r--  include/llvm/module.modulemap | 177
-rw-r--r--  include/llvm/module.modulemap.build | 5
355 files changed, 11053 insertions, 5598 deletions
diff --git a/include/llvm-c/Core.h b/include/llvm-c/Core.h
index 50c5e3a..f37e3f8 100644
--- a/include/llvm-c/Core.h
+++ b/include/llvm-c/Core.h
@@ -124,6 +124,12 @@ typedef struct LLVMOpaquePassRegistry *LLVMPassRegistryRef;
* @see llvm::Use */
typedef struct LLVMOpaqueUse *LLVMUseRef;
+
+/**
+ * @see llvm::DiagnosticInfo
+ */
+typedef struct LLVMOpaqueDiagnosticInfo *LLVMDiagnosticInfoRef;
+
typedef enum {
LLVMZExtAttribute = 1<<0,
LLVMSExtAttribute = 1<<1,
@@ -159,7 +165,8 @@ typedef enum {
LLVMStackProtectStrongAttribute = 1ULL<<33,
LLVMCold = 1ULL << 34,
LLVMOptimizeNone = 1ULL << 35,
- LLVMInAllocaAttribute = 1ULL << 36
+ LLVMInAllocaAttribute = 1ULL << 36,
+ LLVMNonNullAttribute = 1ULL << 37
*/
} LLVMAttribute;
@@ -400,6 +407,13 @@ typedef enum {
the old one */
} LLVMAtomicRMWBinOp;
+typedef enum {
+ LLVMDSError,
+ LLVMDSWarning,
+ LLVMDSRemark,
+ LLVMDSNote
+} LLVMDiagnosticSeverity;
+
/**
* @}
*/
@@ -453,6 +467,9 @@ void LLVMEnablePrettyStackTrace(void);
* @{
*/
+typedef void (*LLVMDiagnosticHandler)(LLVMDiagnosticInfoRef, void *);
+typedef void (*LLVMYieldCallback)(LLVMContextRef, void *);
+
/**
* Create a new context.
*
@@ -467,6 +484,21 @@ LLVMContextRef LLVMContextCreate(void);
LLVMContextRef LLVMGetGlobalContext(void);
/**
+ * Set the diagnostic handler for this context.
+ */
+void LLVMContextSetDiagnosticHandler(LLVMContextRef C,
+ LLVMDiagnosticHandler Handler,
+ void *DiagnosticContext);
+
+/**
+ * Set the yield callback function for this context.
+ *
+ * @see LLVMContext::setYieldCallback()
+ */
+void LLVMContextSetYieldCallback(LLVMContextRef C, LLVMYieldCallback Callback,
+ void *OpaqueHandle);
+
+/**
* Destroy a context instance.
*
* This should be called for every call to LLVMContextCreate() or memory
@@ -474,6 +506,21 @@ LLVMContextRef LLVMGetGlobalContext(void);
*/
void LLVMContextDispose(LLVMContextRef C);
+/**
+ * Return a string representation of the DiagnosticInfo. Use
+ * LLVMDisposeMessage to free the string.
+ *
+ * @see DiagnosticInfo::print()
+ */
+char *LLVMGetDiagInfoDescription(LLVMDiagnosticInfoRef DI);
+
+/**
+ * Return an enum LLVMDiagnosticSeverity.
+ *
+ * @see DiagnosticInfo::getSeverity()
+ */
+LLVMDiagnosticSeverity LLVMGetDiagInfoSeverity(LLVMDiagnosticInfoRef DI);
+
unsigned LLVMGetMDKindIDInContext(LLVMContextRef C, const char* Name,
unsigned SLen);
unsigned LLVMGetMDKindID(const char* Name, unsigned SLen);
@@ -1121,9 +1168,10 @@ LLVMTypeRef LLVMX86MMXType(void);
macro(ConstantStruct) \
macro(ConstantVector) \
macro(GlobalValue) \
- macro(Function) \
macro(GlobalAlias) \
- macro(GlobalVariable) \
+ macro(GlobalObject) \
+ macro(Function) \
+ macro(GlobalVariable) \
macro(UndefValue) \
macro(Instruction) \
macro(BinaryOperator) \
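The new diagnostic hooks in this Core.h diff (LLVMDiagnosticInfoRef, LLVMContextSetDiagnosticHandler, LLVMGetDiagInfoDescription, LLVMGetDiagInfoSeverity) can be exercised from client code roughly as below. This is a minimal sketch against the post-rebase C API, compiling as C or C++; module construction and error handling are omitted.

    #include <stdio.h>
    #include <llvm-c/Core.h>

    /* Called by LLVM for every diagnostic emitted within the context. */
    static void handleDiagnostic(LLVMDiagnosticInfoRef DI, void *UserData) {
      char *Msg = LLVMGetDiagInfoDescription(DI);       /* caller must free */
      LLVMDiagnosticSeverity Sev = LLVMGetDiagInfoSeverity(DI);
      fprintf(stderr, "llvm diag (severity %d): %s\n", (int)Sev, Msg);
      LLVMDisposeMessage(Msg);
    }

    int main(void) {
      LLVMContextRef Ctx = LLVMContextCreate();
      LLVMContextSetDiagnosticHandler(Ctx, handleDiagnostic, NULL);
      /* ... build or parse modules in Ctx; their diagnostics now reach
         handleDiagnostic instead of being printed by default ... */
      LLVMContextDispose(Ctx);
      return 0;
    }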
diff --git a/include/llvm-c/Object.h b/include/llvm-c/Object.h
index c271552..447fcea 100644
--- a/include/llvm-c/Object.h
+++ b/include/llvm-c/Object.h
@@ -78,7 +78,6 @@ void LLVMMoveToNextRelocation(LLVMRelocationIteratorRef RI);
// SymbolRef accessors
const char *LLVMGetSymbolName(LLVMSymbolIteratorRef SI);
uint64_t LLVMGetSymbolAddress(LLVMSymbolIteratorRef SI);
-uint64_t LLVMGetSymbolFileOffset(LLVMSymbolIteratorRef SI);
uint64_t LLVMGetSymbolSize(LLVMSymbolIteratorRef SI);
// RelocationRef accessors
diff --git a/include/llvm-c/Transforms/PassManagerBuilder.h b/include/llvm-c/Transforms/PassManagerBuilder.h
index 545f8aa..3d7a9d6 100644
--- a/include/llvm-c/Transforms/PassManagerBuilder.h
+++ b/include/llvm-c/Transforms/PassManagerBuilder.h
@@ -19,7 +19,6 @@
typedef struct LLVMOpaquePassManagerBuilder *LLVMPassManagerBuilderRef;
#ifdef __cplusplus
-#include "llvm/Transforms/IPO/PassManagerBuilder.h"
extern "C" {
#endif
diff --git a/include/llvm-c/lto.h b/include/llvm-c/lto.h
index 049c4d7..51079896 100644
--- a/include/llvm-c/lto.h
+++ b/include/llvm-c/lto.h
@@ -40,7 +40,7 @@ typedef bool lto_bool_t;
* @{
*/
-#define LTO_API_VERSION 10
+#define LTO_API_VERSION 11
/**
* \since prior to LTO_API_VERSION=3
@@ -79,14 +79,15 @@ typedef enum {
typedef enum {
LTO_CODEGEN_PIC_MODEL_STATIC = 0,
LTO_CODEGEN_PIC_MODEL_DYNAMIC = 1,
- LTO_CODEGEN_PIC_MODEL_DYNAMIC_NO_PIC = 2
+ LTO_CODEGEN_PIC_MODEL_DYNAMIC_NO_PIC = 2,
+ LTO_CODEGEN_PIC_MODEL_DEFAULT = 3
} lto_codegen_model;
/** opaque reference to a loaded object module */
-typedef struct LTOModule* lto_module_t;
+typedef struct LLVMOpaqueLTOModule *lto_module_t;
/** opaque reference to a code generator */
-typedef struct LTOCodeGenerator* lto_code_gen_t;
+typedef struct LLVMOpaqueLTOCodeGenerator *lto_code_gen_t;
#ifdef __cplusplus
extern "C" {
@@ -374,6 +375,14 @@ lto_codegen_set_pic_model(lto_code_gen_t cg, lto_codegen_model);
extern void
lto_codegen_set_cpu(lto_code_gen_t cg, const char *cpu);
+/**
+ * Sets attributes for the cpu to generate code for.
+ *
+ * \since LTO_API_VERSION=11
+ */
+extern void
+lto_codegen_set_attr(lto_code_gen_t cg, const char *attr);
+
/**
* Sets the location of the assembler tool to run. If not set, libLTO
@@ -394,8 +403,9 @@ lto_codegen_set_assembler_args(lto_code_gen_t cg, const char **args,
int nargs);
/**
- * Tells LTO optimization passes that this symbol must be preserved
- * because it is referenced by native code or a command line option.
+ * Adds to a list of all global symbols that must exist in the final generated
+ * code. If a function is not listed there, it might be inlined into every usage
+ * and optimized away.
*
* \since prior to LTO_API_VERSION=3
*/
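A hedged sketch of how the LTO_API_VERSION=11 additions above fit into an existing libLTO client follows. The CPU and attribute strings are illustrative only, and lto_codegen_add_must_preserve_symbol is the pre-existing entry point whose documentation is reworded in this hunk.

    #include <llvm-c/lto.h>

    int main(void) {
      lto_code_gen_t CG = lto_codegen_create();
      lto_codegen_set_pic_model(CG, LTO_CODEGEN_PIC_MODEL_DEFAULT); /* new enumerator */
      lto_codegen_set_cpu(CG, "corei7");
      lto_codegen_set_attr(CG, "+sse4.2");   /* new in LTO_API_VERSION=11; string is illustrative */
      /* Keep main from being internalized and optimized away. */
      lto_codegen_add_must_preserve_symbol(CG, "main");
      /* ... add inputs with lto_codegen_add_module(), then run lto_codegen_compile() ... */
      lto_codegen_dispose(CG);
      return 0;
    }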
diff --git a/include/llvm-c/module.modulemap b/include/llvm-c/module.modulemap
new file mode 100644
index 0000000..2bcdbc1
--- /dev/null
+++ b/include/llvm-c/module.modulemap
@@ -0,0 +1,5 @@
+module LLVM_C {
+ requires cplusplus
+ umbrella "."
+ module * { export * }
+}
diff --git a/include/llvm/ADT/APFloat.h b/include/llvm/ADT/APFloat.h
index acfefe9..50f1463 100644
--- a/include/llvm/ADT/APFloat.h
+++ b/include/llvm/ADT/APFloat.h
@@ -236,19 +236,19 @@ public:
APInt fill(64, type);
return getQNaN(Sem, Negative, &fill);
} else {
- return getQNaN(Sem, Negative, 0);
+ return getQNaN(Sem, Negative, nullptr);
}
}
/// Factory for QNaN values.
static APFloat getQNaN(const fltSemantics &Sem, bool Negative = false,
- const APInt *payload = 0) {
+ const APInt *payload = nullptr) {
return makeNaN(Sem, false, Negative, payload);
}
/// Factory for SNaN values.
static APFloat getSNaN(const fltSemantics &Sem, bool Negative = false,
- const APInt *payload = 0) {
+ const APInt *payload = nullptr) {
return makeNaN(Sem, true, Negative, payload);
}
@@ -500,7 +500,8 @@ private:
void makeLargest(bool Neg = false);
void makeSmallest(bool Neg = false);
- void makeNaN(bool SNaN = false, bool Neg = false, const APInt *fill = 0);
+ void makeNaN(bool SNaN = false, bool Neg = false,
+ const APInt *fill = nullptr);
static APFloat makeNaN(const fltSemantics &Sem, bool SNaN, bool Negative,
const APInt *fill);
void makeInf(bool Neg = false);
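For reference, a small sketch of the NaN factories touched above; their behaviour is unchanged, only the default payload argument is now spelled nullptr. It assumes the LLVM 3.5-era spelling APFloat::IEEEdouble for the semantics object.

    #include "llvm/ADT/APFloat.h"

    using namespace llvm;

    // Build a quiet NaN with the default (null) payload and a negative
    // signaling NaN, both in IEEE double semantics.
    static void makeNaNs() {
      APFloat QNaN = APFloat::getQNaN(APFloat::IEEEdouble);
      APFloat SNaN = APFloat::getSNaN(APFloat::IEEEdouble, /*Negative=*/true);
      (void)QNaN;
      (void)SNaN;
    }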
diff --git a/include/llvm/ADT/ArrayRef.h b/include/llvm/ADT/ArrayRef.h
index fcf280d..1b64fee 100644
--- a/include/llvm/ADT/ArrayRef.h
+++ b/include/llvm/ADT/ArrayRef.h
@@ -12,7 +12,6 @@
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/Allocator.h"
#include <vector>
namespace llvm {
@@ -49,10 +48,10 @@ namespace llvm {
/// @{
/// Construct an empty ArrayRef.
- /*implicit*/ ArrayRef() : Data(0), Length(0) {}
+ /*implicit*/ ArrayRef() : Data(nullptr), Length(0) {}
/// Construct an empty ArrayRef from None.
- /*implicit*/ ArrayRef(NoneType) : Data(0), Length(0) {}
+ /*implicit*/ ArrayRef(NoneType) : Data(nullptr), Length(0) {}
/// Construct an ArrayRef from a single element.
/*implicit*/ ArrayRef(const T &OneElt)
@@ -121,9 +120,9 @@ namespace llvm {
return Data[Length-1];
}
- // copy - Allocate copy in BumpPtrAllocator and return ArrayRef<T> to it.
- ArrayRef<T> copy(BumpPtrAllocator &Allocator) {
- T *Buff = Allocator.Allocate<T>(Length);
+ // copy - Allocate copy in Allocator and return ArrayRef<T> to it.
+ template <typename Allocator> ArrayRef<T> copy(Allocator &A) {
+ T *Buff = A.template Allocate<T>(Length);
std::copy(begin(), end(), Buff);
return ArrayRef<T>(Buff, Length);
}
@@ -132,10 +131,7 @@ namespace llvm {
bool equals(ArrayRef RHS) const {
if (Length != RHS.Length)
return false;
- for (size_type i = 0; i != Length; i++)
- if (Data[i] != RHS.Data[i])
- return false;
- return true;
+ return std::equal(begin(), end(), RHS.begin());
}
/// slice(n) - Chop off the first N elements of the array.
@@ -221,7 +217,7 @@ namespace llvm {
/// Construct an MutableArrayRef from a C array.
template <size_t N>
- /*implicit*/ MutableArrayRef(T (&Arr)[N])
+ /*implicit*/ LLVM_CONSTEXPR MutableArrayRef(T (&Arr)[N])
: ArrayRef<T>(Arr) {}
T *data() const { return const_cast<T*>(ArrayRef<T>::data()); }
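The copy() change above generalizes the allocator parameter from a hard-coded BumpPtrAllocator to any allocator type, which is also why the Allocator.h include could be dropped from ArrayRef.h. A minimal sketch of the common use, assuming the caller still hands in a BumpPtrAllocator:

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/Support/Allocator.h"

    using namespace llvm;

    // Copy a possibly short-lived buffer into allocator-owned storage and
    // return a stable ArrayRef over that copy.
    static ArrayRef<int> internValues(ArrayRef<int> Tmp, BumpPtrAllocator &Alloc) {
      return Tmp.copy(Alloc);   // copy() is now templated over the allocator type
    }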
diff --git a/include/llvm/ADT/BitVector.h b/include/llvm/ADT/BitVector.h
index b531820..da2b3ad 100644
--- a/include/llvm/ADT/BitVector.h
+++ b/include/llvm/ADT/BitVector.h
@@ -72,7 +72,7 @@ public:
/// BitVector default ctor - Creates an empty bitvector.
BitVector() : Size(0), Capacity(0) {
- Bits = 0;
+ Bits = nullptr;
}
/// BitVector ctor - Creates a bitvector of specified number of bits. All
@@ -88,7 +88,7 @@ public:
/// BitVector copy ctor.
BitVector(const BitVector &RHS) : Size(RHS.size()) {
if (Size == 0) {
- Bits = 0;
+ Bits = nullptr;
Capacity = 0;
return;
}
@@ -100,7 +100,7 @@ public:
BitVector(BitVector &&RHS)
: Bits(RHS.Bits), Size(RHS.Size), Capacity(RHS.Capacity) {
- RHS.Bits = 0;
+ RHS.Bits = nullptr;
}
~BitVector() {
@@ -467,7 +467,7 @@ public:
Size = RHS.Size;
Capacity = RHS.Capacity;
- RHS.Bits = 0;
+ RHS.Bits = nullptr;
return *this;
}
diff --git a/include/llvm/ADT/DenseMap.h b/include/llvm/ADT/DenseMap.h
index 037989f..8269132 100644
--- a/include/llvm/ADT/DenseMap.h
+++ b/include/llvm/ADT/DenseMap.h
@@ -461,12 +461,12 @@ private:
const unsigned NumBuckets = getNumBuckets();
if (NumBuckets == 0) {
- FoundBucket = 0;
+ FoundBucket = nullptr;
return false;
}
// FoundTombstone - Keep track of whether we find a tombstone while probing.
- const BucketT *FoundTombstone = 0;
+ const BucketT *FoundTombstone = nullptr;
const KeyT EmptyKey = getEmptyKey();
const KeyT TombstoneKey = getTombstoneKey();
assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
@@ -665,7 +665,7 @@ private:
bool allocateBuckets(unsigned Num) {
NumBuckets = Num;
if (NumBuckets == 0) {
- Buckets = 0;
+ Buckets = nullptr;
return false;
}
@@ -985,7 +985,7 @@ public:
private:
pointer Ptr, End;
public:
- DenseMapIterator() : Ptr(0), End(0) {}
+ DenseMapIterator() : Ptr(nullptr), End(nullptr) {}
DenseMapIterator(pointer Pos, pointer E, bool NoAdvance = false)
: Ptr(Pos), End(E) {
diff --git a/include/llvm/ADT/DepthFirstIterator.h b/include/llvm/ADT/DepthFirstIterator.h
index 6445442..dfba43f 100644
--- a/include/llvm/ADT/DepthFirstIterator.h
+++ b/include/llvm/ADT/DepthFirstIterator.h
@@ -33,6 +33,7 @@
#ifndef LLVM_ADT_DEPTHFIRSTITERATOR_H
#define LLVM_ADT_DEPTHFIRSTITERATOR_H
+#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -207,6 +208,12 @@ df_iterator<T> df_end(const T& G) {
return df_iterator<T>::end(G);
}
+// Provide an accessor method to use them in range-based patterns.
+template <class T>
+iterator_range<df_iterator<T>> depth_first(const T& G) {
+ return iterator_range<df_iterator<T>>(df_begin(G), df_end(G));
+}
+
// Provide global definitions of external depth first iterators...
template <class T, class SetTy = std::set<typename GraphTraits<T>::NodeType*> >
struct df_ext_iterator : public df_iterator<T, SetTy, true> {
@@ -244,6 +251,12 @@ idf_iterator<T> idf_end(const T& G){
return idf_iterator<T>::end(Inverse<T>(G));
}
+// Provide an accessor method to use them in range-based patterns.
+template <class T>
+iterator_range<idf_iterator<T>> inverse_depth_first(const T& G) {
+ return iterator_range<idf_iterator<T>>(idf_begin(G), idf_end(G));
+}
+
// Provide global definitions of external inverse depth first iterators...
template <class T, class SetTy = std::set<typename GraphTraits<T>::NodeType*> >
struct idf_ext_iterator : public idf_iterator<T, SetTy, true> {
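The new depth_first()/inverse_depth_first() helpers above wrap the existing begin/end pairs in an iterator_range so they compose with range-based for loops. A sketch over a function's CFG, assuming the usual GraphTraits<Function *> specialization (llvm/IR/CFG.h in this tree):

    #include "llvm/ADT/DepthFirstIterator.h"
    #include "llvm/IR/CFG.h"
    #include "llvm/IR/Function.h"

    using namespace llvm;

    // Count the basic blocks reachable from the entry block, in DFS order.
    static unsigned countReachableBlocks(Function &F) {
      unsigned N = 0;
      for (BasicBlock *BB : depth_first(&F)) {
        (void)BB;   // process BB here
        ++N;
      }
      return N;
    }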
diff --git a/include/llvm/ADT/EquivalenceClasses.h b/include/llvm/ADT/EquivalenceClasses.h
index 2256ee7..e0396c7 100644
--- a/include/llvm/ADT/EquivalenceClasses.h
+++ b/include/llvm/ADT/EquivalenceClasses.h
@@ -86,14 +86,14 @@ class EquivalenceClasses {
}
void setNext(const ECValue *NewNext) const {
- assert(getNext() == 0 && "Already has a next pointer!");
+ assert(getNext() == nullptr && "Already has a next pointer!");
Next = (const ECValue*)((intptr_t)NewNext | (intptr_t)isLeader());
}
public:
ECValue(const ECValue &RHS) : Leader(this), Next((ECValue*)(intptr_t)1),
Data(RHS.Data) {
// Only support copying of singleton nodes.
- assert(RHS.isLeader() && RHS.getNext() == 0 && "Not a singleton!");
+ assert(RHS.isLeader() && RHS.getNext() == nullptr && "Not a singleton!");
}
bool operator<(const ECValue &UFN) const { return Data < UFN.Data; }
@@ -147,10 +147,10 @@ public:
class member_iterator;
member_iterator member_begin(iterator I) const {
// Only leaders provide anything to iterate over.
- return member_iterator(I->isLeader() ? &*I : 0);
+ return member_iterator(I->isLeader() ? &*I : nullptr);
}
member_iterator member_end() const {
- return member_iterator(0);
+ return member_iterator(nullptr);
}
/// findValue - Return an iterator to the specified value. If it does not
@@ -251,13 +251,13 @@ public:
explicit member_iterator(const ECValue *N) : Node(N) {}
reference operator*() const {
- assert(Node != 0 && "Dereferencing end()!");
+ assert(Node != nullptr && "Dereferencing end()!");
return Node->getData();
}
reference operator->() const { return operator*(); }
member_iterator &operator++() {
- assert(Node != 0 && "++'d off the end of the list!");
+ assert(Node != nullptr && "++'d off the end of the list!");
Node = Node->getNext();
return *this;
}
diff --git a/include/llvm/ADT/FoldingSet.h b/include/llvm/ADT/FoldingSet.h
index 188010d..9b7ee85 100644
--- a/include/llvm/ADT/FoldingSet.h
+++ b/include/llvm/ADT/FoldingSet.h
@@ -137,7 +137,7 @@ public:
public:
- Node() : NextInFoldingSetBucket(0) {}
+ Node() : NextInFoldingSetBucket(nullptr) {}
// Accessors
void *getNextInBucket() const { return NextInFoldingSetBucket; }
@@ -269,7 +269,7 @@ class FoldingSetNodeIDRef {
const unsigned *Data;
size_t Size;
public:
- FoldingSetNodeIDRef() : Data(0), Size(0) {}
+ FoldingSetNodeIDRef() : Data(nullptr), Size(0) {}
FoldingSetNodeIDRef(const unsigned *D, size_t S) : Data(D), Size(S) {}
/// ComputeHash - Compute a strong hash value for this FoldingSetNodeIDRef,
diff --git a/include/llvm/ADT/Hashing.h b/include/llvm/ADT/Hashing.h
index 4bffd8e..b11e3c1 100644
--- a/include/llvm/ADT/Hashing.h
+++ b/include/llvm/ADT/Hashing.h
@@ -45,7 +45,6 @@
#ifndef LLVM_ADT_HASHING_H
#define LLVM_ADT_HASHING_H
-#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/SwapByteOrder.h"
@@ -266,7 +265,6 @@ inline uint64_t hash_short(const char *s, size_t length, uint64_t seed) {
/// keeps 56 bytes of arbitrary state.
struct hash_state {
uint64_t h0, h1, h2, h3, h4, h5, h6;
- uint64_t seed;
/// \brief Create a new hash_state structure and initialize it based on the
/// seed and the first 64-byte chunk.
@@ -274,7 +272,7 @@ struct hash_state {
static hash_state create(const char *s, uint64_t seed) {
hash_state state = {
0, seed, hash_16_bytes(seed, k1), rotate(seed ^ k1, 49),
- seed * k1, shift_mix(seed), 0, seed };
+ seed * k1, shift_mix(seed), 0 };
state.h6 = hash_16_bytes(state.h4, state.h5);
state.mix(s);
return state;
@@ -412,7 +410,7 @@ template <typename InputIteratorT>
hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) {
const size_t seed = get_execution_seed();
char buffer[64], *buffer_ptr = buffer;
- char *const buffer_end = buffer_ptr + array_lengthof(buffer);
+ char *const buffer_end = std::end(buffer);
while (first != last && store_and_advance(buffer_ptr, buffer_end,
get_hashable_data(*first)))
++first;
diff --git a/include/llvm/ADT/ImmutableIntervalMap.h b/include/llvm/ADT/ImmutableIntervalMap.h
deleted file mode 100644
index 6793c6b..0000000
--- a/include/llvm/ADT/ImmutableIntervalMap.h
+++ /dev/null
@@ -1,248 +0,0 @@
-//===--- ImmutableIntervalMap.h - Immutable (functional) map ---*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the ImmutableIntervalMap class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_ADT_IMMUTABLEINTERVALMAP_H
-#define LLVM_ADT_IMMUTABLEINTERVALMAP_H
-
-#include "llvm/ADT/ImmutableMap.h"
-
-namespace llvm {
-
-class Interval {
-private:
- int64_t Start;
- int64_t End;
-
-public:
- Interval(int64_t S, int64_t E) : Start(S), End(E) {}
-
- int64_t getStart() const { return Start; }
- int64_t getEnd() const { return End; }
-};
-
-template <typename T>
-struct ImutIntervalInfo {
- typedef const std::pair<Interval, T> value_type;
- typedef const value_type &value_type_ref;
- typedef const Interval key_type;
- typedef const Interval &key_type_ref;
- typedef const T data_type;
- typedef const T &data_type_ref;
-
- static key_type_ref KeyOfValue(value_type_ref V) {
- return V.first;
- }
-
- static data_type_ref DataOfValue(value_type_ref V) {
- return V.second;
- }
-
- static bool isEqual(key_type_ref L, key_type_ref R) {
- return L.getStart() == R.getStart() && L.getEnd() == R.getEnd();
- }
-
- static bool isDataEqual(data_type_ref L, data_type_ref R) {
- return ImutContainerInfo<T>::isEqual(L,R);
- }
-
- static bool isLess(key_type_ref L, key_type_ref R) {
- // Assume L and R does not overlap.
- if (L.getStart() < R.getStart()) {
- assert(L.getEnd() < R.getStart());
- return true;
- } else if (L.getStart() == R.getStart()) {
- assert(L.getEnd() == R.getEnd());
- return false;
- } else {
- assert(L.getStart() > R.getEnd());
- return false;
- }
- }
-
- static bool isContainedIn(key_type_ref K, key_type_ref L) {
- if (K.getStart() >= L.getStart() && K.getEnd() <= L.getEnd())
- return true;
- else
- return false;
- }
-
- static void Profile(FoldingSetNodeID &ID, value_type_ref V) {
- ID.AddInteger(V.first.getStart());
- ID.AddInteger(V.first.getEnd());
- ImutProfileInfo<T>::Profile(ID, V.second);
- }
-};
-
-template <typename ImutInfo>
-class ImutIntervalAVLFactory : public ImutAVLFactory<ImutInfo> {
- typedef ImutAVLTree<ImutInfo> TreeTy;
- typedef typename ImutInfo::value_type value_type;
- typedef typename ImutInfo::value_type_ref value_type_ref;
- typedef typename ImutInfo::key_type key_type;
- typedef typename ImutInfo::key_type_ref key_type_ref;
- typedef typename ImutInfo::data_type data_type;
- typedef typename ImutInfo::data_type_ref data_type_ref;
-
-public:
- ImutIntervalAVLFactory(BumpPtrAllocator &Alloc)
- : ImutAVLFactory<ImutInfo>(Alloc) {}
-
- TreeTy *Add(TreeTy *T, value_type_ref V) {
- T = add_internal(V,T);
- this->MarkImmutable(T);
- return T;
- }
-
- TreeTy *Find(TreeTy *T, key_type_ref K) {
- if (!T)
- return NULL;
-
- key_type_ref CurrentKey = ImutInfo::KeyOfValue(this->getValue(T));
-
- if (ImutInfo::isContainedIn(K, CurrentKey))
- return T;
- else if (ImutInfo::isLess(K, CurrentKey))
- return Find(this->getLeft(T), K);
- else
- return Find(this->getRight(T), K);
- }
-
-private:
- TreeTy *add_internal(value_type_ref V, TreeTy *T) {
- key_type_ref K = ImutInfo::KeyOfValue(V);
- T = removeAllOverlaps(T, K);
- if (this->isEmpty(T))
- return this->CreateNode(NULL, V, NULL);
-
- assert(!T->isMutable());
-
- key_type_ref KCurrent = ImutInfo::KeyOfValue(this->Value(T));
-
- if (ImutInfo::isLess(K, KCurrent))
- return this->Balance(add_internal(V, this->Left(T)), this->Value(T),
- this->Right(T));
- else
- return this->Balance(this->Left(T), this->Value(T),
- add_internal(V, this->Right(T)));
- }
-
- // Remove all overlaps from T.
- TreeTy *removeAllOverlaps(TreeTy *T, key_type_ref K) {
- bool Changed;
- do {
- Changed = false;
- T = removeOverlap(T, K, Changed);
- this->markImmutable(T);
- } while (Changed);
-
- return T;
- }
-
- // Remove one overlap from T.
- TreeTy *removeOverlap(TreeTy *T, key_type_ref K, bool &Changed) {
- if (!T)
- return NULL;
- Interval CurrentK = ImutInfo::KeyOfValue(this->Value(T));
-
- // If current key does not overlap the inserted key.
- if (CurrentK.getStart() > K.getEnd())
- return this->Balance(removeOverlap(this->Left(T), K, Changed),
- this->Value(T), this->Right(T));
- else if (CurrentK.getEnd() < K.getStart())
- return this->Balance(this->Left(T), this->Value(T),
- removeOverlap(this->Right(T), K, Changed));
-
- // Current key overlaps with the inserted key.
- // Remove the current key.
- Changed = true;
- data_type_ref OldData = ImutInfo::DataOfValue(this->Value(T));
- T = this->Remove_internal(CurrentK, T);
- // Add back the unoverlapped part of the current key.
- if (CurrentK.getStart() < K.getStart()) {
- if (CurrentK.getEnd() <= K.getEnd()) {
- Interval NewK(CurrentK.getStart(), K.getStart()-1);
- return add_internal(std::make_pair(NewK, OldData), T);
- } else {
- Interval NewK1(CurrentK.getStart(), K.getStart()-1);
- T = add_internal(std::make_pair(NewK1, OldData), T);
-
- Interval NewK2(K.getEnd()+1, CurrentK.getEnd());
- return add_internal(std::make_pair(NewK2, OldData), T);
- }
- } else {
- if (CurrentK.getEnd() > K.getEnd()) {
- Interval NewK(K.getEnd()+1, CurrentK.getEnd());
- return add_internal(std::make_pair(NewK, OldData), T);
- } else
- return T;
- }
- }
-};
-
-/// ImmutableIntervalMap maps an interval [start, end] to a value. The intervals
-/// in the map are guaranteed to be disjoint.
-template <typename ValT>
-class ImmutableIntervalMap
- : public ImmutableMap<Interval, ValT, ImutIntervalInfo<ValT> > {
-
- typedef typename ImutIntervalInfo<ValT>::value_type value_type;
- typedef typename ImutIntervalInfo<ValT>::value_type_ref value_type_ref;
- typedef typename ImutIntervalInfo<ValT>::key_type key_type;
- typedef typename ImutIntervalInfo<ValT>::key_type_ref key_type_ref;
- typedef typename ImutIntervalInfo<ValT>::data_type data_type;
- typedef typename ImutIntervalInfo<ValT>::data_type_ref data_type_ref;
- typedef ImutAVLTree<ImutIntervalInfo<ValT> > TreeTy;
-
-public:
- explicit ImmutableIntervalMap(TreeTy *R)
- : ImmutableMap<Interval, ValT, ImutIntervalInfo<ValT> >(R) {}
-
- class Factory {
- ImutIntervalAVLFactory<ImutIntervalInfo<ValT> > F;
-
- public:
- Factory(BumpPtrAllocator& Alloc) : F(Alloc) {}
-
- ImmutableIntervalMap getEmptyMap() {
- return ImmutableIntervalMap(F.getEmptyTree());
- }
-
- ImmutableIntervalMap add(ImmutableIntervalMap Old,
- key_type_ref K, data_type_ref D) {
- TreeTy *T = F.add(Old.Root, std::pair<key_type, data_type>(K, D));
- return ImmutableIntervalMap(F.getCanonicalTree(T));
- }
-
- ImmutableIntervalMap remove(ImmutableIntervalMap Old, key_type_ref K) {
- TreeTy *T = F.remove(Old.Root, K);
- return ImmutableIntervalMap(F.getCanonicalTree(T));
- }
-
- data_type *lookup(ImmutableIntervalMap M, key_type_ref K) {
- TreeTy *T = F.Find(M.getRoot(), K);
- if (T)
- return &T->getValue().second;
- else
- return 0;
- }
- };
-
-private:
- // For ImmutableIntervalMap, the lookup operation has to be done by the
- // factory.
- data_type* lookup(key_type_ref K) const;
-};
-
-} // end namespace llvm
-
-#endif
diff --git a/include/llvm/ADT/ImmutableMap.h b/include/llvm/ADT/ImmutableMap.h
index 8f8fb98..11f281b 100644
--- a/include/llvm/ADT/ImmutableMap.h
+++ b/include/llvm/ADT/ImmutableMap.h
@@ -241,14 +241,14 @@ public:
if (T) return &T->getValue().second;
}
- return 0;
+ return nullptr;
}
/// getMaxElement - Returns the <key,value> pair in the ImmutableMap for
/// which key is the highest in the ordering of keys in the map. This
/// method returns NULL if the map is empty.
value_type* getMaxElement() const {
- return Root ? &(Root->getMaxElement()->getValue()) : 0;
+ return Root ? &(Root->getMaxElement()->getValue()) : nullptr;
}
//===--------------------------------------------------===//
diff --git a/include/llvm/ADT/ImmutableSet.h b/include/llvm/ADT/ImmutableSet.h
index ad34969..5a3d8ad 100644
--- a/include/llvm/ADT/ImmutableSet.h
+++ b/include/llvm/ADT/ImmutableSet.h
@@ -81,7 +81,7 @@ public:
else
T = T->getRight();
}
- return NULL;
+ return nullptr;
}
/// getMaxElement - Find the subtree associated with the highest ranged
@@ -242,9 +242,9 @@ private:
/// ImutAVLFactory.
ImutAVLTree(Factory *f, ImutAVLTree* l, ImutAVLTree* r, value_type_ref v,
unsigned height)
- : factory(f), left(l), right(r), prev(0), next(0), height(height),
- IsMutable(true), IsDigestCached(false), IsCanonicalized(0),
- value(v), digest(0), refCount(0)
+ : factory(f), left(l), right(r), prev(nullptr), next(nullptr),
+ height(height), IsMutable(true), IsDigestCached(false),
+ IsCanonicalized(0), value(v), digest(0), refCount(0)
{
if (left) left->retain();
if (right) right->retain();
@@ -411,7 +411,7 @@ public:
return T;
}
- TreeTy* getEmptyTree() const { return NULL; }
+ TreeTy* getEmptyTree() const { return nullptr; }
protected:
@@ -607,7 +607,7 @@ protected:
public:
TreeTy *getCanonicalTree(TreeTy *TNew) {
if (!TNew)
- return 0;
+ return nullptr;
if (TNew->IsCanonicalized)
return TNew;
@@ -619,7 +619,7 @@ public:
do {
if (!entry)
break;
- for (TreeTy *T = entry ; T != 0; T = T->next) {
+ for (TreeTy *T = entry ; T != nullptr; T = T->next) {
// Compare the Contents('T') with Contents('TNew')
typename TreeTy::iterator TI = T->begin(), TE = T->end();
if (!compareTreeWithSection(TNew, TI, TE))
@@ -696,12 +696,7 @@ public:
}
inline bool operator==(const _Self& x) const {
- if (stack.size() != x.stack.size())
- return false;
- for (unsigned i = 0 ; i < stack.size(); i++)
- if (stack[i] != x.stack[i])
- return false;
- return true;
+ return stack == x.stack;
}
inline bool operator!=(const _Self& x) const { return !operator==(x); }
diff --git a/include/llvm/ADT/IntervalMap.h b/include/llvm/ADT/IntervalMap.h
index 1ca3288..46549ee 100644
--- a/include/llvm/ADT/IntervalMap.h
+++ b/include/llvm/ADT/IntervalMap.h
@@ -1177,7 +1177,7 @@ branchRoot(unsigned Position) {
if (Nodes == 1)
size[0] = rootSize;
else
- NewOffset = distribute(Nodes, rootSize, Leaf::Capacity, NULL, size,
+ NewOffset = distribute(Nodes, rootSize, Leaf::Capacity, nullptr, size,
Position, true);
// Allocate new nodes.
@@ -1218,7 +1218,7 @@ splitRoot(unsigned Position) {
if (Nodes == 1)
Size[0] = rootSize;
else
- NewOffset = distribute(Nodes, rootSize, Leaf::Capacity, NULL, Size,
+ NewOffset = distribute(Nodes, rootSize, Leaf::Capacity, nullptr, Size,
Position, true);
// Allocate new nodes.
@@ -1346,7 +1346,7 @@ protected:
public:
/// const_iterator - Create an iterator that isn't pointing anywhere.
- const_iterator() : map(0) {}
+ const_iterator() : map(nullptr) {}
/// setMap - Change the map iterated over. This call must be followed by a
/// call to goToBegin(), goToEnd(), or find()
diff --git a/include/llvm/ADT/IntrusiveRefCntPtr.h b/include/llvm/ADT/IntrusiveRefCntPtr.h
index 729e37f..cd1946c 100644
--- a/include/llvm/ADT/IntrusiveRefCntPtr.h
+++ b/include/llvm/ADT/IntrusiveRefCntPtr.h
@@ -139,7 +139,7 @@ public:
public:
typedef T element_type;
- explicit IntrusiveRefCntPtr() : Obj(0) {}
+ explicit IntrusiveRefCntPtr() : Obj(nullptr) {}
IntrusiveRefCntPtr(T* obj) : Obj(obj) {
retain();
@@ -150,7 +150,7 @@ public:
}
IntrusiveRefCntPtr(IntrusiveRefCntPtr&& S) : Obj(S.Obj) {
- S.Obj = 0;
+ S.Obj = nullptr;
}
template <class X>
@@ -179,7 +179,7 @@ public:
typedef T* (IntrusiveRefCntPtr::*unspecified_bool_type) () const;
operator unspecified_bool_type() const {
- return Obj == 0 ? 0 : &IntrusiveRefCntPtr::getPtr;
+ return Obj ? &IntrusiveRefCntPtr::getPtr : nullptr;
}
void swap(IntrusiveRefCntPtr& other) {
@@ -190,7 +190,7 @@ public:
void reset() {
release();
- Obj = 0;
+ Obj = nullptr;
}
void resetWithoutRelease() {
diff --git a/include/llvm/ADT/OwningPtr.h b/include/llvm/ADT/OwningPtr.h
index 034bcfd..5e83358 100644
--- a/include/llvm/ADT/OwningPtr.h
+++ b/include/llvm/ADT/OwningPtr.h
@@ -69,7 +69,7 @@ public:
/// not delete the pointer before returning it.
T *take() {
T *Tmp = Ptr;
- Ptr = 0;
+ Ptr = nullptr;
return Tmp;
}
@@ -84,9 +84,9 @@ public:
T *operator->() const { return Ptr; }
T *get() const { return Ptr; }
- LLVM_EXPLICIT operator bool() const { return Ptr != 0; }
- bool operator!() const { return Ptr == 0; }
- bool isValid() const { return Ptr != 0; }
+ LLVM_EXPLICIT operator bool() const { return Ptr != nullptr; }
+ bool operator!() const { return Ptr == nullptr; }
+ bool isValid() const { return Ptr != nullptr; }
void swap(OwningPtr &RHS) {
T *Tmp = RHS.Ptr;
@@ -146,7 +146,7 @@ public:
T *get() const { return Ptr; }
LLVM_EXPLICIT operator bool() const { return Ptr != 0; }
- bool operator!() const { return Ptr == 0; }
+ bool operator!() const { return Ptr == nullptr; }
void swap(OwningArrayPtr &RHS) {
T *Tmp = RHS.Ptr;
diff --git a/include/llvm/ADT/PointerUnion.h b/include/llvm/ADT/PointerUnion.h
index 8cbe8d1..a6dddd2 100644
--- a/include/llvm/ADT/PointerUnion.h
+++ b/include/llvm/ADT/PointerUnion.h
@@ -15,6 +15,7 @@
#ifndef LLVM_ADT_POINTERUNION_H
#define LLVM_ADT_POINTERUNION_H
+#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/Support/Compiler.h"
@@ -153,6 +154,12 @@ namespace llvm {
"Can't get the address because PointerLikeTypeTraits changes the ptr");
return (PT1 *)Val.getAddrOfPointer();
}
+
+ /// \brief Assignment from nullptr which just clears the union.
+ const PointerUnion &operator=(std::nullptr_t) {
+ Val.initWithPointer(nullptr);
+ return *this;
+ }
/// Assignment operators - Allow assigning into this union from either
/// pointer type, setting the discriminator to remember what it came from.
@@ -297,6 +304,12 @@ namespace llvm {
if (is<T>()) return get<T>();
return T();
}
+
+ /// \brief Assignment from nullptr which just clears the union.
+ const PointerUnion3 &operator=(std::nullptr_t) {
+ Val = nullptr;
+ return *this;
+ }
/// Assignment operators - Allow assigning into this union from either
/// pointer type, setting the discriminator to remember what it came from.
@@ -406,6 +419,12 @@ namespace llvm {
if (is<T>()) return get<T>();
return T();
}
+
+ /// \brief Assignment from nullptr which just clears the union.
+ const PointerUnion4 &operator=(std::nullptr_t) {
+ Val = nullptr;
+ return *this;
+ }
/// Assignment operators - Allow assigning into this union from either
/// pointer type, setting the discriminator to remember what it came from.
@@ -455,6 +474,33 @@ namespace llvm {
::NumLowBitsAvailable
};
};
+
+ // Teach DenseMap how to use PointerUnions as keys.
+ template<typename T, typename U>
+ struct DenseMapInfo<PointerUnion<T, U> > {
+ typedef PointerUnion<T, U> Pair;
+ typedef DenseMapInfo<T> FirstInfo;
+ typedef DenseMapInfo<U> SecondInfo;
+
+ static inline Pair getEmptyKey() {
+ return Pair(FirstInfo::getEmptyKey());
+ }
+ static inline Pair getTombstoneKey() {
+ return Pair(FirstInfo::getTombstoneKey());
+ }
+ static unsigned getHashValue(const Pair &PairVal) {
+ intptr_t key = (intptr_t)PairVal.getOpaqueValue();
+ return DenseMapInfo<intptr_t>::getHashValue(key);
+ }
+ static bool isEqual(const Pair &LHS, const Pair &RHS) {
+ return LHS.template is<T>() == RHS.template is<T>() &&
+ (LHS.template is<T>() ?
+ FirstInfo::isEqual(LHS.template get<T>(),
+ RHS.template get<T>()) :
+ SecondInfo::isEqual(LHS.template get<U>(),
+ RHS.template get<U>()));
+ }
+ };
}
#endif
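With the DenseMapInfo specialization added above, a PointerUnion can serve directly as a DenseMap key, and the new operator=(nullptr) overloads clear a union in place. A small illustrative sketch (the types and names are hypothetical):

    #include "llvm/ADT/DenseMap.h"
    #include "llvm/ADT/PointerUnion.h"

    using namespace llvm;

    typedef PointerUnion<int *, float *> IntOrFloat;

    static void example(int *I, float *F) {
      // PointerUnion as a DenseMap key, via the new DenseMapInfo above.
      DenseMap<IntOrFloat, unsigned> Rank;
      Rank[IntOrFloat(I)] = 0;
      Rank[IntOrFloat(F)] = 1;

      // Assignment from nullptr clears the union.
      IntOrFloat V(I);
      V = nullptr;
      (void)V.isNull();
    }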
diff --git a/include/llvm/ADT/PostOrderIterator.h b/include/llvm/ADT/PostOrderIterator.h
index 59fa3f3..dd8cc74 100644
--- a/include/llvm/ADT/PostOrderIterator.h
+++ b/include/llvm/ADT/PostOrderIterator.h
@@ -111,7 +111,7 @@ class po_iterator : public std::iterator<std::forward_iterator_tag,
}
inline po_iterator(NodeType *BB) {
- this->insertEdge((NodeType*)0, BB);
+ this->insertEdge((NodeType*)nullptr, BB);
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
traverseChild();
}
@@ -119,7 +119,7 @@ class po_iterator : public std::iterator<std::forward_iterator_tag,
inline po_iterator(NodeType *BB, SetType &S) :
po_iterator_storage<SetType, ExtStorage>(S) {
- if (this->insertEdge((NodeType*)0, BB)) {
+ if (this->insertEdge((NodeType*)nullptr, BB)) {
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
traverseChild();
}
diff --git a/include/llvm/ADT/SCCIterator.h b/include/llvm/ADT/SCCIterator.h
index 58ac149..bc74416 100644
--- a/include/llvm/ADT/SCCIterator.h
+++ b/include/llvm/ADT/SCCIterator.h
@@ -25,6 +25,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/iterator.h"
#include <vector>
namespace llvm {
@@ -35,19 +36,17 @@ namespace llvm {
/// This is implemented using Tarjan's DFS algorithm using an internal stack to
/// build up a vector of nodes in a particular SCC. Note that it is a forward
/// iterator and thus you cannot backtrack or re-visit nodes.
-template <class GraphT, class GT = GraphTraits<GraphT> >
+template <class GraphT, class GT = GraphTraits<GraphT>>
class scc_iterator
- : public std::iterator<std::forward_iterator_tag,
- std::vector<typename GT::NodeType>, ptrdiff_t> {
+ : public iterator_facade_base<
+ scc_iterator<GraphT, GT>, std::forward_iterator_tag,
+ const std::vector<typename GT::NodeType *>, ptrdiff_t> {
typedef typename GT::NodeType NodeType;
typedef typename GT::ChildIteratorType ChildItTy;
typedef std::vector<NodeType *> SccTy;
- typedef std::iterator<std::forward_iterator_tag,
- std::vector<typename GT::NodeType>, ptrdiff_t> super;
- typedef typename super::reference reference;
- typedef typename super::pointer pointer;
+ typedef typename scc_iterator::reference reference;
- // Element of VisitStack during DFS.
+ /// Element of VisitStack during DFS.
struct StackElement {
NodeType *Node; ///< The current node pointer.
ChildItTy NextChild; ///< The next child, modified inplace during DFS.
@@ -63,135 +62,63 @@ class scc_iterator
}
};
- // The visit counters used to detect when a complete SCC is on the stack.
- // visitNum is the global counter.
- // nodeVisitNumbers are per-node visit numbers, also used as DFS flags.
+ /// The visit counters used to detect when a complete SCC is on the stack.
+ /// visitNum is the global counter.
+ ///
+ /// nodeVisitNumbers are per-node visit numbers, also used as DFS flags.
unsigned visitNum;
DenseMap<NodeType *, unsigned> nodeVisitNumbers;
- // Stack holding nodes of the SCC.
+ /// Stack holding nodes of the SCC.
std::vector<NodeType *> SCCNodeStack;
- // The current SCC, retrieved using operator*().
+ /// The current SCC, retrieved using operator*().
SccTy CurrentSCC;
-
- // DFS stack, Used to maintain the ordering. The top contains the current
- // node, the next child to visit, and the minimum uplink value of all child
+ /// DFS stack, Used to maintain the ordering. The top contains the current
+ /// node, the next child to visit, and the minimum uplink value of all child
std::vector<StackElement> VisitStack;
- // A single "visit" within the non-recursive DFS traversal.
- void DFSVisitOne(NodeType *N) {
- ++visitNum;
- nodeVisitNumbers[N] = visitNum;
- SCCNodeStack.push_back(N);
- VisitStack.push_back(StackElement(N, GT::child_begin(N), visitNum));
-#if 0 // Enable if needed when debugging.
- dbgs() << "TarjanSCC: Node " << N <<
- " : visitNum = " << visitNum << "\n";
-#endif
- }
+ /// A single "visit" within the non-recursive DFS traversal.
+ void DFSVisitOne(NodeType *N);
- // The stack-based DFS traversal; defined below.
- void DFSVisitChildren() {
- assert(!VisitStack.empty());
- while (VisitStack.back().NextChild !=
- GT::child_end(VisitStack.back().Node)) {
- // TOS has at least one more child so continue DFS
- NodeType *childN = *VisitStack.back().NextChild++;
- typename DenseMap<NodeType *, unsigned>::iterator Visited =
- nodeVisitNumbers.find(childN);
- if (Visited == nodeVisitNumbers.end()) {
- // this node has never been seen.
- DFSVisitOne(childN);
- continue;
- }
-
- unsigned childNum = Visited->second;
- if (VisitStack.back().MinVisited > childNum)
- VisitStack.back().MinVisited = childNum;
- }
- }
+ /// The stack-based DFS traversal; defined below.
+ void DFSVisitChildren();
- // Compute the next SCC using the DFS traversal.
- void GetNextSCC() {
- CurrentSCC.clear(); // Prepare to compute the next SCC
- while (!VisitStack.empty()) {
- DFSVisitChildren();
+ /// Compute the next SCC using the DFS traversal.
+ void GetNextSCC();
- // Pop the leaf on top of the VisitStack.
- NodeType *visitingN = VisitStack.back().Node;
- unsigned minVisitNum = VisitStack.back().MinVisited;
- assert(VisitStack.back().NextChild == GT::child_end(visitingN));
- VisitStack.pop_back();
-
- // Propagate MinVisitNum to parent so we can detect the SCC starting node.
- if (!VisitStack.empty() && VisitStack.back().MinVisited > minVisitNum)
- VisitStack.back().MinVisited = minVisitNum;
-
-#if 0 // Enable if needed when debugging.
- dbgs() << "TarjanSCC: Popped node " << visitingN <<
- " : minVisitNum = " << minVisitNum << "; Node visit num = " <<
- nodeVisitNumbers[visitingN] << "\n";
-#endif
-
- if (minVisitNum != nodeVisitNumbers[visitingN])
- continue;
-
- // A full SCC is on the SCCNodeStack! It includes all nodes below
- // visitingN on the stack. Copy those nodes to CurrentSCC,
- // reset their minVisit values, and return (this suspends
- // the DFS traversal till the next ++).
- do {
- CurrentSCC.push_back(SCCNodeStack.back());
- SCCNodeStack.pop_back();
- nodeVisitNumbers[CurrentSCC.back()] = ~0U;
- } while (CurrentSCC.back() != visitingN);
- return;
- }
- }
-
- inline scc_iterator(NodeType *entryN) : visitNum(0) {
+ scc_iterator(NodeType *entryN) : visitNum(0) {
DFSVisitOne(entryN);
GetNextSCC();
}
- // End is when the DFS stack is empty.
- inline scc_iterator() {}
+ /// End is when the DFS stack is empty.
+ scc_iterator() {}
public:
- static inline scc_iterator begin(const GraphT &G) {
+ static scc_iterator begin(const GraphT &G) {
return scc_iterator(GT::getEntryNode(G));
}
- static inline scc_iterator end(const GraphT &) { return scc_iterator(); }
+ static scc_iterator end(const GraphT &) { return scc_iterator(); }
/// \brief Direct loop termination test which is more efficient than
/// comparison with \c end().
- inline bool isAtEnd() const {
+ bool isAtEnd() const {
assert(!CurrentSCC.empty() || VisitStack.empty());
return CurrentSCC.empty();
}
- inline bool operator==(const scc_iterator &x) const {
+ bool operator==(const scc_iterator &x) const {
return VisitStack == x.VisitStack && CurrentSCC == x.CurrentSCC;
}
- inline bool operator!=(const scc_iterator &x) const { return !operator==(x); }
- inline scc_iterator &operator++() {
+ scc_iterator &operator++() {
GetNextSCC();
return *this;
}
- inline scc_iterator operator++(int) {
- scc_iterator tmp = *this;
- ++*this;
- return tmp;
- }
- inline const SccTy &operator*() const {
- assert(!CurrentSCC.empty() && "Dereferencing END SCC iterator!");
- return CurrentSCC;
- }
- inline SccTy &operator*() {
+ reference operator*() const {
assert(!CurrentSCC.empty() && "Dereferencing END SCC iterator!");
return CurrentSCC;
}
@@ -200,7 +127,88 @@ public:
///
/// If the SCC has more than one node, this is trivially true. If not, it may
/// still contain a loop if the node has an edge back to itself.
- bool hasLoop() const {
+ bool hasLoop() const;
+
+ /// This informs the \c scc_iterator that the specified \c Old node
+ /// has been deleted, and \c New is to be used in its place.
+ void ReplaceNode(NodeType *Old, NodeType *New) {
+ assert(nodeVisitNumbers.count(Old) && "Old not in scc_iterator?");
+ nodeVisitNumbers[New] = nodeVisitNumbers[Old];
+ nodeVisitNumbers.erase(Old);
+ }
+};
+
+template <class GraphT, class GT>
+void scc_iterator<GraphT, GT>::DFSVisitOne(NodeType *N) {
+ ++visitNum;
+ nodeVisitNumbers[N] = visitNum;
+ SCCNodeStack.push_back(N);
+ VisitStack.push_back(StackElement(N, GT::child_begin(N), visitNum));
+#if 0 // Enable if needed when debugging.
+ dbgs() << "TarjanSCC: Node " << N <<
+ " : visitNum = " << visitNum << "\n";
+#endif
+}
+
+template <class GraphT, class GT>
+void scc_iterator<GraphT, GT>::DFSVisitChildren() {
+ assert(!VisitStack.empty());
+ while (VisitStack.back().NextChild != GT::child_end(VisitStack.back().Node)) {
+    // TOS has at least one more child, so continue the DFS.
+ NodeType *childN = *VisitStack.back().NextChild++;
+ typename DenseMap<NodeType *, unsigned>::iterator Visited =
+ nodeVisitNumbers.find(childN);
+ if (Visited == nodeVisitNumbers.end()) {
+      // This node has never been seen.
+ DFSVisitOne(childN);
+ continue;
+ }
+
+ unsigned childNum = Visited->second;
+ if (VisitStack.back().MinVisited > childNum)
+ VisitStack.back().MinVisited = childNum;
+ }
+}
+
+template <class GraphT, class GT> void scc_iterator<GraphT, GT>::GetNextSCC() {
+ CurrentSCC.clear(); // Prepare to compute the next SCC
+ while (!VisitStack.empty()) {
+ DFSVisitChildren();
+
+ // Pop the leaf on top of the VisitStack.
+ NodeType *visitingN = VisitStack.back().Node;
+ unsigned minVisitNum = VisitStack.back().MinVisited;
+ assert(VisitStack.back().NextChild == GT::child_end(visitingN));
+ VisitStack.pop_back();
+
+ // Propagate MinVisitNum to parent so we can detect the SCC starting node.
+ if (!VisitStack.empty() && VisitStack.back().MinVisited > minVisitNum)
+ VisitStack.back().MinVisited = minVisitNum;
+
+#if 0 // Enable if needed when debugging.
+ dbgs() << "TarjanSCC: Popped node " << visitingN <<
+ " : minVisitNum = " << minVisitNum << "; Node visit num = " <<
+ nodeVisitNumbers[visitingN] << "\n";
+#endif
+
+ if (minVisitNum != nodeVisitNumbers[visitingN])
+ continue;
+
+ // A full SCC is on the SCCNodeStack! It includes all nodes below
+ // visitingN on the stack. Copy those nodes to CurrentSCC,
+ // reset their minVisit values, and return (this suspends
+ // the DFS traversal till the next ++).
+ do {
+ CurrentSCC.push_back(SCCNodeStack.back());
+ SCCNodeStack.pop_back();
+ nodeVisitNumbers[CurrentSCC.back()] = ~0U;
+ } while (CurrentSCC.back() != visitingN);
+ return;
+ }
+}
+
+template <class GraphT, class GT>
+bool scc_iterator<GraphT, GT>::hasLoop() const {
assert(!CurrentSCC.empty() && "Dereferencing END SCC iterator!");
if (CurrentSCC.size() > 1)
return true;
@@ -212,15 +220,6 @@ public:
return false;
}
- /// This informs the \c scc_iterator that the specified \c Old node
- /// has been deleted, and \c New is to be used in its place.
- void ReplaceNode(NodeType *Old, NodeType *New) {
- assert(nodeVisitNumbers.count(Old) && "Old not in scc_iterator?");
- nodeVisitNumbers[New] = nodeVisitNumbers[Old];
- nodeVisitNumbers.erase(Old);
- }
-};
-
/// \brief Construct the begin iterator for a deduced graph type T.
template <class T> scc_iterator<T> scc_begin(const T &G) {
return scc_iterator<T>::begin(G);
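
Usage sketch (illustrative, not from this patch): walking the SCCs of a function's CFG with the iterator declared above. It assumes the GraphTraits specialization for Function * provided by llvm/IR/CFG.h in this tree.

#include "llvm/ADT/SCCIterator.h"
#include "llvm/IR/CFG.h"       // GraphTraits<Function *> over basic blocks.
#include "llvm/IR/Function.h"
#include "llvm/Support/raw_ostream.h"
#include <vector>

static void printSCCs(llvm::Function &F) {
  // scc_begin visits SCCs in post-order; isAtEnd() avoids the cost of
  // comparing against end().
  for (auto I = llvm::scc_begin(&F); !I.isAtEnd(); ++I) {
    const std::vector<llvm::BasicBlock *> &SCC = *I;
    llvm::errs() << "SCC with " << SCC.size() << " block(s)"
                 << (I.hasLoop() ? " (contains a loop)\n" : "\n");
  }
}
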
diff --git a/include/llvm/ADT/STLExtras.h b/include/llvm/ADT/STLExtras.h
index ab6884f..1cef393 100644
--- a/include/llvm/ADT/STLExtras.h
+++ b/include/llvm/ADT/STLExtras.h
@@ -55,6 +55,131 @@ struct greater_ptr : public std::binary_function<Ty, Ty, bool> {
}
};
+/// An efficient, type-erasing, non-owning reference to a callable. This is
+/// intended for use as the type of a function parameter that is not used
+/// after the function in question returns.
+///
+/// This class does not own the callable, so it is not in general safe to store
+/// a function_ref.
+template<typename Fn> class function_ref;
+
+#if LLVM_HAS_VARIADIC_TEMPLATES
+
+template<typename Ret, typename ...Params>
+class function_ref<Ret(Params...)> {
+ Ret (*callback)(intptr_t callable, Params ...params);
+ intptr_t callable;
+
+ template<typename Callable>
+ static Ret callback_fn(intptr_t callable, Params ...params) {
+ return (*reinterpret_cast<Callable*>(callable))(
+ std::forward<Params>(params)...);
+ }
+
+public:
+ template<typename Callable>
+ function_ref(Callable &&callable)
+ : callback(callback_fn<typename std::remove_reference<Callable>::type>),
+ callable(reinterpret_cast<intptr_t>(&callable)) {}
+ Ret operator()(Params ...params) const {
+ return callback(callable, std::forward<Params>(params)...);
+ }
+};
+
+#else
+
+template<typename Ret>
+class function_ref<Ret()> {
+ Ret (*callback)(intptr_t callable);
+ intptr_t callable;
+
+ template<typename Callable>
+ static Ret callback_fn(intptr_t callable) {
+ return (*reinterpret_cast<Callable*>(callable))();
+ }
+
+public:
+ template<typename Callable>
+ function_ref(Callable &&callable)
+ : callback(callback_fn<typename std::remove_reference<Callable>::type>),
+ callable(reinterpret_cast<intptr_t>(&callable)) {}
+ Ret operator()() const { return callback(callable); }
+};
+
+template<typename Ret, typename Param1>
+class function_ref<Ret(Param1)> {
+ Ret (*callback)(intptr_t callable, Param1 param1);
+ intptr_t callable;
+
+ template<typename Callable>
+ static Ret callback_fn(intptr_t callable, Param1 param1) {
+ return (*reinterpret_cast<Callable*>(callable))(
+ std::forward<Param1>(param1));
+ }
+
+public:
+ template<typename Callable>
+ function_ref(Callable &&callable)
+ : callback(callback_fn<typename std::remove_reference<Callable>::type>),
+ callable(reinterpret_cast<intptr_t>(&callable)) {}
+ Ret operator()(Param1 param1) {
+ return callback(callable, std::forward<Param1>(param1));
+ }
+};
+
+template<typename Ret, typename Param1, typename Param2>
+class function_ref<Ret(Param1, Param2)> {
+ Ret (*callback)(intptr_t callable, Param1 param1, Param2 param2);
+ intptr_t callable;
+
+ template<typename Callable>
+ static Ret callback_fn(intptr_t callable, Param1 param1, Param2 param2) {
+ return (*reinterpret_cast<Callable*>(callable))(
+ std::forward<Param1>(param1),
+ std::forward<Param2>(param2));
+ }
+
+public:
+ template<typename Callable>
+ function_ref(Callable &&callable)
+ : callback(callback_fn<typename std::remove_reference<Callable>::type>),
+ callable(reinterpret_cast<intptr_t>(&callable)) {}
+ Ret operator()(Param1 param1, Param2 param2) {
+ return callback(callable,
+ std::forward<Param1>(param1),
+ std::forward<Param2>(param2));
+ }
+};
+
+template<typename Ret, typename Param1, typename Param2, typename Param3>
+class function_ref<Ret(Param1, Param2, Param3)> {
+ Ret (*callback)(intptr_t callable, Param1 param1, Param2 param2, Param3 param3);
+ intptr_t callable;
+
+ template<typename Callable>
+ static Ret callback_fn(intptr_t callable, Param1 param1, Param2 param2,
+ Param3 param3) {
+ return (*reinterpret_cast<Callable*>(callable))(
+ std::forward<Param1>(param1),
+ std::forward<Param2>(param2),
+ std::forward<Param3>(param3));
+ }
+
+public:
+ template<typename Callable>
+ function_ref(Callable &&callable)
+ : callback(callback_fn<typename std::remove_reference<Callable>::type>),
+ callable(reinterpret_cast<intptr_t>(&callable)) {}
+ Ret operator()(Param1 param1, Param2 param2, Param3 param3) {
+ return callback(callable,
+ std::forward<Param1>(param1),
+ std::forward<Param2>(param2),
+ std::forward<Param3>(param3));
+ }
+};
+
+#endif
+
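
Usage sketch (illustrative, not from this patch): passing a lambda through a function_ref parameter without a std::function allocation. The helper and its names are hypothetical.

#include "llvm/ADT/STLExtras.h"

// Hypothetical callee: takes a non-owning reference to any callable with the
// signature bool(int). The callable only needs to outlive this call.
static int countMatching(const int *Values, int N,
                         llvm::function_ref<bool(int)> Pred) {
  int Count = 0;
  for (int i = 0; i != N; ++i)
    if (Pred(Values[i]))
      ++Count;
  return Count;
}

static int demo() {
  int Values[] = {1, 2, 3, 4, 5};
  int Threshold = 2;
  // Captured state lives in the lambda on the caller's stack; function_ref
  // merely points at it, so nothing is copied or heap-allocated.
  return countMatching(Values, 5, [&](int V) { return V > Threshold; });
}

Because function_ref does not own the callable, it should not be stored beyond the call, as the doc comment above notes.
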
// deleter - Very very very simple method that is used to invoke operator
// delete on something. It is used like this:
//
@@ -165,27 +290,20 @@ struct less_second {
// Extra additions for arrays
//===----------------------------------------------------------------------===//
-/// Find where an array ends (for ending iterators)
-/// This returns a pointer to the byte immediately
-/// after the end of an array.
-template<class T, std::size_t N>
-inline T *array_endof(T (&x)[N]) {
- return x+N;
-}
-
/// Find the length of an array.
-template<class T, std::size_t N>
-inline size_t array_lengthof(T (&)[N]) {
+template <class T, std::size_t N>
+LLVM_CONSTEXPR inline size_t array_lengthof(T (&)[N]) {
return N;
}
-/// array_pod_sort_comparator - This is helper function for array_pod_sort,
-/// which just uses operator< on T.
+/// Adapt std::less<T> for array_pod_sort.
template<typename T>
inline int array_pod_sort_comparator(const void *P1, const void *P2) {
- if (*reinterpret_cast<const T*>(P1) < *reinterpret_cast<const T*>(P2))
+ if (std::less<T>()(*reinterpret_cast<const T*>(P1),
+ *reinterpret_cast<const T*>(P2)))
return -1;
- if (*reinterpret_cast<const T*>(P2) < *reinterpret_cast<const T*>(P1))
+ if (std::less<T>()(*reinterpret_cast<const T*>(P2),
+ *reinterpret_cast<const T*>(P1)))
return 1;
return 0;
}
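
Usage sketch (illustrative, not from this patch) of the sort this comparator backs:

#include "llvm/ADT/STLExtras.h"
#include <iterator>

static void sortPods() {
  int Values[] = {3, 1, 4, 1, 5};
  // array_pod_sort dispatches to qsort with the std::less-based comparator
  // above; it is only appropriate for POD-like element types.
  llvm::array_pod_sort(std::begin(Values), std::end(Values));
}
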
@@ -208,7 +326,7 @@ inline int (*get_array_pod_sort_comparator(const T &))
/// possible.
///
/// This function assumes that you have simple POD-like types that can be
-/// compared with operator< and can be moved with memcpy. If this isn't true,
+/// compared with std::less and can be moved with memcpy. If this isn't true,
/// you should use std::sort.
///
/// NOTE: If qsort_r were portable, we could allow a custom comparator and
@@ -412,6 +530,13 @@ make_unique(size_t n) {
#endif
+template<typename First, typename Second>
+struct pair_hash {
+ size_t operator()(const std::pair<First, Second> &P) const {
+ return std::hash<First>()(P.first) * 31 + std::hash<Second>()(P.second);
+ }
+};
+
} // End llvm namespace
#endif
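
Usage sketch (illustrative, not from this patch): the new pair_hash plugged into a standard unordered container; the key and value choices here are arbitrary.

#include "llvm/ADT/STLExtras.h"
#include <string>
#include <unordered_map>
#include <utility>

// pair_hash combines std::hash of both members (first * 31 + second), which
// lets a std::pair serve directly as an unordered_map key.
typedef std::unordered_map<std::pair<int, std::string>, double,
                           llvm::pair_hash<int, std::string> > EdgeWeightMap;

static void demo() {
  EdgeWeightMap Weights;
  Weights[std::make_pair(1, std::string("x"))] = 2.5;
}
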
diff --git a/include/llvm/ADT/ScopedHashTable.h b/include/llvm/ADT/ScopedHashTable.h
index efddd9f..3cc7738 100644
--- a/include/llvm/ADT/ScopedHashTable.h
+++ b/include/llvm/ADT/ScopedHashTable.h
@@ -159,18 +159,16 @@ private:
void operator=(const ScopedHashTable&); // NOT YET IMPLEMENTED
friend class ScopedHashTableScope<K, V, KInfo, AllocatorTy>;
public:
- ScopedHashTable() : CurScope(0) {}
+ ScopedHashTable() : CurScope(nullptr) {}
ScopedHashTable(AllocatorTy A) : CurScope(0), Allocator(A) {}
~ScopedHashTable() {
- assert(CurScope == 0 && TopLevelMap.empty() && "Scope imbalance!");
+ assert(!CurScope && TopLevelMap.empty() && "Scope imbalance!");
}
/// Access to the allocator.
- typedef typename ReferenceAdder<AllocatorTy>::result AllocatorRefTy;
- typedef typename ReferenceAdder<const AllocatorTy>::result AllocatorCRefTy;
- AllocatorRefTy getAllocator() { return Allocator; }
- AllocatorCRefTy getAllocator() const { return Allocator; }
+ AllocatorTy &getAllocator() { return Allocator; }
+ const AllocatorTy &getAllocator() const { return Allocator; }
bool count(const K &Key) const {
return TopLevelMap.count(Key);
@@ -222,7 +220,7 @@ ScopedHashTableScope<K, V, KInfo, Allocator>::
ScopedHashTableScope(ScopedHashTable<K, V, KInfo, Allocator> &ht) : HT(ht) {
PrevScope = HT.CurScope;
HT.CurScope = this;
- LastValInScope = 0;
+ LastValInScope = nullptr;
}
template <typename K, typename V, typename KInfo, typename Allocator>
@@ -233,7 +231,7 @@ ScopedHashTableScope<K, V, KInfo, Allocator>::~ScopedHashTableScope() {
// Pop and delete all values corresponding to this scope.
while (ScopedHashTableVal<K, V> *ThisEntry = LastValInScope) {
// Pop this value out of the TopLevelMap.
- if (ThisEntry->getNextForKey() == 0) {
+ if (!ThisEntry->getNextForKey()) {
assert(HT.TopLevelMap[ThisEntry->getKey()] == ThisEntry &&
"Scope imbalance!");
HT.TopLevelMap.erase(ThisEntry->getKey());
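
Usage sketch (illustrative, not from this patch) of the scope discipline these hunks touch: insertions made while a scope object is live are popped when it is destroyed.

#include "llvm/ADT/ScopedHashTable.h"
#include <cassert>

static void demo() {
  llvm::ScopedHashTable<int, int> Table;
  llvm::ScopedHashTable<int, int>::ScopeTy Outer(Table);
  Table.insert(7, 1);
  {
    llvm::ScopedHashTable<int, int>::ScopeTy Inner(Table);
    Table.insert(7, 2);            // Shadows the outer binding.
    assert(Table.lookup(7) == 2);
  }                                // Inner's destructor pops its insertions.
  assert(Table.lookup(7) == 1);
}
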
diff --git a/include/llvm/ADT/SmallVector.h b/include/llvm/ADT/SmallVector.h
index 0a4140e..dcf0354 100644
--- a/include/llvm/ADT/SmallVector.h
+++ b/include/llvm/ADT/SmallVector.h
@@ -220,28 +220,20 @@ protected:
/// Guarantees space for at least one more element, or MinSize more
/// elements if specified.
void grow(size_t MinSize = 0);
-
+
public:
void push_back(const T &Elt) {
- if (this->EndX < this->CapacityX) {
- Retry:
- ::new ((void*) this->end()) T(Elt);
- this->setEnd(this->end()+1);
- return;
- }
- this->grow();
- goto Retry;
+ if (LLVM_UNLIKELY(this->EndX >= this->CapacityX))
+ this->grow();
+ ::new ((void*) this->end()) T(Elt);
+ this->setEnd(this->end()+1);
}
void push_back(T &&Elt) {
- if (this->EndX < this->CapacityX) {
- Retry:
- ::new ((void*) this->end()) T(::std::move(Elt));
- this->setEnd(this->end()+1);
- return;
- }
- this->grow();
- goto Retry;
+ if (LLVM_UNLIKELY(this->EndX >= this->CapacityX))
+ this->grow();
+ ::new ((void*) this->end()) T(::std::move(Elt));
+ this->setEnd(this->end()+1);
}
void pop_back() {
@@ -255,7 +247,7 @@ template <typename T, bool isPodLike>
void SmallVectorTemplateBase<T, isPodLike>::grow(size_t MinSize) {
size_t CurCapacity = this->capacity();
size_t CurSize = this->size();
- // Always grow, even from zero.
+ // Always grow, even from zero.
size_t NewCapacity = size_t(NextPowerOf2(CurCapacity+2));
if (NewCapacity < MinSize)
NewCapacity = MinSize;
@@ -335,16 +327,12 @@ protected:
}
public:
void push_back(const T &Elt) {
- if (this->EndX < this->CapacityX) {
- Retry:
- memcpy(this->end(), &Elt, sizeof(T));
- this->setEnd(this->end()+1);
- return;
- }
- this->grow();
- goto Retry;
+ if (LLVM_UNLIKELY(this->EndX >= this->CapacityX))
+ this->grow();
+ memcpy(this->end(), &Elt, sizeof(T));
+ this->setEnd(this->end()+1);
}
-
+
void pop_back() {
this->setEnd(this->end()-1);
}
@@ -493,26 +481,25 @@ public:
assert(I >= this->begin() && "Insertion iterator is out of bounds.");
assert(I <= this->end() && "Inserting past the end of the vector.");
- if (this->EndX < this->CapacityX) {
- Retry:
- ::new ((void*) this->end()) T(::std::move(this->back()));
- this->setEnd(this->end()+1);
- // Push everything else over.
- this->move_backward(I, this->end()-1, this->end());
+ if (this->EndX >= this->CapacityX) {
+ size_t EltNo = I-this->begin();
+ this->grow();
+ I = this->begin()+EltNo;
+ }
- // If we just moved the element we're inserting, be sure to update
- // the reference.
- T *EltPtr = &Elt;
- if (I <= EltPtr && EltPtr < this->EndX)
- ++EltPtr;
+ ::new ((void*) this->end()) T(::std::move(this->back()));
+ this->setEnd(this->end()+1);
+ // Push everything else over.
+ this->move_backward(I, this->end()-1, this->end());
- *I = ::std::move(*EltPtr);
- return I;
- }
- size_t EltNo = I-this->begin();
- this->grow();
- I = this->begin()+EltNo;
- goto Retry;
+ // If we just moved the element we're inserting, be sure to update
+ // the reference.
+ T *EltPtr = &Elt;
+ if (I <= EltPtr && EltPtr < this->EndX)
+ ++EltPtr;
+
+ *I = ::std::move(*EltPtr);
+ return I;
}
iterator insert(iterator I, const T &Elt) {
@@ -524,26 +511,24 @@ public:
assert(I >= this->begin() && "Insertion iterator is out of bounds.");
assert(I <= this->end() && "Inserting past the end of the vector.");
- if (this->EndX < this->CapacityX) {
- Retry:
- ::new ((void*) this->end()) T(this->back());
- this->setEnd(this->end()+1);
- // Push everything else over.
- this->move_backward(I, this->end()-1, this->end());
-
- // If we just moved the element we're inserting, be sure to update
- // the reference.
- const T *EltPtr = &Elt;
- if (I <= EltPtr && EltPtr < this->EndX)
- ++EltPtr;
-
- *I = *EltPtr;
- return I;
+ if (this->EndX >= this->CapacityX) {
+ size_t EltNo = I-this->begin();
+ this->grow();
+ I = this->begin()+EltNo;
}
- size_t EltNo = I-this->begin();
- this->grow();
- I = this->begin()+EltNo;
- goto Retry;
+ ::new ((void*) this->end()) T(this->back());
+ this->setEnd(this->end()+1);
+ // Push everything else over.
+ this->move_backward(I, this->end()-1, this->end());
+
+ // If we just moved the element we're inserting, be sure to update
+ // the reference.
+ const T *EltPtr = &Elt;
+ if (I <= EltPtr && EltPtr < this->EndX)
+ ++EltPtr;
+
+ *I = *EltPtr;
+ return I;
}
iterator insert(iterator I, size_type NumToInsert, const T &Elt) {
@@ -820,7 +805,7 @@ SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
this->grow(RHSSize);
} else if (CurSize) {
// Otherwise, use assignment for the already-constructed elements.
- this->move(RHS.begin(), RHS.end(), this->begin());
+ this->move(RHS.begin(), RHS.begin()+CurSize, this->begin());
}
// Move-construct the new elements in place.
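
The EltPtr adjustment kept in the rewritten insert() above exists because the element being inserted may itself live inside the vector; a usage sketch (illustrative, not from this patch):

#include "llvm/ADT/SmallVector.h"
#include <cassert>

static void demo() {
  llvm::SmallVector<int, 8> V;
  V.push_back(1);
  V.push_back(2);
  V.push_back(3);
  // Inserting a reference to an element of V itself: insert() shifts that
  // element while making room, so the implementation re-points at it before
  // performing the final assignment.
  V.insert(V.begin(), V.back());
  assert(V.size() == 4 && V[0] == 3);
}
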
diff --git a/include/llvm/ADT/SparseMultiSet.h b/include/llvm/ADT/SparseMultiSet.h
index 797a898..d2b2f8d 100644
--- a/include/llvm/ADT/SparseMultiSet.h
+++ b/include/llvm/ADT/SparseMultiSet.h
@@ -187,7 +187,7 @@ public:
typedef const ValueT *const_pointer;
SparseMultiSet()
- : Sparse(0), Universe(0), FreelistIdx(SMSNode::INVALID), NumFree(0) { }
+ : Sparse(nullptr), Universe(0), FreelistIdx(SMSNode::INVALID), NumFree(0) {}
~SparseMultiSet() { free(Sparse); }
diff --git a/include/llvm/ADT/SparseSet.h b/include/llvm/ADT/SparseSet.h
index b46ccc9..899f2e4 100644
--- a/include/llvm/ADT/SparseSet.h
+++ b/include/llvm/ADT/SparseSet.h
@@ -142,7 +142,7 @@ public:
typedef ValueT *pointer;
typedef const ValueT *const_pointer;
- SparseSet() : Sparse(0), Universe(0) {}
+ SparseSet() : Sparse(nullptr), Universe(0) {}
~SparseSet() { free(Sparse); }
/// setUniverse - Set the universe size which determines the largest key the
diff --git a/include/llvm/ADT/Statistic.h b/include/llvm/ADT/Statistic.h
index 26aac7b..d98abc3 100644
--- a/include/llvm/ADT/Statistic.h
+++ b/include/llvm/ADT/Statistic.h
@@ -46,7 +46,7 @@ public:
/// construct - This should only be called for non-global statistics.
void construct(const char *name, const char *desc) {
Name = name; Desc = desc;
- Value = 0; Initialized = 0;
+ Value = 0; Initialized = false;
}
// Allow use of this class as the value itself.
diff --git a/include/llvm/ADT/StringExtras.h b/include/llvm/ADT/StringExtras.h
index a0b3fe7..a152f4d 100644
--- a/include/llvm/ADT/StringExtras.h
+++ b/include/llvm/ADT/StringExtras.h
@@ -141,7 +141,7 @@ void SplitString(StringRef Source,
// better: http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx
// X*33+c -> X*33^c
static inline unsigned HashString(StringRef Str, unsigned Result = 0) {
- for (unsigned i = 0, e = Str.size(); i != e; ++i)
+ for (StringRef::size_type i = 0, e = Str.size(); i != e; ++i)
Result = Result * 33 + (unsigned char)Str[i];
return Result;
}
diff --git a/include/llvm/ADT/StringMap.h b/include/llvm/ADT/StringMap.h
index 4e74cf6..ecac5dd 100644
--- a/include/llvm/ADT/StringMap.h
+++ b/include/llvm/ADT/StringMap.h
@@ -17,6 +17,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include <cstring>
+#include <utility>
namespace llvm {
template<typename ValueT>
@@ -48,13 +49,20 @@ protected:
unsigned NumTombstones;
unsigned ItemSize;
protected:
- explicit StringMapImpl(unsigned itemSize) : ItemSize(itemSize) {
- // Initialize the map with zero buckets to allocation.
- TheTable = 0;
- NumBuckets = 0;
- NumItems = 0;
- NumTombstones = 0;
+ explicit StringMapImpl(unsigned itemSize)
+ : TheTable(nullptr),
+        // Initialize the map with zero buckets to defer allocation.
+ NumBuckets(0), NumItems(0), NumTombstones(0), ItemSize(itemSize) {}
+ StringMapImpl(StringMapImpl &&RHS)
+ : TheTable(RHS.TheTable), NumBuckets(RHS.NumBuckets),
+ NumItems(RHS.NumItems), NumTombstones(RHS.NumTombstones),
+ ItemSize(RHS.ItemSize) {
+ RHS.TheTable = nullptr;
+ RHS.NumBuckets = 0;
+ RHS.NumItems = 0;
+ RHS.NumTombstones = 0;
}
+
StringMapImpl(unsigned InitSize, unsigned ItemSize);
void RehashTable();
@@ -109,8 +117,8 @@ public:
explicit StringMapEntry(unsigned strLen)
: StringMapEntryBase(strLen), second() {}
- StringMapEntry(unsigned strLen, const ValueTy &V)
- : StringMapEntryBase(strLen), second(V) {}
+ StringMapEntry(unsigned strLen, ValueTy V)
+ : StringMapEntryBase(strLen), second(std::move(V)) {}
StringRef getKey() const {
return StringRef(getKeyData(), getKeyLength());
@@ -146,7 +154,7 @@ public:
static_cast<StringMapEntry*>(Allocator.Allocate(AllocSize,Alignment));
// Default construct the value.
- new (NewItem) StringMapEntry(KeyLength, InitVal);
+ new (NewItem) StringMapEntry(KeyLength, std::move(InitVal));
// Copy the string information.
char *StrBuffer = const_cast<char*>(NewItem->getKeyData());
@@ -166,7 +174,7 @@ public:
static StringMapEntry *Create(const char *KeyStart, const char *KeyEnd,
InitType InitVal) {
MallocAllocator A;
- return Create(KeyStart, KeyEnd, A, InitVal);
+ return Create(KeyStart, KeyEnd, A, std::move(InitVal));
}
static StringMapEntry *Create(const char *KeyStart, const char *KeyEnd) {
@@ -198,8 +206,10 @@ public:
template<typename AllocatorTy>
void Destroy(AllocatorTy &Allocator) {
// Free memory referenced by the item.
+ unsigned AllocSize =
+ static_cast<unsigned>(sizeof(StringMapEntry)) + getKeyLength() + 1;
this->~StringMapEntry();
- Allocator.Deallocate(this);
+ Allocator.Deallocate(static_cast<void *>(this), AllocSize);
}
/// Destroy this object, releasing memory back to the malloc allocator.
@@ -231,23 +241,19 @@ public:
: StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))),
Allocator(A) {}
- StringMap(const StringMap &RHS)
- : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))) {
- assert(RHS.empty() &&
- "Copy ctor from non-empty stringmap not implemented yet!");
- (void)RHS;
- }
- void operator=(const StringMap &RHS) {
- assert(RHS.empty() &&
- "assignment from non-empty stringmap not implemented yet!");
- (void)RHS;
- clear();
+ StringMap(StringMap &&RHS)
+ : StringMapImpl(std::move(RHS)), Allocator(std::move(RHS.Allocator)) {}
+
+ StringMap &operator=(StringMap RHS) {
+ StringMapImpl::swap(RHS);
+ std::swap(Allocator, RHS.Allocator);
+ return *this;
}
- typedef typename ReferenceAdder<AllocatorTy>::result AllocatorRefTy;
- typedef typename ReferenceAdder<const AllocatorTy>::result AllocatorCRefTy;
- AllocatorRefTy getAllocator() { return Allocator; }
- AllocatorCRefTy getAllocator() const { return Allocator; }
+ // FIXME: Implement copy operations if/when they're needed.
+
+ AllocatorTy &getAllocator() { return Allocator; }
+ const AllocatorTy &getAllocator() const { return Allocator; }
typedef const char* key_type;
typedef ValueTy mapped_type;
@@ -330,7 +336,7 @@ public:
if (Bucket && Bucket != getTombstoneVal()) {
static_cast<MapEntryTy*>(Bucket)->Destroy(Allocator);
}
- Bucket = 0;
+ Bucket = nullptr;
}
NumItems = 0;
@@ -348,7 +354,7 @@ public:
return *static_cast<MapEntryTy*>(Bucket);
MapEntryTy *NewItem =
- MapEntryTy::Create(Key.begin(), Key.end(), Allocator, Val);
+ MapEntryTy::Create(Key.begin(), Key.end(), Allocator, std::move(Val));
if (Bucket == getTombstoneVal())
--NumTombstones;
@@ -410,7 +416,7 @@ protected:
public:
typedef StringMapEntry<ValueTy> value_type;
- StringMapConstIterator() : Ptr(0) { }
+ StringMapConstIterator() : Ptr(nullptr) { }
explicit StringMapConstIterator(StringMapEntryBase **Bucket,
bool NoAdvance = false)
@@ -443,7 +449,7 @@ public:
private:
void AdvancePastEmptyBuckets() {
- while (*Ptr == 0 || *Ptr == StringMapImpl::getTombstoneVal())
+ while (*Ptr == nullptr || *Ptr == StringMapImpl::getTombstoneVal())
++Ptr;
}
};
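
Usage sketch (illustrative, not from this patch) of the move support added above; copying remains unimplemented (see the FIXME), but moves transfer the bucket table in constant time.

#include "llvm/ADT/StringMap.h"
#include <cassert>
#include <utility>

static void demo() {
  llvm::StringMap<int> A;
  A.GetOrCreateValue("hello", 1);
  A.GetOrCreateValue("world", 2);

  // Move construction steals A's buckets; A is left empty but still valid.
  llvm::StringMap<int> B(std::move(A));

  // operator=(StringMap RHS) takes its argument by value and swaps, so it
  // doubles as move assignment.
  llvm::StringMap<int> C;
  C = std::move(B);
  assert(C.count("hello"));
}
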
diff --git a/include/llvm/ADT/StringRef.h b/include/llvm/ADT/StringRef.h
index 0514d7b..1f413e8 100644
--- a/include/llvm/ADT/StringRef.h
+++ b/include/llvm/ADT/StringRef.h
@@ -10,7 +10,6 @@
#ifndef LLVM_ADT_STRINGREF_H
#define LLVM_ADT_STRINGREF_H
-#include "llvm/Support/Allocator.h"
#include <algorithm>
#include <cassert>
#include <cstring>
@@ -70,7 +69,7 @@ namespace llvm {
/// @{
/// Construct an empty string ref.
- /*implicit*/ StringRef() : Data(0), Length(0) {}
+ /*implicit*/ StringRef() : Data(nullptr), Length(0) {}
/// Construct a string ref from a cstring.
/*implicit*/ StringRef(const char *Str)
@@ -124,9 +123,9 @@ namespace llvm {
return Data[Length-1];
}
- // copy - Allocate copy in BumpPtrAllocator and return StringRef to it.
- StringRef copy(BumpPtrAllocator &Allocator) {
- char *S = Allocator.Allocate<char>(Length);
+ // copy - Allocate copy in Allocator and return StringRef to it.
+ template <typename Allocator> StringRef copy(Allocator &A) {
+ char *S = A.template Allocate<char>(Length);
std::copy(begin(), end(), S);
return StringRef(S, Length);
}
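
Usage sketch (illustrative, not from this patch): the generalized copy() works with any allocator exposing a templated Allocate<char>(N), e.g. BumpPtrAllocator. Note that the caller now includes Allocator.h itself, since StringRef.h no longer does.

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include <string>

static llvm::StringRef internString(llvm::BumpPtrAllocator &Alloc,
                                    const std::string &S) {
  // The returned StringRef points into memory owned by Alloc, so it remains
  // valid after S is destroyed (for as long as the allocator lives).
  return llvm::StringRef(S).copy(Alloc);
}
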
@@ -186,7 +185,7 @@ namespace llvm {
/// str - Get the contents as an std::string.
std::string str() const {
- if (Data == 0) return std::string();
+ if (!Data) return std::string();
return std::string(Data, Length);
}
diff --git a/include/llvm/ADT/StringSwitch.h b/include/llvm/ADT/StringSwitch.h
index 7fd6e27..0393a0c 100644
--- a/include/llvm/ADT/StringSwitch.h
+++ b/include/llvm/ADT/StringSwitch.h
@@ -49,7 +49,7 @@ class StringSwitch {
public:
explicit StringSwitch(StringRef S)
- : Str(S), Result(0) { }
+ : Str(S), Result(nullptr) { }
template<unsigned N>
StringSwitch& Case(const char (&S)[N], const T& Value) {
diff --git a/include/llvm/ADT/TinyPtrVector.h b/include/llvm/ADT/TinyPtrVector.h
index 1df8d66..5669b2a 100644
--- a/include/llvm/ADT/TinyPtrVector.h
+++ b/include/llvm/ADT/TinyPtrVector.h
@@ -69,7 +69,7 @@ public:
}
TinyPtrVector(TinyPtrVector &&RHS) : Val(RHS.Val) {
- RHS.Val = (EltTy)0;
+ RHS.Val = (EltTy)nullptr;
}
TinyPtrVector &operator=(TinyPtrVector &&RHS) {
if (this == &RHS)
@@ -92,7 +92,7 @@ public:
}
Val = RHS.Val;
- RHS.Val = (EltTy)0;
+ RHS.Val = (EltTy)nullptr;
return *this;
}
@@ -174,7 +174,7 @@ public:
}
void push_back(EltTy NewVal) {
- assert(NewVal != 0 && "Can't add a null value");
+ assert(NewVal && "Can't add a null value");
// If we have nothing, add something.
if (Val.isNull()) {
@@ -195,7 +195,7 @@ public:
void pop_back() {
// If we have a single value, convert to empty.
if (Val.template is<EltTy>())
- Val = (EltTy)0;
+ Val = (EltTy)nullptr;
else if (VecTy *Vec = Val.template get<VecTy*>())
Vec->pop_back();
}
@@ -203,7 +203,7 @@ public:
void clear() {
// If we have a single value, convert to empty.
if (Val.template is<EltTy>()) {
- Val = (EltTy)0;
+ Val = (EltTy)nullptr;
} else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
// If we have a vector form, just clear it.
Vec->clear();
@@ -218,7 +218,7 @@ public:
// If we have a single value, convert to empty.
if (Val.template is<EltTy>()) {
if (I == begin())
- Val = (EltTy)0;
+ Val = (EltTy)nullptr;
} else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
// multiple items in a vector; just do the erase, there is no
// benefit to collapsing back to a pointer
@@ -234,7 +234,7 @@ public:
if (Val.template is<EltTy>()) {
if (S == begin() && S != E)
- Val = (EltTy)0;
+ Val = (EltTy)nullptr;
} else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
return Vec->erase(S, E);
}
diff --git a/include/llvm/ADT/Triple.h b/include/llvm/ADT/Triple.h
index 185003d..95f3380 100644
--- a/include/llvm/ADT/Triple.h
+++ b/include/llvm/ADT/Triple.h
@@ -48,7 +48,8 @@ public:
arm, // ARM (little endian): arm, armv.*, xscale
armeb, // ARM (big endian): armeb
- arm64, // ARM: arm64
+ arm64, // ARM64 (little endian): arm64
+ arm64_be, // ARM64 (big endian): arm64_be
aarch64, // AArch64 (little endian): aarch64
aarch64_be, // AArch64 (big endian): aarch64_be
hexagon, // Hexagon: hexagon
@@ -335,6 +336,10 @@ public:
return isMacOSX() || isiOS();
}
+ bool isOSFreeBSD() const {
+ return getOS() == Triple::FreeBSD;
+ }
+
bool isWindowsMSVCEnvironment() const {
return getOS() == Triple::Win32 &&
(getEnvironment() == Triple::UnknownEnvironment ||
@@ -362,7 +367,7 @@ public:
/// \brief Is this a "Windows" OS targeting a "MSVCRT.dll" environment.
bool isOSMSVCRT() const {
- return getOS() == Triple::Win32 || getOS() == Triple::MinGW32;
+ return isWindowsMSVCEnvironment() || isWindowsGNUEnvironment();
}
/// \brief Tests whether the OS is Windows.
diff --git a/include/llvm/ADT/Twine.h b/include/llvm/ADT/Twine.h
index e16c6b4..a54fd74 100644
--- a/include/llvm/ADT/Twine.h
+++ b/include/llvm/ADT/Twine.h
@@ -374,7 +374,7 @@ namespace llvm {
static Twine utohexstr(const uint64_t &Val) {
Child LHS, RHS;
LHS.uHex = &Val;
- RHS.twine = 0;
+ RHS.twine = nullptr;
return Twine(LHS, UHexKind, RHS, EmptyKind);
}
diff --git a/include/llvm/ADT/edit_distance.h b/include/llvm/ADT/edit_distance.h
index f77ef13..9ee1edc 100644
--- a/include/llvm/ADT/edit_distance.h
+++ b/include/llvm/ADT/edit_distance.h
@@ -17,8 +17,8 @@
#define LLVM_ADT_EDIT_DISTANCE_H
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/OwningPtr.h"
#include <algorithm>
+#include <memory>
namespace llvm {
@@ -57,7 +57,7 @@ unsigned ComputeEditDistance(ArrayRef<T> FromArray, ArrayRef<T> ToArray,
const unsigned SmallBufferSize = 64;
unsigned SmallBuffer[SmallBufferSize];
- llvm::OwningArrayPtr<unsigned> Allocated;
+ std::unique_ptr<unsigned[]> Allocated;
unsigned *Previous = SmallBuffer;
if (2*(n + 1) > SmallBufferSize) {
Previous = new unsigned [2*(n+1)];
diff --git a/include/llvm/ADT/ilist.h b/include/llvm/ADT/ilist.h
index 6aeaa91..bc14845 100644
--- a/include/llvm/ADT/ilist.h
+++ b/include/llvm/ADT/ilist.h
@@ -83,7 +83,7 @@ struct ilist_sentinel_traits {
/// provideInitialHead - when constructing an ilist, provide a starting
/// value for its Head
/// @return null node to indicate that it needs to be allocated later
- static NodeTy *provideInitialHead() { return 0; }
+ static NodeTy *provideInitialHead() { return nullptr; }
/// ensureHead - make sure that Head is either already
/// initialized or assigned a fresh sentinel
@@ -92,7 +92,7 @@ struct ilist_sentinel_traits {
if (!Head) {
Head = ilist_traits<NodeTy>::createSentinel();
ilist_traits<NodeTy>::noteHead(Head, Head);
- ilist_traits<NodeTy>::setNext(Head, 0);
+ ilist_traits<NodeTy>::setNext(Head, nullptr);
return Head;
}
return ilist_traits<NodeTy>::getPrev(Head);
@@ -175,7 +175,7 @@ public:
ilist_iterator(pointer NP) : NodePtr(NP) {}
ilist_iterator(reference NR) : NodePtr(&NR) {}
- ilist_iterator() : NodePtr(0) {}
+ ilist_iterator() : NodePtr(nullptr) {}
// This is templated so that we can allow constructing a const iterator from
// a nonconst iterator...
@@ -383,7 +383,7 @@ public:
// Miscellaneous inspection routines.
size_type max_size() const { return size_type(-1); }
bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const {
- return Head == 0 || Head == getTail();
+ return !Head || Head == getTail();
}
// Front and back accessor functions...
@@ -451,8 +451,8 @@ public:
// an ilist (and potentially deleted) with iterators still pointing at it.
// When those iterators are incremented or decremented, they will assert on
// the null next/prev pointer instead of "usually working".
- this->setNext(Node, 0);
- this->setPrev(Node, 0);
+ this->setNext(Node, nullptr);
+ this->setPrev(Node, nullptr);
return Node;
}
@@ -494,9 +494,9 @@ private:
// Note: we have to be careful about the case when we move the first node
// in the list. This node is the list sentinel node and we can't move it.
NodeTy *ThisSentinel = getTail();
- setTail(0);
+ setTail(nullptr);
NodeTy *L2Sentinel = L2.getTail();
- L2.setTail(0);
+ L2.setTail(nullptr);
// Remove [first, last) from its old position.
NodeTy *First = &*first, *Prev = this->getPrev(First);
@@ -537,7 +537,7 @@ public:
//
size_type LLVM_ATTRIBUTE_UNUSED_RESULT size() const {
- if (Head == 0) return 0; // Don't require construction of sentinel if empty.
+ if (!Head) return 0; // Don't require construction of sentinel if empty.
return std::distance(begin(), end());
}
diff --git a/include/llvm/ADT/ilist_node.h b/include/llvm/ADT/ilist_node.h
index 0361244..85aa7a4 100644
--- a/include/llvm/ADT/ilist_node.h
+++ b/include/llvm/ADT/ilist_node.h
@@ -30,7 +30,7 @@ protected:
NodeTy *getPrev() { return Prev; }
const NodeTy *getPrev() const { return Prev; }
void setPrev(NodeTy *P) { Prev = P; }
- ilist_half_node() : Prev(0) {}
+ ilist_half_node() : Prev(nullptr) {}
};
template<typename NodeTy>
@@ -48,7 +48,7 @@ class ilist_node : private ilist_half_node<NodeTy> {
const NodeTy *getNext() const { return Next; }
void setNext(NodeTy *N) { Next = N; }
protected:
- ilist_node() : Next(0) {}
+ ilist_node() : Next(nullptr) {}
public:
/// @name Adjacent Node Accessors
@@ -60,7 +60,7 @@ public:
// Check for sentinel.
if (!Prev->getNext())
- return 0;
+ return nullptr;
return Prev;
}
@@ -71,7 +71,7 @@ public:
// Check for sentinel.
if (!Prev->getNext())
- return 0;
+ return nullptr;
return Prev;
}
@@ -82,7 +82,7 @@ public:
// Check for sentinel.
if (!Next->getNext())
- return 0;
+ return nullptr;
return Next;
}
@@ -93,7 +93,7 @@ public:
// Check for sentinel.
if (!Next->getNext())
- return 0;
+ return nullptr;
return Next;
}
diff --git a/include/llvm/ADT/iterator.h b/include/llvm/ADT/iterator.h
new file mode 100644
index 0000000..56041db
--- /dev/null
+++ b/include/llvm/ADT/iterator.h
@@ -0,0 +1,244 @@
+//===- iterator.h - Utilities for using and defining iterators --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ITERATOR_H
+#define LLVM_ADT_ITERATOR_H
+
+#include <iterator>
+#include <cstddef>
+
+namespace llvm {
+
+/// \brief CRTP base class which implements the entire standard iterator facade
+/// in terms of a minimal subset of the interface.
+///
+/// Use this when it is reasonable to implement most of the iterator
+/// functionality in terms of a core subset. If you need special behavior or
+/// there are performance implications for this, you may want to override the
+/// relevant members instead.
+///
+/// Note, one abstraction that this does *not* provide is implementing
+/// subtraction in terms of addition by negating the difference. Negation isn't
+/// always information preserving, and I can see very reasonable iterator
+/// designs where this doesn't work well. It doesn't really force much added
+/// boilerplate anyway.
+///
+/// Another abstraction that this doesn't provide is implementing increment in
+/// terms of addition of one. These aren't equivalent for all iterator
+/// categories, and respecting that adds a lot of complexity for little gain.
+template <typename DerivedT, typename IteratorCategoryT, typename T,
+ typename DifferenceTypeT = std::ptrdiff_t, typename PointerT = T *,
+ typename ReferenceT = T &>
+class iterator_facade_base
+ : public std::iterator<IteratorCategoryT, T, DifferenceTypeT, PointerT,
+ ReferenceT> {
+protected:
+ enum {
+ IsRandomAccess =
+ std::is_base_of<std::random_access_iterator_tag, IteratorCategoryT>::value,
+ IsBidirectional =
+ std::is_base_of<std::bidirectional_iterator_tag, IteratorCategoryT>::value,
+ };
+
+public:
+ DerivedT operator+(DifferenceTypeT n) const {
+ static_assert(
+ IsRandomAccess,
+ "The '+' operator is only defined for random access iterators.");
+ DerivedT tmp = *static_cast<const DerivedT *>(this);
+ tmp += n;
+ return tmp;
+ }
+ friend DerivedT operator+(DifferenceTypeT n, const DerivedT &i) {
+ static_assert(
+ IsRandomAccess,
+ "The '+' operator is only defined for random access iterators.");
+ return i + n;
+ }
+ DerivedT operator-(DifferenceTypeT n) const {
+ static_assert(
+ IsRandomAccess,
+ "The '-' operator is only defined for random access iterators.");
+ DerivedT tmp = *static_cast<const DerivedT *>(this);
+ tmp -= n;
+ return tmp;
+ }
+
+ DerivedT &operator++() {
+ return static_cast<DerivedT *>(this)->operator+=(1);
+ }
+ DerivedT operator++(int) {
+ DerivedT tmp = *static_cast<DerivedT *>(this);
+ ++*static_cast<DerivedT *>(this);
+ return tmp;
+ }
+ DerivedT &operator--() {
+ static_assert(
+ IsBidirectional,
+ "The decrement operator is only defined for bidirectional iterators.");
+ return static_cast<DerivedT *>(this)->operator-=(1);
+ }
+ DerivedT operator--(int) {
+ static_assert(
+ IsBidirectional,
+ "The decrement operator is only defined for bidirectional iterators.");
+ DerivedT tmp = *static_cast<DerivedT *>(this);
+ --*static_cast<DerivedT *>(this);
+ return tmp;
+ }
+
+ bool operator!=(const DerivedT &RHS) const {
+ return !static_cast<const DerivedT *>(this)->operator==(RHS);
+ }
+
+ bool operator>(const DerivedT &RHS) const {
+ static_assert(
+ IsRandomAccess,
+ "Relational operators are only defined for random access iterators.");
+ return !static_cast<const DerivedT *>(this)->operator<(RHS) &&
+ !static_cast<const DerivedT *>(this)->operator==(RHS);
+ }
+ bool operator<=(const DerivedT &RHS) const {
+ static_assert(
+ IsRandomAccess,
+ "Relational operators are only defined for random access iterators.");
+ return !static_cast<const DerivedT *>(this)->operator>(RHS);
+ }
+ bool operator>=(const DerivedT &RHS) const {
+ static_assert(
+ IsRandomAccess,
+ "Relational operators are only defined for random access iterators.");
+ return !static_cast<const DerivedT *>(this)->operator<(RHS);
+ }
+
+ PointerT operator->() const {
+ return &static_cast<const DerivedT *>(this)->operator*();
+ }
+ ReferenceT operator[](DifferenceTypeT n) const {
+ static_assert(IsRandomAccess,
+ "Subscripting is only defined for random access iterators.");
+ return *static_cast<const DerivedT *>(this)->operator+(n);
+ }
+};
+
+/// \brief CRTP base class for adapting an iterator to a different type.
+///
+/// This class can be used through CRTP to adapt one iterator into another.
+/// Typically this is done through providing in the derived class a custom \c
+/// operator* implementation. Other methods can be overridden as well.
+template <
+ typename DerivedT, typename WrappedIteratorT,
+ typename IteratorCategoryT =
+ typename std::iterator_traits<WrappedIteratorT>::iterator_category,
+ typename T = typename std::iterator_traits<WrappedIteratorT>::value_type,
+ typename DifferenceTypeT =
+ typename std::iterator_traits<WrappedIteratorT>::difference_type,
+ typename PointerT = T *, typename ReferenceT = T &,
+    // Don't provide these; they are mostly here to act as aliases below.
+ typename WrappedTraitsT = std::iterator_traits<WrappedIteratorT>>
+class iterator_adaptor_base
+ : public iterator_facade_base<DerivedT, IteratorCategoryT, T,
+ DifferenceTypeT, PointerT, ReferenceT> {
+ typedef typename iterator_adaptor_base::iterator_facade_base BaseT;
+
+protected:
+ WrappedIteratorT I;
+
+ iterator_adaptor_base() {}
+
+ template <typename U>
+ explicit iterator_adaptor_base(
+ U &&u,
+ typename std::enable_if<
+ !std::is_base_of<typename std::remove_cv<
+ typename std::remove_reference<U>::type>::type,
+ DerivedT>::value,
+ int>::type = 0)
+ : I(std::forward<U &&>(u)) {}
+
+public:
+ typedef DifferenceTypeT difference_type;
+
+ DerivedT &operator+=(difference_type n) {
+ static_assert(
+ BaseT::IsRandomAccess,
+ "The '+=' operator is only defined for random access iterators.");
+ I += n;
+ return *static_cast<DerivedT *>(this);
+ }
+ DerivedT &operator-=(difference_type n) {
+ static_assert(
+ BaseT::IsRandomAccess,
+ "The '-=' operator is only defined for random access iterators.");
+ I -= n;
+ return *static_cast<DerivedT *>(this);
+ }
+ using BaseT::operator-;
+ difference_type operator-(const DerivedT &RHS) const {
+ static_assert(
+ BaseT::IsRandomAccess,
+ "The '-' operator is only defined for random access iterators.");
+ return I - RHS.I;
+ }
+
+ // We have to explicitly provide ++ and -- rather than letting the facade
+ // forward to += because WrappedIteratorT might not support +=.
+ using BaseT::operator++;
+ DerivedT &operator++() {
+ ++I;
+ return *static_cast<DerivedT *>(this);
+ }
+ using BaseT::operator--;
+ DerivedT &operator--() {
+ static_assert(
+ BaseT::IsBidirectional,
+ "The decrement operator is only defined for bidirectional iterators.");
+ --I;
+ return *static_cast<DerivedT *>(this);
+ }
+
+ bool operator==(const DerivedT &RHS) const { return I == RHS.I; }
+ bool operator<(const DerivedT &RHS) const {
+ static_assert(
+ BaseT::IsRandomAccess,
+ "Relational operators are only defined for random access iterators.");
+ return I < RHS.I;
+ }
+
+ ReferenceT operator*() const { return *I; }
+};
+
+/// \brief An iterator type that allows iterating over the pointees via some
+/// other iterator.
+///
+/// The typical usage of this is to expose a type that iterates over Ts, but
+/// which is implemented with some iterator over T*s:
+///
+/// \code
+/// typedef pointee_iterator<SmallVectorImpl<T *>::iterator> iterator;
+/// \endcode
+template <typename WrappedIteratorT,
+ typename T = typename std::remove_reference<
+ decltype(**std::declval<WrappedIteratorT>())>::type>
+struct pointee_iterator
+ : iterator_adaptor_base<
+ pointee_iterator<WrappedIteratorT>, WrappedIteratorT,
+ typename std::iterator_traits<WrappedIteratorT>::iterator_category,
+ T> {
+ pointee_iterator() {}
+ template <typename U>
+ pointee_iterator(U &&u)
+ : pointee_iterator::iterator_adaptor_base(std::forward<U &&>(u)) {}
+
+ T &operator*() const { return **this->I; }
+};
+
+}
+
+#endif
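
Usage sketch (illustrative, not from this patch): pointee_iterator presents a container of T* as if it iterated over T; the Node type below is hypothetical.

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Support/raw_ostream.h"

struct Node { int Value; };

static void printValues(llvm::SmallVectorImpl<Node *> &Nodes) {
  typedef llvm::pointee_iterator<llvm::SmallVectorImpl<Node *>::iterator>
      iterator;
  // Dereferencing the adaptor yields Node &, not Node *.
  for (iterator I(Nodes.begin()), E(Nodes.end()); I != E; ++I)
    llvm::errs() << I->Value << "\n";
}
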
diff --git a/include/llvm/ADT/iterator_range.h b/include/llvm/ADT/iterator_range.h
index 4f2f321..dd17d6c 100644
--- a/include/llvm/ADT/iterator_range.h
+++ b/include/llvm/ADT/iterator_range.h
@@ -40,6 +40,14 @@ public:
IteratorT begin() const { return begin_iterator; }
IteratorT end() const { return end_iterator; }
};
+
+/// \brief Convenience function for iterating over sub-ranges.
+///
+/// This provides a bit of syntactic sugar to make using sub-ranges
+/// in for loops easier. Analogous to std::make_pair().
+template <class T> iterator_range<T> make_range(T x, T y) {
+ return iterator_range<T>(std::move(x), std::move(y));
+}
}
#endif
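
Usage sketch (illustrative, not from this patch): make_range pairs two iterators into an object a range-based for loop accepts.

#include "llvm/ADT/iterator_range.h"
#include <vector>

static int sumFirstHalf(const std::vector<int> &V) {
  int Sum = 0;
  // make_range deduces iterator_range<It> from its arguments, analogous to
  // std::make_pair.
  for (int X : llvm::make_range(V.begin(), V.begin() + V.size() / 2))
    Sum += X;
  return Sum;
}
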
diff --git a/include/llvm/Analysis/AliasAnalysis.h b/include/llvm/Analysis/AliasAnalysis.h
index a06a562..8852866 100644
--- a/include/llvm/Analysis/AliasAnalysis.h
+++ b/include/llvm/Analysis/AliasAnalysis.h
@@ -75,7 +75,7 @@ protected:
public:
static char ID; // Class identification, replacement for typeinfo
- AliasAnalysis() : DL(0), TLI(0), AA(0) {}
+ AliasAnalysis() : DL(nullptr), TLI(nullptr), AA(nullptr) {}
virtual ~AliasAnalysis(); // We want to be subclassed
/// UnknownSize - This is a special value which can be used with the
@@ -116,8 +116,8 @@ public:
/// the location, or null if there is no known unique tag.
const MDNode *TBAATag;
- explicit Location(const Value *P = 0, uint64_t S = UnknownSize,
- const MDNode *N = 0)
+ explicit Location(const Value *P = nullptr, uint64_t S = UnknownSize,
+ const MDNode *N = nullptr)
: Ptr(P), Size(S), TBAATag(N) {}
Location getWithNewPtr(const Value *NewPtr) const {
@@ -134,7 +134,7 @@ public:
Location getWithoutTBAATag() const {
Location Copy(*this);
- Copy.TBAATag = 0;
+ Copy.TBAATag = nullptr;
return Copy;
}
};
@@ -560,12 +560,12 @@ struct DenseMapInfo<AliasAnalysis::Location> {
static inline AliasAnalysis::Location getEmptyKey() {
return
AliasAnalysis::Location(DenseMapInfo<const Value *>::getEmptyKey(),
- 0, 0);
+ 0, nullptr);
}
static inline AliasAnalysis::Location getTombstoneKey() {
return
AliasAnalysis::Location(DenseMapInfo<const Value *>::getTombstoneKey(),
- 0, 0);
+ 0, nullptr);
}
static unsigned getHashValue(const AliasAnalysis::Location &Val) {
return DenseMapInfo<const Value *>::getHashValue(Val.Ptr) ^
diff --git a/include/llvm/Analysis/AliasSetTracker.h b/include/llvm/Analysis/AliasSetTracker.h
index 72e75ec..6117d91 100644
--- a/include/llvm/Analysis/AliasSetTracker.h
+++ b/include/llvm/Analysis/AliasSetTracker.h
@@ -43,13 +43,13 @@ class AliasSet : public ilist_node<AliasSet> {
const MDNode *TBAAInfo;
public:
PointerRec(Value *V)
- : Val(V), PrevInList(0), NextInList(0), AS(0), Size(0),
+ : Val(V), PrevInList(nullptr), NextInList(nullptr), AS(nullptr), Size(0),
TBAAInfo(DenseMapInfo<const MDNode *>::getEmptyKey()) {}
Value *getValue() const { return Val; }
PointerRec *getNext() const { return NextInList; }
- bool hasAliasSet() const { return AS != 0; }
+ bool hasAliasSet() const { return AS != nullptr; }
PointerRec** setPrevInList(PointerRec **PIL) {
PrevInList = PIL;
@@ -75,7 +75,7 @@ class AliasSet : public ilist_node<AliasSet> {
// If we have missing or conflicting TBAAInfo, return null.
if (TBAAInfo == DenseMapInfo<const MDNode *>::getEmptyKey() ||
TBAAInfo == DenseMapInfo<const MDNode *>::getTombstoneKey())
- return 0;
+ return nullptr;
return TBAAInfo;
}
@@ -91,7 +91,7 @@ class AliasSet : public ilist_node<AliasSet> {
}
void setAliasSet(AliasSet *as) {
- assert(AS == 0 && "Already have an alias set!");
+ assert(!AS && "Already have an alias set!");
AS = as;
}
@@ -100,7 +100,7 @@ class AliasSet : public ilist_node<AliasSet> {
*PrevInList = NextInList;
if (AS->PtrListEnd == &NextInList) {
AS->PtrListEnd = PrevInList;
- assert(*AS->PtrListEnd == 0 && "List not terminated right!");
+ assert(*AS->PtrListEnd == nullptr && "List not terminated right!");
}
delete this;
}
@@ -174,7 +174,7 @@ public:
class iterator;
iterator begin() const { return iterator(PtrList); }
iterator end() const { return iterator(); }
- bool empty() const { return PtrList == 0; }
+ bool empty() const { return PtrList == nullptr; }
void print(raw_ostream &OS) const;
void dump() const;
@@ -184,7 +184,7 @@ public:
PointerRec, ptrdiff_t> {
PointerRec *CurNode;
public:
- explicit iterator(PointerRec *CN = 0) : CurNode(CN) {}
+ explicit iterator(PointerRec *CN = nullptr) : CurNode(CN) {}
bool operator==(const iterator& x) const {
return CurNode == x.CurNode;
@@ -220,8 +220,9 @@ private:
// Can only be created by AliasSetTracker. Also, ilist creates one
// to serve as a sentinel.
friend struct ilist_sentinel_traits<AliasSet>;
- AliasSet() : PtrList(0), PtrListEnd(&PtrList), Forward(0), RefCount(0),
- AccessTy(NoModRef), AliasTy(MustAlias), Volatile(false) {
+ AliasSet()
+ : PtrList(nullptr), PtrListEnd(&PtrList), Forward(nullptr), RefCount(0),
+ AccessTy(NoModRef), AliasTy(MustAlias), Volatile(false) {
}
AliasSet(const AliasSet &AS) LLVM_DELETED_FUNCTION;
@@ -285,7 +286,7 @@ class AliasSetTracker {
void deleted() override;
void allUsesReplacedWith(Value *) override;
public:
- ASTCallbackVH(Value *V, AliasSetTracker *AST = 0);
+ ASTCallbackVH(Value *V, AliasSetTracker *AST = nullptr);
ASTCallbackVH &operator=(Value *V);
};
/// ASTCallbackVHDenseMapInfo - Traits to tell DenseMap that tell us how to
@@ -354,7 +355,7 @@ public:
/// pointer didn't alias anything).
AliasSet &getAliasSetForPointer(Value *P, uint64_t Size,
const MDNode *TBAAInfo,
- bool *New = 0);
+ bool *New = nullptr);
/// getAliasSetForPointerIfExists - Return the alias set containing the
/// location specified if one exists, otherwise return null.
@@ -408,7 +409,7 @@ private:
// entry for the pointer if it doesn't already exist.
AliasSet::PointerRec &getEntryFor(Value *V) {
AliasSet::PointerRec *&Entry = PointerMap[ASTCallbackVH(V, this)];
- if (Entry == 0)
+ if (!Entry)
Entry = new AliasSet::PointerRec(V);
return *Entry;
}
diff --git a/include/llvm/Analysis/BlockFrequencyImpl.h b/include/llvm/Analysis/BlockFrequencyImpl.h
deleted file mode 100644
index 5488847..0000000
--- a/include/llvm/Analysis/BlockFrequencyImpl.h
+++ /dev/null
@@ -1,379 +0,0 @@
-//===-- BlockFrequencyImpl.h - Block Frequency Implementation --*- C++ -*--===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Shared implementation of BlockFrequency for IR and Machine Instructions.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_ANALYSIS_BLOCKFREQUENCYIMPL_H
-#define LLVM_ANALYSIS_BLOCKFREQUENCYIMPL_H
-
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/PostOrderIterator.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/IR/BasicBlock.h"
-#include "llvm/Support/BlockFrequency.h"
-#include "llvm/Support/BranchProbability.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include <string>
-#include <vector>
-
-namespace llvm {
-
-
-class BlockFrequencyInfo;
-class MachineBlockFrequencyInfo;
-
-/// BlockFrequencyImpl implements block frequency algorithm for IR and
-/// Machine Instructions. Algorithm starts with value ENTRY_FREQ
-/// for the entry block and then propagates frequencies using branch weights
-/// from (Machine)BranchProbabilityInfo. LoopInfo is not required because
-/// algorithm can find "backedges" by itself.
-template<class BlockT, class FunctionT, class BlockProbInfoT>
-class BlockFrequencyImpl {
-
- DenseMap<const BlockT *, BlockFrequency> Freqs;
-
- BlockProbInfoT *BPI;
-
- FunctionT *Fn;
-
- typedef GraphTraits< Inverse<BlockT *> > GT;
-
- static const uint64_t EntryFreq = 1 << 14;
-
- std::string getBlockName(BasicBlock *BB) const {
- return BB->getName().str();
- }
-
- std::string getBlockName(MachineBasicBlock *MBB) const {
- std::string str;
- raw_string_ostream ss(str);
- ss << "BB#" << MBB->getNumber();
-
- if (const BasicBlock *BB = MBB->getBasicBlock())
- ss << " derived from LLVM BB " << BB->getName();
-
- return ss.str();
- }
-
- void setBlockFreq(BlockT *BB, BlockFrequency Freq) {
- Freqs[BB] = Freq;
- DEBUG(dbgs() << "Frequency(" << getBlockName(BB) << ") = ";
- printBlockFreq(dbgs(), Freq) << "\n");
- }
-
- /// getEdgeFreq - Return edge frequency based on SRC frequency and Src -> Dst
- /// edge probability.
- BlockFrequency getEdgeFreq(BlockT *Src, BlockT *Dst) const {
- BranchProbability Prob = BPI->getEdgeProbability(Src, Dst);
- return getBlockFreq(Src) * Prob;
- }
-
- /// incBlockFreq - Increase BB block frequency by FREQ.
- ///
- void incBlockFreq(BlockT *BB, BlockFrequency Freq) {
- Freqs[BB] += Freq;
- DEBUG(dbgs() << "Frequency(" << getBlockName(BB) << ") += ";
- printBlockFreq(dbgs(), Freq) << " --> ";
- printBlockFreq(dbgs(), Freqs[BB]) << "\n");
- }
-
- // All blocks in postorder.
- std::vector<BlockT *> POT;
-
- // Map Block -> Position in reverse-postorder list.
- DenseMap<BlockT *, unsigned> RPO;
-
- // For each loop header, record the per-iteration probability of exiting the
- // loop. This is the reciprocal of the expected number of loop iterations.
- typedef DenseMap<BlockT*, BranchProbability> LoopExitProbMap;
- LoopExitProbMap LoopExitProb;
-
- // (reverse-)postorder traversal iterators.
- typedef typename std::vector<BlockT *>::iterator pot_iterator;
- typedef typename std::vector<BlockT *>::reverse_iterator rpot_iterator;
-
- pot_iterator pot_begin() { return POT.begin(); }
- pot_iterator pot_end() { return POT.end(); }
-
- rpot_iterator rpot_begin() { return POT.rbegin(); }
- rpot_iterator rpot_end() { return POT.rend(); }
-
- rpot_iterator rpot_at(BlockT *BB) {
- rpot_iterator I = rpot_begin();
- unsigned idx = RPO.lookup(BB);
- assert(idx);
- std::advance(I, idx - 1);
-
- assert(*I == BB);
- return I;
- }
-
- /// isBackedge - Return if edge Src -> Dst is a reachable backedge.
- ///
- bool isBackedge(BlockT *Src, BlockT *Dst) const {
- unsigned a = RPO.lookup(Src);
- if (!a)
- return false;
- unsigned b = RPO.lookup(Dst);
- assert(b && "Destination block should be reachable");
- return a >= b;
- }
-
- /// getSingleBlockPred - return single BB block predecessor or NULL if
- /// BB has none or more predecessors.
- BlockT *getSingleBlockPred(BlockT *BB) {
- typename GT::ChildIteratorType
- PI = GraphTraits< Inverse<BlockT *> >::child_begin(BB),
- PE = GraphTraits< Inverse<BlockT *> >::child_end(BB);
-
- if (PI == PE)
- return 0;
-
- BlockT *Pred = *PI;
-
- ++PI;
- if (PI != PE)
- return 0;
-
- return Pred;
- }
-
- void doBlock(BlockT *BB, BlockT *LoopHead,
- SmallPtrSet<BlockT *, 8> &BlocksInLoop) {
-
- DEBUG(dbgs() << "doBlock(" << getBlockName(BB) << ")\n");
- setBlockFreq(BB, 0);
-
- if (BB == LoopHead) {
- setBlockFreq(BB, EntryFreq);
- return;
- }
-
- if (BlockT *Pred = getSingleBlockPred(BB)) {
- if (BlocksInLoop.count(Pred))
- setBlockFreq(BB, getEdgeFreq(Pred, BB));
- // TODO: else? irreducible, ignore it for now.
- return;
- }
-
- bool isInLoop = false;
- bool isLoopHead = false;
-
- for (typename GT::ChildIteratorType
- PI = GraphTraits< Inverse<BlockT *> >::child_begin(BB),
- PE = GraphTraits< Inverse<BlockT *> >::child_end(BB);
- PI != PE; ++PI) {
- BlockT *Pred = *PI;
-
- if (isBackedge(Pred, BB)) {
- isLoopHead = true;
- } else if (BlocksInLoop.count(Pred)) {
- incBlockFreq(BB, getEdgeFreq(Pred, BB));
- isInLoop = true;
- }
- // TODO: else? irreducible.
- }
-
- if (!isInLoop)
- return;
-
- if (!isLoopHead)
- return;
-
- // This block is a loop header, so boost its frequency by the expected
- // number of loop iterations. The loop blocks will be revisited so they all
- // get this boost.
- typename LoopExitProbMap::const_iterator I = LoopExitProb.find(BB);
- assert(I != LoopExitProb.end() && "Loop header missing from table");
- Freqs[BB] /= I->second;
- DEBUG(dbgs() << "Loop header scaled to ";
- printBlockFreq(dbgs(), Freqs[BB]) << ".\n");
- }
-
- /// doLoop - Propagate block frequency down through the loop.
- void doLoop(BlockT *Head, BlockT *Tail) {
- DEBUG(dbgs() << "doLoop(" << getBlockName(Head) << ", "
- << getBlockName(Tail) << ")\n");
-
- SmallPtrSet<BlockT *, 8> BlocksInLoop;
-
- for (rpot_iterator I = rpot_at(Head), E = rpot_at(Tail); ; ++I) {
- BlockT *BB = *I;
- doBlock(BB, Head, BlocksInLoop);
-
- BlocksInLoop.insert(BB);
- if (I == E)
- break;
- }
-
- // Compute loop's cyclic probability using backedges probabilities.
- BlockFrequency BackFreq;
- for (typename GT::ChildIteratorType
- PI = GraphTraits< Inverse<BlockT *> >::child_begin(Head),
- PE = GraphTraits< Inverse<BlockT *> >::child_end(Head);
- PI != PE; ++PI) {
- BlockT *Pred = *PI;
- assert(Pred);
- if (isBackedge(Pred, Head))
- BackFreq += getEdgeFreq(Pred, Head);
- }
-
- // The cyclic probability is freq(BackEdges) / freq(Head), where freq(Head)
- // only counts edges entering the loop, not the loop backedges.
- // The probability of leaving the loop on each iteration is:
- //
- // ExitProb = 1 - CyclicProb
- //
- // The Expected number of loop iterations is:
- //
- // Iterations = 1 / ExitProb
- //
- uint64_t D = std::max(getBlockFreq(Head).getFrequency(), UINT64_C(1));
- uint64_t N = std::max(BackFreq.getFrequency(), UINT64_C(1));
- if (N < D)
- N = D - N;
- else
- // We'd expect N < D, but rounding and saturation means that can't be
- // guaranteed.
- N = 1;
-
- // Now ExitProb = N / D, make sure it fits in an i32/i32 fraction.
- assert(N <= D);
- if (D > UINT32_MAX) {
- unsigned Shift = 32 - countLeadingZeros(D);
- D >>= Shift;
- N >>= Shift;
- if (N == 0)
- N = 1;
- }
- BranchProbability LEP = BranchProbability(N, D);
- LoopExitProb.insert(std::make_pair(Head, LEP));
- DEBUG(dbgs() << "LoopExitProb[" << getBlockName(Head) << "] = " << LEP
- << " from 1 - ";
- printBlockFreq(dbgs(), BackFreq) << " / ";
- printBlockFreq(dbgs(), getBlockFreq(Head)) << ".\n");
- }
-
- friend class BlockFrequencyInfo;
- friend class MachineBlockFrequencyInfo;
-
- BlockFrequencyImpl() { }
-
- void doFunction(FunctionT *fn, BlockProbInfoT *bpi) {
- Fn = fn;
- BPI = bpi;
-
- // Clear everything.
- RPO.clear();
- POT.clear();
- LoopExitProb.clear();
- Freqs.clear();
-
- BlockT *EntryBlock = fn->begin();
-
- std::copy(po_begin(EntryBlock), po_end(EntryBlock), std::back_inserter(POT));
-
- unsigned RPOidx = 0;
- for (rpot_iterator I = rpot_begin(), E = rpot_end(); I != E; ++I) {
- BlockT *BB = *I;
- RPO[BB] = ++RPOidx;
- DEBUG(dbgs() << "RPO[" << getBlockName(BB) << "] = " << RPO[BB] << "\n");
- }
-
- // Travel over all blocks in postorder.
- for (pot_iterator I = pot_begin(), E = pot_end(); I != E; ++I) {
- BlockT *BB = *I;
- BlockT *LastTail = 0;
- DEBUG(dbgs() << "POT: " << getBlockName(BB) << "\n");
-
- for (typename GT::ChildIteratorType
- PI = GraphTraits< Inverse<BlockT *> >::child_begin(BB),
- PE = GraphTraits< Inverse<BlockT *> >::child_end(BB);
- PI != PE; ++PI) {
-
- BlockT *Pred = *PI;
- if (isBackedge(Pred, BB) && (!LastTail || RPO[Pred] > RPO[LastTail]))
- LastTail = Pred;
- }
-
- if (LastTail)
- doLoop(BB, LastTail);
- }
-
- // At the end assume the whole function as a loop, and travel over it once
- // again.
- doLoop(*(rpot_begin()), *(pot_begin()));
- }
-
-public:
-
- uint64_t getEntryFreq() { return EntryFreq; }
-
- /// getBlockFreq - Return block frequency. Return 0 if we don't have it.
- BlockFrequency getBlockFreq(const BlockT *BB) const {
- typename DenseMap<const BlockT *, BlockFrequency>::const_iterator
- I = Freqs.find(BB);
- if (I != Freqs.end())
- return I->second;
- return 0;
- }
-
- void print(raw_ostream &OS) const {
- OS << "\n\n---- Block Freqs ----\n";
- for (typename FunctionT::iterator I = Fn->begin(), E = Fn->end(); I != E;) {
- BlockT *BB = I++;
- OS << " " << getBlockName(BB) << " = ";
- printBlockFreq(OS, getBlockFreq(BB)) << "\n";
-
- for (typename GraphTraits<BlockT *>::ChildIteratorType
- SI = GraphTraits<BlockT *>::child_begin(BB),
- SE = GraphTraits<BlockT *>::child_end(BB); SI != SE; ++SI) {
- BlockT *Succ = *SI;
- OS << " " << getBlockName(BB) << " -> " << getBlockName(Succ)
- << " = "; printBlockFreq(OS, getEdgeFreq(BB, Succ)) << "\n";
- }
- }
- }
-
- void dump() const {
- print(dbgs());
- }
-
- // Utility method that looks up the block frequency associated with BB and
- // prints it to OS.
- raw_ostream &printBlockFreq(raw_ostream &OS,
- const BlockT *BB) {
- return printBlockFreq(OS, getBlockFreq(BB));
- }
-
- raw_ostream &printBlockFreq(raw_ostream &OS,
- const BlockFrequency &Freq) const {
- // Convert fixed-point number to decimal.
- uint64_t Frequency = Freq.getFrequency();
- OS << Frequency / EntryFreq << ".";
- uint64_t Rem = Frequency % EntryFreq;
- uint64_t Eps = 1;
- do {
- Rem *= 10;
- Eps *= 10;
- OS << Rem / EntryFreq;
- Rem = Rem % EntryFreq;
- } while (Rem >= Eps/2);
- return OS;
- }
-
-};
-
-}
-
-#endif
diff --git a/include/llvm/Analysis/BlockFrequencyInfo.h b/include/llvm/Analysis/BlockFrequencyInfo.h
index 2f701d9..3289a28 100644
--- a/include/llvm/Analysis/BlockFrequencyInfo.h
+++ b/include/llvm/Analysis/BlockFrequencyInfo.h
@@ -1,4 +1,4 @@
-//===------- BlockFrequencyInfo.h - Block Frequency Analysis --*- C++ -*---===//
+//===- BlockFrequencyInfo.h - Block Frequency Analysis ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -21,14 +21,12 @@
namespace llvm {
class BranchProbabilityInfo;
-template<class BlockT, class FunctionT, class BranchProbInfoT>
-class BlockFrequencyImpl;
+template <class BlockT> class BlockFrequencyInfoImpl;
-/// BlockFrequencyInfo pass uses BlockFrequencyImpl implementation to estimate
-/// IR basic block frequencies.
+/// The BlockFrequencyInfo pass uses the BlockFrequencyInfoImpl implementation
+/// to estimate IR basic block frequencies.
class BlockFrequencyInfo : public FunctionPass {
- typedef BlockFrequencyImpl<BasicBlock, Function, BranchProbabilityInfo>
- ImplType;
+ typedef BlockFrequencyInfoImpl<BasicBlock> ImplType;
std::unique_ptr<ImplType> BFI;
public:
diff --git a/include/llvm/Analysis/BlockFrequencyInfoImpl.h b/include/llvm/Analysis/BlockFrequencyInfoImpl.h
new file mode 100644
index 0000000..bd72d3e
--- /dev/null
+++ b/include/llvm/Analysis/BlockFrequencyInfoImpl.h
@@ -0,0 +1,1859 @@
+//==- BlockFrequencyInfoImpl.h - Block Frequency Implementation -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Shared implementation of BlockFrequency for IR and Machine Instructions.
+// See the documentation below for BlockFrequencyInfoImpl for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_BLOCKFREQUENCYINFOIMPL_H
+#define LLVM_ANALYSIS_BLOCKFREQUENCYINFOIMPL_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/Support/BlockFrequency.h"
+#include "llvm/Support/BranchProbability.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include <deque>
+#include <list>
+#include <string>
+#include <vector>
+
+#define DEBUG_TYPE "block-freq"
+
+//===----------------------------------------------------------------------===//
+//
+// UnsignedFloat definition.
+//
+// TODO: Make this private to BlockFrequencyInfoImpl or delete.
+//
+//===----------------------------------------------------------------------===//
+namespace llvm {
+
+class UnsignedFloatBase {
+public:
+ static const int32_t MaxExponent = 16383;
+ static const int32_t MinExponent = -16382;
+ static const int DefaultPrecision = 10;
+
+ static void dump(uint64_t D, int16_t E, int Width);
+ static raw_ostream &print(raw_ostream &OS, uint64_t D, int16_t E, int Width,
+ unsigned Precision);
+ static std::string toString(uint64_t D, int16_t E, int Width,
+ unsigned Precision);
+ static int countLeadingZeros32(uint32_t N) { return countLeadingZeros(N); }
+ static int countLeadingZeros64(uint64_t N) { return countLeadingZeros(N); }
+ static uint64_t getHalf(uint64_t N) { return (N >> 1) + (N & 1); }
+
+ static std::pair<uint64_t, bool> splitSigned(int64_t N) {
+ if (N >= 0)
+ return std::make_pair(N, false);
+ uint64_t Unsigned = N == INT64_MIN ? UINT64_C(1) << 63 : uint64_t(-N);
+ return std::make_pair(Unsigned, true);
+ }
+ static int64_t joinSigned(uint64_t U, bool IsNeg) {
+ if (U > uint64_t(INT64_MAX))
+ return IsNeg ? INT64_MIN : INT64_MAX;
+ return IsNeg ? -int64_t(U) : int64_t(U);
+ }
+
+ static int32_t extractLg(const std::pair<int32_t, int> &Lg) {
+ return Lg.first;
+ }
+ static int32_t extractLgFloor(const std::pair<int32_t, int> &Lg) {
+ return Lg.first - (Lg.second > 0);
+ }
+ static int32_t extractLgCeiling(const std::pair<int32_t, int> &Lg) {
+ return Lg.first + (Lg.second < 0);
+ }
+
+ static std::pair<uint64_t, int16_t> divide64(uint64_t L, uint64_t R);
+ static std::pair<uint64_t, int16_t> multiply64(uint64_t L, uint64_t R);
+
+ static int compare(uint64_t L, uint64_t R, int Shift) {
+ assert(Shift >= 0);
+ assert(Shift < 64);
+
+ uint64_t L_adjusted = L >> Shift;
+ if (L_adjusted < R)
+ return -1;
+ if (L_adjusted > R)
+ return 1;
+
+ return L > L_adjusted << Shift ? 1 : 0;
+ }
+};
+
+/// \brief Simple representation of an unsigned floating point.
+///
+/// UnsignedFloat is an unsigned floating point number. It uses simple
+/// saturation arithmetic, and every operation is well-defined for every value.
+///
+/// The number is split into a signed exponent and unsigned digits. The number
+/// represented is \c getDigits()*2^getExponent(). In this way, the digits are
+/// much like the mantissa in the x87 long double, but there is no canonical
+/// form, so the same number can be represented by many bit representations
+/// (it's always in "denormal" mode).
+///
+/// UnsignedFloat is templated on the underlying integer type for digits, which
+/// is expected to be one of uint64_t, uint32_t, uint16_t or uint8_t.
+///
+/// Unlike builtin floating point types, UnsignedFloat is portable.
+///
+/// Unlike APFloat, UnsignedFloat does not model architecture floating point
+/// behaviour (this should make it a little faster), and implements most
+/// operators (this makes it usable).
+///
+/// UnsignedFloat is totally ordered. However, there is no canonical form, so
+/// there are multiple representations of most scalars. E.g.:
+///
+/// UnsignedFloat(8u, 0) == UnsignedFloat(4u, 1)
+/// UnsignedFloat(4u, 1) == UnsignedFloat(2u, 2)
+/// UnsignedFloat(2u, 2) == UnsignedFloat(1u, 3)
+///
+/// UnsignedFloat implements most arithmetic operations. Precision is kept
+/// where possible. Uses simple saturation arithmetic, so that operations
+/// saturate to 0.0 or getLargest() rather than under or overflowing. It has
+/// some extra arithmetic for unit inversion. 0.0/0.0 is defined to be 0.0.
+/// Any other division by 0.0 is defined to be getLargest().
+///
+/// As a convenience for modifying the exponent, left and right shifting are
+/// both implemented, and both interpret negative shifts as positive shifts in
+/// the opposite direction.
+///
+/// Exponents are limited to the range accepted by x87 long double. This makes
+/// it trivial to add functionality to convert to APFloat (this is already
+/// relied on for the implementation of printing).
+///
+/// The current plan is to gut this and make the necessary parts of it (even
+/// more) private to BlockFrequencyInfo.
+template <class DigitsT> class UnsignedFloat : UnsignedFloatBase {
+public:
+ static_assert(!std::numeric_limits<DigitsT>::is_signed,
+ "only unsigned floats supported");
+
+ typedef DigitsT DigitsType;
+
+private:
+ typedef std::numeric_limits<DigitsType> DigitsLimits;
+
+ static const int Width = sizeof(DigitsType) * 8;
+ static_assert(Width <= 64, "invalid integer width for digits");
+
+private:
+ DigitsType Digits;
+ int16_t Exponent;
+
+public:
+ UnsignedFloat() : Digits(0), Exponent(0) {}
+
+ UnsignedFloat(DigitsType Digits, int16_t Exponent)
+ : Digits(Digits), Exponent(Exponent) {}
+
+private:
+ UnsignedFloat(const std::pair<uint64_t, int16_t> &X)
+ : Digits(X.first), Exponent(X.second) {}
+
+public:
+ static UnsignedFloat getZero() { return UnsignedFloat(0, 0); }
+ static UnsignedFloat getOne() { return UnsignedFloat(1, 0); }
+ static UnsignedFloat getLargest() {
+ return UnsignedFloat(DigitsLimits::max(), MaxExponent);
+ }
+ static UnsignedFloat getFloat(uint64_t N) { return adjustToWidth(N, 0); }
+ static UnsignedFloat getInverseFloat(uint64_t N) {
+ return getFloat(N).invert();
+ }
+ static UnsignedFloat getFraction(DigitsType N, DigitsType D) {
+ return getQuotient(N, D);
+ }
+
+ int16_t getExponent() const { return Exponent; }
+ DigitsType getDigits() const { return Digits; }
+
+ /// \brief Convert to the given integer type.
+ ///
+ /// Convert to \c IntT using simple saturating arithmetic, truncating if
+ /// necessary.
+ template <class IntT> IntT toInt() const;
+
+ bool isZero() const { return !Digits; }
+ bool isLargest() const { return *this == getLargest(); }
+ bool isOne() const {
+ if (Exponent > 0 || Exponent <= -Width)
+ return false;
+ return Digits == DigitsType(1) << -Exponent;
+ }
+
+ /// \brief The log base 2, rounded.
+ ///
+ /// Get the lg of the scalar. lg 0 is defined to be INT32_MIN.
+ int32_t lg() const { return extractLg(lgImpl()); }
+
+ /// \brief The log base 2, rounded towards INT32_MIN.
+ ///
+ /// Get the lg floor. lg 0 is defined to be INT32_MIN.
+ int32_t lgFloor() const { return extractLgFloor(lgImpl()); }
+
+ /// \brief The log base 2, rounded towards INT32_MAX.
+ ///
+ /// Get the lg ceiling. lg 0 is defined to be INT32_MIN.
+ int32_t lgCeiling() const { return extractLgCeiling(lgImpl()); }
+
+ bool operator==(const UnsignedFloat &X) const { return compare(X) == 0; }
+ bool operator<(const UnsignedFloat &X) const { return compare(X) < 0; }
+ bool operator!=(const UnsignedFloat &X) const { return compare(X) != 0; }
+ bool operator>(const UnsignedFloat &X) const { return compare(X) > 0; }
+ bool operator<=(const UnsignedFloat &X) const { return compare(X) <= 0; }
+ bool operator>=(const UnsignedFloat &X) const { return compare(X) >= 0; }
+
+ bool operator!() const { return isZero(); }
+
+ /// \brief Convert to a decimal representation in a string.
+ ///
+ /// Convert to a string. Uses scientific notation for very large/small
+ /// numbers. Scientific notation is used roughly for numbers outside of the
+ /// range 2^-64 through 2^64.
+ ///
+ /// \c Precision indicates the number of decimal digits of precision to use;
+ /// 0 requests the maximum available.
+ ///
+ /// As a special case to make debugging easier, if the number is small enough
+ /// to convert without scientific notation and has more than \c Precision
+ /// digits before the decimal place, it's printed accurately to the first
+ /// digit past zero. E.g., assuming 10 digits of precision:
+ ///
+ /// 98765432198.7654... => 98765432198.8
+ /// 8765432198.7654... => 8765432198.8
+ /// 765432198.7654... => 765432198.8
+ /// 65432198.7654... => 65432198.77
+ /// 5432198.7654... => 5432198.765
+ std::string toString(unsigned Precision = DefaultPrecision) {
+ return UnsignedFloatBase::toString(Digits, Exponent, Width, Precision);
+ }
+
+ /// \brief Print a decimal representation.
+ ///
+ /// Print a string. See toString for documentation.
+ raw_ostream &print(raw_ostream &OS,
+ unsigned Precision = DefaultPrecision) const {
+ return UnsignedFloatBase::print(OS, Digits, Exponent, Width, Precision);
+ }
+ void dump() const { return UnsignedFloatBase::dump(Digits, Exponent, Width); }
+
+ UnsignedFloat &operator+=(const UnsignedFloat &X);
+ UnsignedFloat &operator-=(const UnsignedFloat &X);
+ UnsignedFloat &operator*=(const UnsignedFloat &X);
+ UnsignedFloat &operator/=(const UnsignedFloat &X);
+ UnsignedFloat &operator<<=(int16_t Shift) { shiftLeft(Shift); return *this; }
+ UnsignedFloat &operator>>=(int16_t Shift) { shiftRight(Shift); return *this; }
+
+private:
+ void shiftLeft(int32_t Shift);
+ void shiftRight(int32_t Shift);
+
+ /// \brief Adjust two floats to have matching exponents.
+ ///
+ /// Adjust \c this and \c X to have matching exponents. Returns the new \c X
+ /// by value. Does nothing if \a isZero() for either.
+ ///
+ /// The value that compares smaller will lose precision, and possibly become
+ /// \a isZero().
+ UnsignedFloat matchExponents(UnsignedFloat X);
+
+ /// \brief Increase exponent to match another float.
+ ///
+ /// Increases \c this to have an exponent matching \c X. May decrease the
+ /// exponent of \c X in the process, and \c this may possibly become \a
+ /// isZero().
+ void increaseExponentToMatch(UnsignedFloat &X, int32_t ExponentDiff);
+
+public:
+ /// \brief Scale a large number accurately.
+ ///
+ /// Scale N (multiply it by this). Uses full precision multiplication, even
+ /// if Width is smaller than 64, so information is not lost.
+ uint64_t scale(uint64_t N) const;
+ uint64_t scaleByInverse(uint64_t N) const {
+ // TODO: implement directly, rather than relying on inverse. Inverse is
+ // expensive.
+ return inverse().scale(N);
+ }
+ int64_t scale(int64_t N) const {
+ std::pair<uint64_t, bool> Unsigned = splitSigned(N);
+ return joinSigned(scale(Unsigned.first), Unsigned.second);
+ }
+ int64_t scaleByInverse(int64_t N) const {
+ std::pair<uint64_t, bool> Unsigned = splitSigned(N);
+ return joinSigned(scaleByInverse(Unsigned.first), Unsigned.second);
+ }
+
+ int compare(const UnsignedFloat &X) const;
+ int compareTo(uint64_t N) const {
+ UnsignedFloat Float = getFloat(N);
+ int Compare = compare(Float);
+ if (Width == 64 || Compare != 0)
+ return Compare;
+
+ // Check for precision loss. We know *this == RoundTrip.
+ uint64_t RoundTrip = Float.template toInt<uint64_t>();
+ return N == RoundTrip ? 0 : RoundTrip < N ? -1 : 1;
+ }
+ int compareTo(int64_t N) const { return N < 0 ? 1 : compareTo(uint64_t(N)); }
+
+ UnsignedFloat &invert() { return *this = UnsignedFloat::getFloat(1) / *this; }
+ UnsignedFloat inverse() const { return UnsignedFloat(*this).invert(); }
+
+private:
+ static UnsignedFloat getProduct(DigitsType L, DigitsType R);
+ static UnsignedFloat getQuotient(DigitsType Dividend, DigitsType Divisor);
+
+ std::pair<int32_t, int> lgImpl() const;
+ static int countLeadingZerosWidth(DigitsType Digits) {
+ if (Width == 64)
+ return countLeadingZeros64(Digits);
+ if (Width == 32)
+ return countLeadingZeros32(Digits);
+ return countLeadingZeros32(Digits) + Width - 32;
+ }
+
+ static UnsignedFloat adjustToWidth(uint64_t N, int32_t S) {
+ assert(S >= MinExponent);
+ assert(S <= MaxExponent);
+ if (Width == 64 || N <= DigitsLimits::max())
+ return UnsignedFloat(N, S);
+
+ // Shift right.
+ int Shift = 64 - Width - countLeadingZeros64(N);
+ DigitsType Shifted = N >> Shift;
+
+ // Round.
+ assert(S + Shift <= MaxExponent);
+ return getRounded(UnsignedFloat(Shifted, S + Shift),
+ N & UINT64_C(1) << (Shift - 1));
+ }
+
+ static UnsignedFloat getRounded(UnsignedFloat P, bool Round) {
+ if (!Round)
+ return P;
+ if (P.Digits == DigitsLimits::max())
+ // Careful of overflow in the exponent.
+ return UnsignedFloat(1, P.Exponent) <<= Width;
+ return UnsignedFloat(P.Digits + 1, P.Exponent);
+ }
+};
+
+#define UNSIGNED_FLOAT_BOP(op, base) \
+ template <class DigitsT> \
+ UnsignedFloat<DigitsT> operator op(const UnsignedFloat<DigitsT> &L, \
+ const UnsignedFloat<DigitsT> &R) { \
+ return UnsignedFloat<DigitsT>(L) base R; \
+ }
+UNSIGNED_FLOAT_BOP(+, += )
+UNSIGNED_FLOAT_BOP(-, -= )
+UNSIGNED_FLOAT_BOP(*, *= )
+UNSIGNED_FLOAT_BOP(/, /= )
+UNSIGNED_FLOAT_BOP(<<, <<= )
+UNSIGNED_FLOAT_BOP(>>, >>= )
+#undef UNSIGNED_FLOAT_BOP
+
+template <class DigitsT>
+raw_ostream &operator<<(raw_ostream &OS, const UnsignedFloat<DigitsT> &X) {
+ return X.print(OS, 10);
+}
+
+#define UNSIGNED_FLOAT_COMPARE_TO_TYPE(op, T1, T2) \
+ template <class DigitsT> \
+ bool operator op(const UnsignedFloat<DigitsT> &L, T1 R) { \
+ return L.compareTo(T2(R)) op 0; \
+ } \
+ template <class DigitsT> \
+ bool operator op(T1 L, const UnsignedFloat<DigitsT> &R) { \
+ return 0 op R.compareTo(T2(L)); \
+ }
+#define UNSIGNED_FLOAT_COMPARE_TO(op) \
+ UNSIGNED_FLOAT_COMPARE_TO_TYPE(op, uint64_t, uint64_t) \
+ UNSIGNED_FLOAT_COMPARE_TO_TYPE(op, uint32_t, uint64_t) \
+ UNSIGNED_FLOAT_COMPARE_TO_TYPE(op, int64_t, int64_t) \
+ UNSIGNED_FLOAT_COMPARE_TO_TYPE(op, int32_t, int64_t)
+UNSIGNED_FLOAT_COMPARE_TO(< )
+UNSIGNED_FLOAT_COMPARE_TO(> )
+UNSIGNED_FLOAT_COMPARE_TO(== )
+UNSIGNED_FLOAT_COMPARE_TO(!= )
+UNSIGNED_FLOAT_COMPARE_TO(<= )
+UNSIGNED_FLOAT_COMPARE_TO(>= )
+#undef UNSIGNED_FLOAT_COMPARE_TO
+#undef UNSIGNED_FLOAT_COMPARE_TO_TYPE
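+
+// Example (an illustrative sketch; these objects are hypothetical and not used
+// elsewhere in this header). It shows the non-canonical representation and the
+// saturation behaviour described above:
+//
+//   UnsignedFloat<uint32_t> A(8, 0), B(4, 1); // both represent 8.0
+//   assert(A == B);                           // equal despite different bits
+//   UnsignedFloat<uint32_t> Max = UnsignedFloat<uint32_t>::getLargest();
+//   assert(Max + A == Max);                   // addition saturates
+//   assert(A / UnsignedFloat<uint32_t>::getZero() == Max); // x/0.0 == Max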
+
+template <class DigitsT>
+uint64_t UnsignedFloat<DigitsT>::scale(uint64_t N) const {
+ if (Width == 64 || N <= DigitsLimits::max())
+ return (getFloat(N) * *this).template toInt<uint64_t>();
+
+ // Defer to the 64-bit version.
+ return UnsignedFloat<uint64_t>(Digits, Exponent).scale(N);
+}
+
+template <class DigitsT>
+UnsignedFloat<DigitsT> UnsignedFloat<DigitsT>::getProduct(DigitsType L,
+ DigitsType R) {
+ // Check for zero.
+ if (!L || !R)
+ return getZero();
+
+ // Check for numbers that we can compute with 64-bit math.
+ if (Width <= 32 || (L <= UINT32_MAX && R <= UINT32_MAX))
+ return adjustToWidth(uint64_t(L) * uint64_t(R), 0);
+
+ // Do the full thing.
+ return UnsignedFloat(multiply64(L, R));
+}
+template <class DigitsT>
+UnsignedFloat<DigitsT> UnsignedFloat<DigitsT>::getQuotient(DigitsType Dividend,
+ DigitsType Divisor) {
+ // Check for zero.
+ if (!Dividend)
+ return getZero();
+ if (!Divisor)
+ return getLargest();
+
+ if (Width == 64)
+ return UnsignedFloat(divide64(Dividend, Divisor));
+
+ // We can compute this with 64-bit math.
+ int Shift = countLeadingZeros64(Dividend);
+ uint64_t Shifted = uint64_t(Dividend) << Shift;
+ uint64_t Quotient = Shifted / Divisor;
+
+ // If Quotient needs to be shifted, then adjustToWidth will round.
+ if (Quotient > DigitsLimits::max())
+ return adjustToWidth(Quotient, -Shift);
+
+ // Round based on the value of the next bit.
+ return getRounded(UnsignedFloat(Quotient, -Shift),
+ Shifted % Divisor >= getHalf(Divisor));
+}
+
+template <class DigitsT>
+template <class IntT>
+IntT UnsignedFloat<DigitsT>::toInt() const {
+ typedef std::numeric_limits<IntT> Limits;
+ if (*this < 1)
+ return 0;
+ if (*this >= Limits::max())
+ return Limits::max();
+
+ IntT N = Digits;
+ if (Exponent > 0) {
+ assert(size_t(Exponent) < sizeof(IntT) * 8);
+ return N << Exponent;
+ }
+ if (Exponent < 0) {
+ assert(size_t(-Exponent) < sizeof(IntT) * 8);
+ return N >> -Exponent;
+ }
+ return N;
+}
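+
+// Example (illustrative sketch): toInt() truncates towards zero and saturates
+// at the target type's maximum, so for hypothetical values:
+//
+//   UnsignedFloat<uint64_t>::getFraction(1, 2).toInt<uint32_t>() == 0
+//   UnsignedFloat<uint64_t>::getFloat(1000).toInt<uint8_t>() == 255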
+
+template <class DigitsT>
+std::pair<int32_t, int> UnsignedFloat<DigitsT>::lgImpl() const {
+ if (isZero())
+ return std::make_pair(INT32_MIN, 0);
+
+ // Get the floor of the lg of Digits.
+ int32_t LocalFloor = Width - countLeadingZerosWidth(Digits) - 1;
+
+ // Get the floor of the lg of this.
+ int32_t Floor = Exponent + LocalFloor;
+ if (Digits == UINT64_C(1) << LocalFloor)
+ return std::make_pair(Floor, 0);
+
+ // Round based on the next digit.
+ assert(LocalFloor >= 1);
+ bool Round = Digits & UINT64_C(1) << (LocalFloor - 1);
+ return std::make_pair(Floor + Round, Round ? 1 : -1);
+}
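+
+// Example (illustrative sketch) of the three rounding modes above, for the
+// hypothetical value 3.0 (Digits = 3, Exponent = 0), where lg(3) is ~1.58:
+//
+//   UnsignedFloat<uint64_t> X(3, 0);
+//   X.lgFloor() == 1;   // rounded towards INT32_MIN
+//   X.lg() == 2;        // rounded to nearest
+//   X.lgCeiling() == 2; // rounded towards INT32_MAX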
+
+template <class DigitsT>
+UnsignedFloat<DigitsT> UnsignedFloat<DigitsT>::matchExponents(UnsignedFloat X) {
+ if (isZero() || X.isZero() || Exponent == X.Exponent)
+ return X;
+
+ int32_t Diff = int32_t(X.Exponent) - int32_t(Exponent);
+ if (Diff > 0)
+ increaseExponentToMatch(X, Diff);
+ else
+ X.increaseExponentToMatch(*this, -Diff);
+ return X;
+}
+template <class DigitsT>
+void UnsignedFloat<DigitsT>::increaseExponentToMatch(UnsignedFloat &X,
+ int32_t ExponentDiff) {
+ assert(ExponentDiff > 0);
+ if (ExponentDiff >= 2 * Width) {
+ *this = getZero();
+ return;
+ }
+
+ // Use up any leading zeros on X, and then shift this.
+ int32_t ShiftX = std::min(countLeadingZerosWidth(X.Digits), ExponentDiff);
+ assert(ShiftX < Width);
+
+ int32_t ShiftThis = ExponentDiff - ShiftX;
+ if (ShiftThis >= Width) {
+ *this = getZero();
+ return;
+ }
+
+ X.Digits <<= ShiftX;
+ X.Exponent -= ShiftX;
+ Digits >>= ShiftThis;
+ Exponent += ShiftThis;
+ return;
+}
+
+template <class DigitsT>
+UnsignedFloat<DigitsT> &UnsignedFloat<DigitsT>::
+operator+=(const UnsignedFloat &X) {
+ if (isLargest() || X.isZero())
+ return *this;
+ if (isZero() || X.isLargest())
+ return *this = X;
+
+ // Normalize exponents.
+ UnsignedFloat Scaled = matchExponents(X);
+
+ // Check for zero again.
+ if (isZero())
+ return *this = Scaled;
+ if (Scaled.isZero())
+ return *this;
+
+ // Compute sum.
+ DigitsType Sum = Digits + Scaled.Digits;
+ bool DidOverflow = Sum < Digits;
+ Digits = Sum;
+ if (!DidOverflow)
+ return *this;
+
+ if (Exponent == MaxExponent)
+ return *this = getLargest();
+
+ ++Exponent;
+ Digits = UINT64_C(1) << (Width - 1) | Digits >> 1;
+
+ return *this;
+}
+template <class DigitsT>
+UnsignedFloat<DigitsT> &UnsignedFloat<DigitsT>::
+operator-=(const UnsignedFloat &X) {
+ if (X.isZero())
+ return *this;
+ if (*this <= X)
+ return *this = getZero();
+
+ // Normalize exponents.
+ UnsignedFloat Scaled = matchExponents(X);
+ assert(Digits >= Scaled.Digits);
+
+ // Compute difference.
+ if (!Scaled.isZero()) {
+ Digits -= Scaled.Digits;
+ return *this;
+ }
+
+ // Check if X just barely lost its last bit. E.g., for 32-bit:
+ //
+ // 1*2^32 - 1*2^0 == 0xffffffff != 1*2^32
+ if (*this == UnsignedFloat(1, X.lgFloor() + Width)) {
+ Digits = DigitsType(0) - 1;
+ --Exponent;
+ }
+ return *this;
+}
+template <class DigitsT>
+UnsignedFloat<DigitsT> &UnsignedFloat<DigitsT>::
+operator*=(const UnsignedFloat &X) {
+ if (isZero())
+ return *this;
+ if (X.isZero())
+ return *this = X;
+
+ // Save the exponents.
+ int32_t Exponents = int32_t(Exponent) + int32_t(X.Exponent);
+
+ // Get the raw product.
+ *this = getProduct(Digits, X.Digits);
+
+ // Combine with exponents.
+ return *this <<= Exponents;
+}
+template <class DigitsT>
+UnsignedFloat<DigitsT> &UnsignedFloat<DigitsT>::
+operator/=(const UnsignedFloat &X) {
+ if (isZero())
+ return *this;
+ if (X.isZero())
+ return *this = getLargest();
+
+ // Save the exponents.
+ int32_t Exponents = int32_t(Exponent) - int32_t(X.Exponent);
+
+ // Get the raw quotient.
+ *this = getQuotient(Digits, X.Digits);
+
+ // Combine with exponents.
+ return *this <<= Exponents;
+}
+template <class DigitsT>
+void UnsignedFloat<DigitsT>::shiftLeft(int32_t Shift) {
+ if (!Shift || isZero())
+ return;
+ assert(Shift != INT32_MIN);
+ if (Shift < 0) {
+ shiftRight(-Shift);
+ return;
+ }
+
+ // Shift as much as we can in the exponent.
+ int32_t ExponentShift = std::min(Shift, MaxExponent - Exponent);
+ Exponent += ExponentShift;
+ if (ExponentShift == Shift)
+ return;
+
+ // Check this late, since it's rare.
+ if (isLargest())
+ return;
+
+ // Shift the digits themselves.
+ Shift -= ExponentShift;
+ if (Shift > countLeadingZerosWidth(Digits)) {
+ // Saturate.
+ *this = getLargest();
+ return;
+ }
+
+ Digits <<= Shift;
+ return;
+}
+
+template <class DigitsT>
+void UnsignedFloat<DigitsT>::shiftRight(int32_t Shift) {
+ if (!Shift || isZero())
+ return;
+ assert(Shift != INT32_MIN);
+ if (Shift < 0) {
+ shiftLeft(-Shift);
+ return;
+ }
+
+ // Shift as much as we can in the exponent.
+ int32_t ExponentShift = std::min(Shift, Exponent - MinExponent);
+ Exponent -= ExponentShift;
+ if (ExponentShift == Shift)
+ return;
+
+ // Shift the digits themselves.
+ Shift -= ExponentShift;
+ if (Shift >= Width) {
+ // Saturate.
+ *this = getZero();
+ return;
+ }
+
+ Digits >>= Shift;
+ return;
+}
+
+template <class DigitsT>
+int UnsignedFloat<DigitsT>::compare(const UnsignedFloat &X) const {
+ // Check for zero.
+ if (isZero())
+ return X.isZero() ? 0 : -1;
+ if (X.isZero())
+ return 1;
+
+ // Check for the scale. Use lgFloor to be sure that the exponent difference
+ // is always lower than 64.
+ int32_t lgL = lgFloor(), lgR = X.lgFloor();
+ if (lgL != lgR)
+ return lgL < lgR ? -1 : 1;
+
+ // Compare digits.
+ if (Exponent < X.Exponent)
+ return UnsignedFloatBase::compare(Digits, X.Digits, X.Exponent - Exponent);
+
+ return -UnsignedFloatBase::compare(X.Digits, Digits, Exponent - X.Exponent);
+}
+
+template <class T> struct isPodLike<UnsignedFloat<T>> {
+ static const bool value = true;
+};
+}
+
+//===----------------------------------------------------------------------===//
+//
+// BlockMass definition.
+//
+// TODO: Make this private to BlockFrequencyInfoImpl or delete.
+//
+//===----------------------------------------------------------------------===//
+namespace llvm {
+
+/// \brief Mass of a block.
+///
+/// This class implements a sort of fixed-point fraction always between 0.0 and
+/// 1.0. getMass() == UINT64_MAX indicates a value of 1.0.
+///
+/// Masses can be added and subtracted. Simple saturation arithmetic is used,
+/// so arithmetic operations never overflow or underflow.
+///
+/// Masses can be multiplied. Multiplication treats full mass as 1.0 and uses
+/// an inexpensive floating-point algorithm that's off-by-one (almost, but not
+/// quite, maximum precision).
+///
+/// Masses can be scaled by \a BranchProbability at maximum precision.
+class BlockMass {
+ uint64_t Mass;
+
+public:
+ BlockMass() : Mass(0) {}
+ explicit BlockMass(uint64_t Mass) : Mass(Mass) {}
+
+ static BlockMass getEmpty() { return BlockMass(); }
+ static BlockMass getFull() { return BlockMass(UINT64_MAX); }
+
+ uint64_t getMass() const { return Mass; }
+
+ bool isFull() const { return Mass == UINT64_MAX; }
+ bool isEmpty() const { return !Mass; }
+
+ bool operator!() const { return isEmpty(); }
+
+ /// \brief Add another mass.
+ ///
+ /// Adds another mass, saturating at \a isFull() rather than overflowing.
+ BlockMass &operator+=(const BlockMass &X) {
+ uint64_t Sum = Mass + X.Mass;
+ Mass = Sum < Mass ? UINT64_MAX : Sum;
+ return *this;
+ }
+
+ /// \brief Subtract another mass.
+ ///
+ /// Subtracts another mass, saturating at \a isEmpty() rather than
+  /// underflowing.
+ BlockMass &operator-=(const BlockMass &X) {
+ uint64_t Diff = Mass - X.Mass;
+ Mass = Diff > Mass ? 0 : Diff;
+ return *this;
+ }
+
+ BlockMass &operator*=(const BranchProbability &P) {
+ Mass = P.scale(Mass);
+ return *this;
+ }
+
+ bool operator==(const BlockMass &X) const { return Mass == X.Mass; }
+ bool operator!=(const BlockMass &X) const { return Mass != X.Mass; }
+ bool operator<=(const BlockMass &X) const { return Mass <= X.Mass; }
+ bool operator>=(const BlockMass &X) const { return Mass >= X.Mass; }
+ bool operator<(const BlockMass &X) const { return Mass < X.Mass; }
+ bool operator>(const BlockMass &X) const { return Mass > X.Mass; }
+
+ /// \brief Convert to floating point.
+ ///
+ /// Convert to a float. \a isFull() gives 1.0, while \a isEmpty() gives
+ /// slightly above 0.0.
+ UnsignedFloat<uint64_t> toFloat() const;
+
+ void dump() const;
+ raw_ostream &print(raw_ostream &OS) const;
+};
+
+inline BlockMass operator+(const BlockMass &L, const BlockMass &R) {
+ return BlockMass(L) += R;
+}
+inline BlockMass operator-(const BlockMass &L, const BlockMass &R) {
+ return BlockMass(L) -= R;
+}
+inline BlockMass operator*(const BlockMass &L, const BranchProbability &R) {
+ return BlockMass(L) *= R;
+}
+inline BlockMass operator*(const BranchProbability &L, const BlockMass &R) {
+ return BlockMass(R) *= L;
+}
+
+inline raw_ostream &operator<<(raw_ostream &OS, const BlockMass &X) {
+ return X.print(OS);
+}
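+
+// Example (illustrative sketch): BlockMass treats UINT64_MAX as 1.0, and both
+// addition and subtraction saturate, so splitting and recombining mass stays
+// within [0.0, 1.0]:
+//
+//   BlockMass Full = BlockMass::getFull();           // 1.0
+//   BlockMass Half = Full * BranchProbability(1, 2); // roughly 0.5
+//   BlockMass Rest = Full - Half;
+//   assert(Half + Rest <= Full);                     // never exceeds 1.0
+//   assert(Full + Half == Full);                     // saturates at isFull()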
+
+template <> struct isPodLike<BlockMass> {
+ static const bool value = true;
+};
+}
+
+//===----------------------------------------------------------------------===//
+//
+// BlockFrequencyInfoImpl definition.
+//
+//===----------------------------------------------------------------------===//
+namespace llvm {
+
+class BasicBlock;
+class BranchProbabilityInfo;
+class Function;
+class Loop;
+class LoopInfo;
+class MachineBasicBlock;
+class MachineBranchProbabilityInfo;
+class MachineFunction;
+class MachineLoop;
+class MachineLoopInfo;
+
+namespace bfi_detail {
+struct IrreducibleGraph;
+
+// This is part of a workaround for a GCC 4.7 crash on lambdas.
+template <class BT> struct BlockEdgesAdder;
+}
+
+/// \brief Base class for BlockFrequencyInfoImpl
+///
+/// BlockFrequencyInfoImplBase has supporting data structures and some
+/// algorithms for BlockFrequencyInfoImpl. Only algorithms that depend on
+/// the block type (or that call such algorithms) are skipped here.
+///
+/// Nevertheless, the majority of the overall algorithm documentation lives
+/// with BlockFrequencyInfoImpl. See there for details.
+class BlockFrequencyInfoImplBase {
+public:
+ typedef UnsignedFloat<uint64_t> Float;
+
+ /// \brief Representative of a block.
+ ///
+ /// This is a simple wrapper around an index into the reverse-post-order
+ /// traversal of the blocks.
+ ///
+ /// Unlike a block pointer, its order has meaning (location in the
+  /// topological sort) and its class is the same regardless of block type.
+ struct BlockNode {
+ typedef uint32_t IndexType;
+ IndexType Index;
+
+ bool operator==(const BlockNode &X) const { return Index == X.Index; }
+ bool operator!=(const BlockNode &X) const { return Index != X.Index; }
+ bool operator<=(const BlockNode &X) const { return Index <= X.Index; }
+ bool operator>=(const BlockNode &X) const { return Index >= X.Index; }
+ bool operator<(const BlockNode &X) const { return Index < X.Index; }
+ bool operator>(const BlockNode &X) const { return Index > X.Index; }
+
+ BlockNode() : Index(UINT32_MAX) {}
+ BlockNode(IndexType Index) : Index(Index) {}
+
+ bool isValid() const { return Index <= getMaxIndex(); }
+ static size_t getMaxIndex() { return UINT32_MAX - 1; }
+ };
+
+ /// \brief Stats about a block itself.
+ struct FrequencyData {
+ Float Floating;
+ uint64_t Integer;
+ };
+
+ /// \brief Data about a loop.
+ ///
+  /// Contains the data necessary to represent a loop as a
+ /// pseudo-node once it's packaged.
+ struct LoopData {
+ typedef SmallVector<std::pair<BlockNode, BlockMass>, 4> ExitMap;
+ typedef SmallVector<BlockNode, 4> NodeList;
+ LoopData *Parent; ///< The parent loop.
+ bool IsPackaged; ///< Whether this has been packaged.
+ uint32_t NumHeaders; ///< Number of headers.
+ ExitMap Exits; ///< Successor edges (and weights).
+ NodeList Nodes; ///< Header and the members of the loop.
+ BlockMass BackedgeMass; ///< Mass returned to loop header.
+ BlockMass Mass;
+ Float Scale;
+
+ LoopData(LoopData *Parent, const BlockNode &Header)
+ : Parent(Parent), IsPackaged(false), NumHeaders(1), Nodes(1, Header) {}
+ template <class It1, class It2>
+ LoopData(LoopData *Parent, It1 FirstHeader, It1 LastHeader, It2 FirstOther,
+ It2 LastOther)
+ : Parent(Parent), IsPackaged(false), Nodes(FirstHeader, LastHeader) {
+ NumHeaders = Nodes.size();
+ Nodes.insert(Nodes.end(), FirstOther, LastOther);
+ }
+ bool isHeader(const BlockNode &Node) const {
+ if (isIrreducible())
+ return std::binary_search(Nodes.begin(), Nodes.begin() + NumHeaders,
+ Node);
+ return Node == Nodes[0];
+ }
+ BlockNode getHeader() const { return Nodes[0]; }
+ bool isIrreducible() const { return NumHeaders > 1; }
+
+ NodeList::const_iterator members_begin() const {
+ return Nodes.begin() + NumHeaders;
+ }
+ NodeList::const_iterator members_end() const { return Nodes.end(); }
+ iterator_range<NodeList::const_iterator> members() const {
+ return make_range(members_begin(), members_end());
+ }
+ };
+
+ /// \brief Index of loop information.
+ struct WorkingData {
+ BlockNode Node; ///< This node.
+ LoopData *Loop; ///< The loop this block is inside.
+ BlockMass Mass; ///< Mass distribution from the entry block.
+
+ WorkingData(const BlockNode &Node) : Node(Node), Loop(nullptr) {}
+
+ bool isLoopHeader() const { return Loop && Loop->isHeader(Node); }
+ bool isDoubleLoopHeader() const {
+ return isLoopHeader() && Loop->Parent && Loop->Parent->isIrreducible() &&
+ Loop->Parent->isHeader(Node);
+ }
+
+ LoopData *getContainingLoop() const {
+ if (!isLoopHeader())
+ return Loop;
+ if (!isDoubleLoopHeader())
+ return Loop->Parent;
+ return Loop->Parent->Parent;
+ }
+
+ /// \brief Resolve a node to its representative.
+ ///
+ /// Get the node currently representing Node, which could be a containing
+ /// loop.
+ ///
+ /// This function should only be called when distributing mass. As long as
+    /// there are no irreducible edges to Node, it will have complexity
+ /// O(1) in this context.
+ ///
+ /// In general, the complexity is O(L), where L is the number of loop
+ /// headers Node has been packaged into. Since this method is called in
+ /// the context of distributing mass, L will be the number of loop headers
+ /// an early exit edge jumps out of.
+ BlockNode getResolvedNode() const {
+ auto L = getPackagedLoop();
+ return L ? L->getHeader() : Node;
+ }
+ LoopData *getPackagedLoop() const {
+ if (!Loop || !Loop->IsPackaged)
+ return nullptr;
+ auto L = Loop;
+ while (L->Parent && L->Parent->IsPackaged)
+ L = L->Parent;
+ return L;
+ }
+
+ /// \brief Get the appropriate mass for a node.
+ ///
+ /// Get appropriate mass for Node. If Node is a loop-header (whose loop
+ /// has been packaged), returns the mass of its pseudo-node. If it's a
+ /// node inside a packaged loop, it returns the loop's mass.
+ BlockMass &getMass() {
+ if (!isAPackage())
+ return Mass;
+ if (!isADoublePackage())
+ return Loop->Mass;
+ return Loop->Parent->Mass;
+ }
+
+ /// \brief Has ContainingLoop been packaged up?
+ bool isPackaged() const { return getResolvedNode() != Node; }
+ /// \brief Has Loop been packaged up?
+ bool isAPackage() const { return isLoopHeader() && Loop->IsPackaged; }
+ /// \brief Has Loop been packaged up twice?
+ bool isADoublePackage() const {
+ return isDoubleLoopHeader() && Loop->Parent->IsPackaged;
+ }
+ };
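+
+  // Example (an illustrative sketch with hypothetical loops): if node N is in
+  // loop L1, L1 has been packaged into its parent L2, and L2 is packaged as
+  // well (but L2's parent is not), then while distributing mass:
+  //
+  //   Working[N.Index].getResolvedNode() == L2's header
+  //   Working[N.Index].isPackaged()      == true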
+
+ /// \brief Unscaled probability weight.
+ ///
+ /// Probability weight for an edge in the graph (including the
+ /// successor/target node).
+ ///
+ /// All edges in the original function are 32-bit. However, exit edges from
+ /// loop packages are taken from 64-bit exit masses, so we need 64-bits of
+ /// space in general.
+ ///
+ /// In addition to the raw weight amount, Weight stores the type of the edge
+ /// in the current context (i.e., the context of the loop being processed).
+ /// Is this a local edge within the loop, an exit from the loop, or a
+ /// backedge to the loop header?
+ struct Weight {
+ enum DistType { Local, Exit, Backedge };
+ DistType Type;
+ BlockNode TargetNode;
+ uint64_t Amount;
+ Weight() : Type(Local), Amount(0) {}
+ };
+
+ /// \brief Distribution of unscaled probability weight.
+ ///
+ /// Distribution of unscaled probability weight to a set of successors.
+ ///
+ /// This class collates the successor edge weights for later processing.
+ ///
+ /// \a DidOverflow indicates whether \a Total did overflow while adding to
+ /// the distribution. It should never overflow twice.
+ struct Distribution {
+ typedef SmallVector<Weight, 4> WeightList;
+ WeightList Weights; ///< Individual successor weights.
+ uint64_t Total; ///< Sum of all weights.
+ bool DidOverflow; ///< Whether \a Total did overflow.
+
+ Distribution() : Total(0), DidOverflow(false) {}
+ void addLocal(const BlockNode &Node, uint64_t Amount) {
+ add(Node, Amount, Weight::Local);
+ }
+ void addExit(const BlockNode &Node, uint64_t Amount) {
+ add(Node, Amount, Weight::Exit);
+ }
+ void addBackedge(const BlockNode &Node, uint64_t Amount) {
+ add(Node, Amount, Weight::Backedge);
+ }
+
+ /// \brief Normalize the distribution.
+ ///
+ /// Combines multiple edges to the same \a Weight::TargetNode and scales
+ /// down so that \a Total fits into 32-bits.
+ ///
+ /// This is linear in the size of \a Weights. For the vast majority of
+ /// cases, adjacent edge weights are combined by sorting WeightList and
+ /// combining adjacent weights. However, for very large edge lists an
+ /// auxiliary hash table is used.
+ void normalize();
+
+ private:
+ void add(const BlockNode &Node, uint64_t Amount, Weight::DistType Type);
+ };
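+
+  // Example (illustrative sketch; the nodes and weights below are
+  // hypothetical): while distributing mass from a block inside a loop, each
+  // successor edge is categorized relative to that loop before normalizing:
+  //
+  //   Distribution Dist;
+  //   Dist.addLocal(SuccInsideLoop, 3);  // edge stays within the loop
+  //   Dist.addExit(SuccOutsideLoop, 1);  // edge leaves the loop
+  //   Dist.addBackedge(LoopHeader, 4);   // edge returns to the header
+  //   Dist.normalize();                  // coalesce edges; Total fits in 32 bits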
+
+ /// \brief Data about each block. This is used downstream.
+ std::vector<FrequencyData> Freqs;
+
+ /// \brief Loop data: see initializeLoops().
+ std::vector<WorkingData> Working;
+
+ /// \brief Indexed information about loops.
+ std::list<LoopData> Loops;
+
+ /// \brief Add all edges out of a packaged loop to the distribution.
+ ///
+ /// Adds all edges from LocalLoopHead to Dist. Calls addToDist() to add each
+ /// successor edge.
+ ///
+ /// \return \c true unless there's an irreducible backedge.
+ bool addLoopSuccessorsToDist(const LoopData *OuterLoop, LoopData &Loop,
+ Distribution &Dist);
+
+ /// \brief Add an edge to the distribution.
+ ///
+  /// Adds an edge to \c Succ to \c Dist. If \c OuterLoop is non-null, whether
+  /// the edge is local/exit/backedge is determined in the context of \c
+  /// OuterLoop. Otherwise, every edge should be a local edge (since all the
+  /// loops are packaged up).
+ ///
+ /// \return \c true unless aborted due to an irreducible backedge.
+ bool addToDist(Distribution &Dist, const LoopData *OuterLoop,
+ const BlockNode &Pred, const BlockNode &Succ, uint64_t Weight);
+
+ LoopData &getLoopPackage(const BlockNode &Head) {
+ assert(Head.Index < Working.size());
+ assert(Working[Head.Index].isLoopHeader());
+ return *Working[Head.Index].Loop;
+ }
+
+ /// \brief Analyze irreducible SCCs.
+ ///
+  /// Separate irreducible SCCs from \c G, which is an explicit graph of \c
+ /// OuterLoop (or the top-level function, if \c OuterLoop is \c nullptr).
+ /// Insert them into \a Loops before \c Insert.
+ ///
+ /// \return the \c LoopData nodes representing the irreducible SCCs.
+ iterator_range<std::list<LoopData>::iterator>
+ analyzeIrreducible(const bfi_detail::IrreducibleGraph &G, LoopData *OuterLoop,
+ std::list<LoopData>::iterator Insert);
+
+ /// \brief Update a loop after packaging irreducible SCCs inside of it.
+ ///
+ /// Update \c OuterLoop. Before finding irreducible control flow, it was
+ /// partway through \a computeMassInLoop(), so \a LoopData::Exits and \a
+ /// LoopData::BackedgeMass need to be reset. Also, nodes that were packaged
+ /// up need to be removed from \a OuterLoop::Nodes.
+ void updateLoopWithIrreducible(LoopData &OuterLoop);
+
+ /// \brief Distribute mass according to a distribution.
+ ///
+  /// Distributes the mass in \c Source according to \c Dist. If \c OuterLoop
+  /// is non-null, backedge and exit masses are stored in it.
+ ///
+ /// Mass is distributed in parallel from two copies of the source mass.
+ void distributeMass(const BlockNode &Source, LoopData *OuterLoop,
+ Distribution &Dist);
+
+ /// \brief Compute the loop scale for a loop.
+ void computeLoopScale(LoopData &Loop);
+
+ /// \brief Package up a loop.
+ void packageLoop(LoopData &Loop);
+
+ /// \brief Unwrap loops.
+ void unwrapLoops();
+
+ /// \brief Finalize frequency metrics.
+ ///
+ /// Calculates final frequencies and cleans up no-longer-needed data
+ /// structures.
+ void finalizeMetrics();
+
+ /// \brief Clear all memory.
+ void clear();
+
+ virtual std::string getBlockName(const BlockNode &Node) const;
+ std::string getLoopName(const LoopData &Loop) const;
+
+ virtual raw_ostream &print(raw_ostream &OS) const { return OS; }
+ void dump() const { print(dbgs()); }
+
+ Float getFloatingBlockFreq(const BlockNode &Node) const;
+
+ BlockFrequency getBlockFreq(const BlockNode &Node) const;
+
+ raw_ostream &printBlockFreq(raw_ostream &OS, const BlockNode &Node) const;
+ raw_ostream &printBlockFreq(raw_ostream &OS,
+ const BlockFrequency &Freq) const;
+
+ uint64_t getEntryFreq() const {
+ assert(!Freqs.empty());
+ return Freqs[0].Integer;
+ }
+ /// \brief Virtual destructor.
+ ///
+ /// Need a virtual destructor to mask the compiler warning about
+ /// getBlockName().
+ virtual ~BlockFrequencyInfoImplBase() {}
+};
+
+namespace bfi_detail {
+template <class BlockT> struct TypeMap {};
+template <> struct TypeMap<BasicBlock> {
+ typedef BasicBlock BlockT;
+ typedef Function FunctionT;
+ typedef BranchProbabilityInfo BranchProbabilityInfoT;
+ typedef Loop LoopT;
+ typedef LoopInfo LoopInfoT;
+};
+template <> struct TypeMap<MachineBasicBlock> {
+ typedef MachineBasicBlock BlockT;
+ typedef MachineFunction FunctionT;
+ typedef MachineBranchProbabilityInfo BranchProbabilityInfoT;
+ typedef MachineLoop LoopT;
+ typedef MachineLoopInfo LoopInfoT;
+};
+
+/// \brief Get the name of a MachineBasicBlock.
+///
+/// Get the name of a MachineBasicBlock. It's templated so that including
+/// headers from CodeGen is unnecessary (that would be a layering issue).
+///
+/// This is used mainly for debug output. The name is similar to
+/// MachineBasicBlock::getFullName(), but skips the name of the function.
+template <class BlockT> std::string getBlockName(const BlockT *BB) {
+ assert(BB && "Unexpected nullptr");
+ auto MachineName = "BB" + Twine(BB->getNumber());
+ if (BB->getBasicBlock())
+ return (MachineName + "[" + BB->getName() + "]").str();
+ return MachineName.str();
+}
+/// \brief Get the name of a BasicBlock.
+template <> inline std::string getBlockName(const BasicBlock *BB) {
+ assert(BB && "Unexpected nullptr");
+ return BB->getName().str();
+}
+
+/// \brief Graph of irreducible control flow.
+///
+/// This graph is used for determining the SCCs in a loop (or top-level
+/// function) that has irreducible control flow.
+///
+/// During the block frequency algorithm, the local graphs are defined in a
+/// light-weight way, deferring to the \a BasicBlock or \a MachineBasicBlock
+/// graphs for most edges, but getting others from \a LoopData::ExitMap. The
+/// latter only has successor information.
+///
+/// \a IrreducibleGraph makes this graph explicit. It's in a form that can use
+/// \a GraphTraits (so that \a analyzeIrreducible() can use \a scc_iterator),
+/// and it explicitly lists predecessors and successors. The initialization
+/// that relies on \c MachineBasicBlock is defined in the header.
+struct IrreducibleGraph {
+ typedef BlockFrequencyInfoImplBase BFIBase;
+
+ BFIBase &BFI;
+
+ typedef BFIBase::BlockNode BlockNode;
+ struct IrrNode {
+ BlockNode Node;
+ unsigned NumIn;
+ std::deque<const IrrNode *> Edges;
+ IrrNode(const BlockNode &Node) : Node(Node), NumIn(0) {}
+
+ typedef std::deque<const IrrNode *>::const_iterator iterator;
+ iterator pred_begin() const { return Edges.begin(); }
+ iterator succ_begin() const { return Edges.begin() + NumIn; }
+ iterator pred_end() const { return succ_begin(); }
+ iterator succ_end() const { return Edges.end(); }
+ };
+ BlockNode Start;
+ const IrrNode *StartIrr;
+ std::vector<IrrNode> Nodes;
+ SmallDenseMap<uint32_t, IrrNode *, 4> Lookup;
+
+ /// \brief Construct an explicit graph containing irreducible control flow.
+ ///
+ /// Construct an explicit graph of the control flow in \c OuterLoop (or the
+ /// top-level function, if \c OuterLoop is \c nullptr). Uses \c
+ /// addBlockEdges to add block successors that have not been packaged into
+ /// loops.
+ ///
+ /// \a BlockFrequencyInfoImpl::computeIrreducibleMass() is the only expected
+ /// user of this.
+ template <class BlockEdgesAdder>
+ IrreducibleGraph(BFIBase &BFI, const BFIBase::LoopData *OuterLoop,
+ BlockEdgesAdder addBlockEdges)
+ : BFI(BFI), StartIrr(nullptr) {
+ initialize(OuterLoop, addBlockEdges);
+ }
+
+ template <class BlockEdgesAdder>
+ void initialize(const BFIBase::LoopData *OuterLoop,
+ BlockEdgesAdder addBlockEdges);
+ void addNodesInLoop(const BFIBase::LoopData &OuterLoop);
+ void addNodesInFunction();
+ void addNode(const BlockNode &Node) {
+ Nodes.emplace_back(Node);
+ BFI.Working[Node.Index].getMass() = BlockMass::getEmpty();
+ }
+ void indexNodes();
+ template <class BlockEdgesAdder>
+ void addEdges(const BlockNode &Node, const BFIBase::LoopData *OuterLoop,
+ BlockEdgesAdder addBlockEdges);
+ void addEdge(IrrNode &Irr, const BlockNode &Succ,
+ const BFIBase::LoopData *OuterLoop);
+};
+template <class BlockEdgesAdder>
+void IrreducibleGraph::initialize(const BFIBase::LoopData *OuterLoop,
+ BlockEdgesAdder addBlockEdges) {
+ if (OuterLoop) {
+ addNodesInLoop(*OuterLoop);
+ for (auto N : OuterLoop->Nodes)
+ addEdges(N, OuterLoop, addBlockEdges);
+ } else {
+ addNodesInFunction();
+ for (uint32_t Index = 0; Index < BFI.Working.size(); ++Index)
+ addEdges(Index, OuterLoop, addBlockEdges);
+ }
+ StartIrr = Lookup[Start.Index];
+}
+template <class BlockEdgesAdder>
+void IrreducibleGraph::addEdges(const BlockNode &Node,
+ const BFIBase::LoopData *OuterLoop,
+ BlockEdgesAdder addBlockEdges) {
+ auto L = Lookup.find(Node.Index);
+ if (L == Lookup.end())
+ return;
+ IrrNode &Irr = *L->second;
+ const auto &Working = BFI.Working[Node.Index];
+
+ if (Working.isAPackage())
+ for (const auto &I : Working.Loop->Exits)
+ addEdge(Irr, I.first, OuterLoop);
+ else
+ addBlockEdges(*this, Irr, OuterLoop);
+}
+}
+
+/// \brief Shared implementation for block frequency analysis.
+///
+/// This is a shared implementation of BlockFrequencyInfo and
+/// MachineBlockFrequencyInfo, and calculates the relative frequencies of
+/// blocks.
+///
+/// LoopInfo defines a loop as a "non-trivial" SCC dominated by a single block,
+/// which is called the header. A given loop, L, can have sub-loops, which are
+/// loops within the subgraph of L that exclude its header. (A "trivial" SCC
+/// consists of a single block that does not have a self-edge.)
+///
+/// In addition to loops, this algorithm has limited support for irreducible
+/// SCCs, which are SCCs with multiple entry blocks. Irreducible SCCs are
+/// discovered on the fly, and modelled as loops with multiple headers.
+///
+/// The headers of an irreducible sub-SCC consist of its entry blocks and all
+/// nodes that are targets of a backedge within it (excluding backedges within
+/// true sub-loops). Block frequency calculations act as if a block is
+/// inserted that intercepts all the edges to the headers. All backedges and
+/// entries point to this block. Its successors are the headers, which split
+/// the frequency evenly.
+///
+/// This algorithm leverages BlockMass and UnsignedFloat to maintain precision,
+/// separates mass distribution from loop scaling, and dithers to eliminate
+/// probability mass loss.
+///
+/// The implementation is split between BlockFrequencyInfoImpl, which knows the
+/// type of graph being modelled (BasicBlock vs. MachineBasicBlock), and
+/// BlockFrequencyInfoImplBase, which doesn't. The base class uses \a
+/// BlockNode, a wrapper around a uint32_t. BlockNode is numbered from 0 in
+/// reverse post-order. This gives two advantages: it's easy to compare the
+/// relative ordering of two nodes, and maps keyed on BlockT can be represented
+/// by vectors.
+///
+/// This algorithm is O(V+E), unless there is irreducible control flow, in
+/// which case it's O(V*E) in the worst case.
+///
+/// These are the main stages:
+///
+/// 0. Reverse post-order traversal (\a initializeRPOT()).
+///
+/// Run a single post-order traversal and save it (in reverse) in RPOT.
+/// All other stages make use of this ordering. Save a lookup from BlockT
+/// to BlockNode (the index into RPOT) in Nodes.
+///
+/// 1. Loop initialization (\a initializeLoops()).
+///
+/// Translate LoopInfo/MachineLoopInfo into a form suitable for the rest of
+/// the algorithm. In particular, store the immediate members of each loop
+/// in reverse post-order.
+///
+/// 2. Calculate mass and scale in loops (\a computeMassInLoops()).
+///
+/// For each loop (bottom-up), distribute mass through the DAG resulting
+/// from ignoring backedges and treating sub-loops as a single pseudo-node.
+/// Track the backedge mass distributed to the loop header, and use it to
+/// calculate the loop scale (number of loop iterations). Immediate
+/// members that represent sub-loops will already have been visited and
+/// packaged into a pseudo-node.
+///
+/// Distributing mass in a loop is a reverse-post-order traversal through
+/// the loop. Start by assigning full mass to the Loop header. For each
+/// node in the loop:
+///
+/// - Fetch and categorize the weight distribution for its successors.
+/// If this is a packaged-subloop, the weight distribution is stored
+/// in \a LoopData::Exits. Otherwise, fetch it from
+/// BranchProbabilityInfo.
+///
+/// - Each successor is categorized as \a Weight::Local, a local edge
+/// within the current loop, \a Weight::Backedge, a backedge to the
+/// loop header, or \a Weight::Exit, any successor outside the loop.
+/// The weight, the successor, and its category are stored in \a
+/// Distribution. There can be multiple edges to each successor.
+///
+/// - If there's a backedge to a non-header, there's an irreducible SCC.
+/// The usual flow is temporarily aborted. \a
+/// computeIrreducibleMass() finds the irreducible SCCs within the
+/// loop, packages them up, and restarts the flow.
+///
+/// - Normalize the distribution: scale weights down so that their sum
+/// is 32-bits, and coalesce multiple edges to the same node.
+///
+/// - Distribute the mass accordingly, dithering to minimize mass loss,
+/// as described in \a distributeMass().
+///
+/// Finally, calculate the loop scale from the accumulated backedge mass.
+///
+/// 3. Distribute mass in the function (\a computeMassInFunction()).
+///
+/// Finally, distribute mass through the DAG resulting from packaging all
+/// loops in the function. This uses the same algorithm as distributing
+/// mass in a loop, except that there are no exit or backedge edges.
+///
+/// 4. Unpackage loops (\a unwrapLoops()).
+///
+/// Initialize each block's frequency to a floating point representation of
+/// its mass.
+///
+/// Visit loops top-down, scaling the frequencies of its immediate members
+/// by the loop's pseudo-node's frequency.
+///
+/// 5. Convert frequencies to a 64-bit range (\a finalizeMetrics()).
+///
+/// Using the min and max frequencies as a guide, translate floating point
+/// frequencies to an appropriate range in uint64_t.
+///
+/// It has some known flaws.
+///
+/// - Loop scale is limited to 4096 per loop (2^12) to avoid exhausting
+/// BlockFrequency's 64-bit integer precision.
+///
+/// - The model of irreducible control flow is a rough approximation.
+///
+/// Modelling irreducible control flow exactly involves setting up and
+/// solving a group of infinite geometric series. Such precision is
+/// unlikely to be worthwhile, since most of our algorithms give up on
+/// irreducible control flow anyway.
+///
+/// Nevertheless, we might find that we need to get closer. Here's a sort
+/// of TODO list for the model with diminishing returns, to be completed as
+/// necessary.
+///
+/// - The headers for the \a LoopData representing an irreducible SCC
+/// include non-entry blocks. When these extra blocks exist, they
+/// indicate a self-contained irreducible sub-SCC. We could treat them
+/// as sub-loops, rather than arbitrarily shoving the problematic
+/// blocks into the headers of the main irreducible SCC.
+///
+/// - Backedge frequencies are assumed to be evenly split between the
+/// headers of a given irreducible SCC. Instead, we could track the
+/// backedge mass separately for each header, and adjust their relative
+/// frequencies.
+///
+/// - Entry frequencies are assumed to be evenly split between the
+/// headers of a given irreducible SCC, which is the only option if we
+/// need to compute mass in the SCC before its parent loop. Instead,
+/// we could partially compute mass in the parent loop, and stop when
+/// we get to the SCC. Here, we have the correct ratio of entry
+/// masses, which we can use to adjust their relative frequencies.
+/// Compute mass in the SCC, and then continue propagation in the
+/// parent.
+///
+/// - We can propagate mass iteratively through the SCC, for some fixed
+/// number of iterations. Each iteration starts by assigning the entry
+/// blocks their backedge mass from the prior iteration. The final
+/// mass for each block (and each exit, and the total backedge mass
+/// used for computing loop scale) is the sum of all iterations.
+/// (Running this until fixed point would "solve" the geometric
+/// series by simulation.)
+template <class BT> class BlockFrequencyInfoImpl : BlockFrequencyInfoImplBase {
+ typedef typename bfi_detail::TypeMap<BT>::BlockT BlockT;
+ typedef typename bfi_detail::TypeMap<BT>::FunctionT FunctionT;
+ typedef typename bfi_detail::TypeMap<BT>::BranchProbabilityInfoT
+ BranchProbabilityInfoT;
+ typedef typename bfi_detail::TypeMap<BT>::LoopT LoopT;
+ typedef typename bfi_detail::TypeMap<BT>::LoopInfoT LoopInfoT;
+
+ // This is part of a workaround for a GCC 4.7 crash on lambdas.
+ friend struct bfi_detail::BlockEdgesAdder<BT>;
+
+ typedef GraphTraits<const BlockT *> Successor;
+ typedef GraphTraits<Inverse<const BlockT *>> Predecessor;
+
+ const BranchProbabilityInfoT *BPI;
+ const LoopInfoT *LI;
+ const FunctionT *F;
+
+ // All blocks in reverse postorder.
+ std::vector<const BlockT *> RPOT;
+ DenseMap<const BlockT *, BlockNode> Nodes;
+
+ typedef typename std::vector<const BlockT *>::const_iterator rpot_iterator;
+
+ rpot_iterator rpot_begin() const { return RPOT.begin(); }
+ rpot_iterator rpot_end() const { return RPOT.end(); }
+
+ size_t getIndex(const rpot_iterator &I) const { return I - rpot_begin(); }
+
+ BlockNode getNode(const rpot_iterator &I) const {
+ return BlockNode(getIndex(I));
+ }
+ BlockNode getNode(const BlockT *BB) const { return Nodes.lookup(BB); }
+
+ const BlockT *getBlock(const BlockNode &Node) const {
+ assert(Node.Index < RPOT.size());
+ return RPOT[Node.Index];
+ }
+
+ /// \brief Run (and save) a post-order traversal.
+ ///
+ /// Saves a reverse post-order traversal of all the nodes in \a F.
+ void initializeRPOT();
+
+ /// \brief Initialize loop data.
+ ///
+ /// Build up \a Loops using \a LoopInfo. \a LoopInfo gives us a mapping from
+ /// each block to the deepest loop it's in, but we need the inverse. For each
+ /// loop, we store in reverse post-order its "immediate" members, defined as
+ /// the header, the headers of immediate sub-loops, and all other blocks in
+ /// the loop that are not in sub-loops.
+ void initializeLoops();
+
+ /// \brief Propagate to a block's successors.
+ ///
+ /// In the context of distributing mass through \c OuterLoop, divide the mass
+ /// currently assigned to \c Node between its successors.
+ ///
+ /// \return \c true unless there's an irreducible backedge.
+ bool propagateMassToSuccessors(LoopData *OuterLoop, const BlockNode &Node);
+
+ /// \brief Compute mass in a particular loop.
+ ///
+ /// Assign mass to \c Loop's header, and then for each block in \c Loop in
+ /// reverse post-order, distribute mass to its successors. Only visits nodes
+ /// that have not been packaged into sub-loops.
+ ///
+ /// \pre \a computeMassInLoop() has been called for each subloop of \c Loop.
+ /// \return \c true unless there's an irreducible backedge.
+ bool computeMassInLoop(LoopData &Loop);
+
+ /// \brief Try to compute mass in the top-level function.
+ ///
+ /// Assign mass to the entry block, and then for each block in reverse
+ /// post-order, distribute mass to its successors. Skips nodes that have
+ /// been packaged into loops.
+ ///
+ /// \pre \a computeMassInLoops() has been called.
+ /// \return \c true unless there's an irreducible backedge.
+ bool tryToComputeMassInFunction();
+
+ /// \brief Compute mass in (and package up) irreducible SCCs.
+ ///
+ /// Find the irreducible SCCs in \c OuterLoop, add them to \a Loops (in front
+ /// of \c Insert), and call \a computeMassInLoop() on each of them.
+ ///
+ /// If \c OuterLoop is \c nullptr, it refers to the top-level function.
+ ///
+ /// \pre \a computeMassInLoop() has been called for each subloop of \c
+ /// OuterLoop.
+  /// \pre \c Insert points at the last loop successfully processed by \a
+ /// computeMassInLoop().
+ /// \pre \c OuterLoop has irreducible SCCs.
+ void computeIrreducibleMass(LoopData *OuterLoop,
+ std::list<LoopData>::iterator Insert);
+
+ /// \brief Compute mass in all loops.
+ ///
+ /// For each loop bottom-up, call \a computeMassInLoop().
+ ///
+ /// \a computeMassInLoop() aborts (and returns \c false) on loops that
+  /// contain irreducible sub-SCCs. Use \a computeIrreducibleMass() and then
+ /// re-enter \a computeMassInLoop().
+ ///
+ /// \post \a computeMassInLoop() has returned \c true for every loop.
+ void computeMassInLoops();
+
+ /// \brief Compute mass in the top-level function.
+ ///
+ /// Uses \a tryToComputeMassInFunction() and \a computeIrreducibleMass() to
+ /// compute mass in the top-level function.
+ ///
+ /// \post \a tryToComputeMassInFunction() has returned \c true.
+ void computeMassInFunction();
+
+ std::string getBlockName(const BlockNode &Node) const override {
+ return bfi_detail::getBlockName(getBlock(Node));
+ }
+
+public:
+ const FunctionT *getFunction() const { return F; }
+
+ void doFunction(const FunctionT *F, const BranchProbabilityInfoT *BPI,
+ const LoopInfoT *LI);
+ BlockFrequencyInfoImpl() : BPI(nullptr), LI(nullptr), F(nullptr) {}
+
+ using BlockFrequencyInfoImplBase::getEntryFreq;
+ BlockFrequency getBlockFreq(const BlockT *BB) const {
+ return BlockFrequencyInfoImplBase::getBlockFreq(getNode(BB));
+ }
+ Float getFloatingBlockFreq(const BlockT *BB) const {
+ return BlockFrequencyInfoImplBase::getFloatingBlockFreq(getNode(BB));
+ }
+
+ /// \brief Print the frequencies for the current function.
+ ///
+ /// Prints the frequencies for the blocks in the current function.
+ ///
+ /// Blocks are printed in the natural iteration order of the function, rather
+ /// than reverse post-order. This provides two advantages: writing -analyze
+ /// tests is easier (since blocks come out in source order), and even
+ /// unreachable blocks are printed.
+ ///
+ /// \a BlockFrequencyInfoImplBase::print() only knows reverse post-order, so
+ /// we need to override it here.
+ raw_ostream &print(raw_ostream &OS) const override;
+ using BlockFrequencyInfoImplBase::dump;
+
+ using BlockFrequencyInfoImplBase::printBlockFreq;
+ raw_ostream &printBlockFreq(raw_ostream &OS, const BlockT *BB) const {
+ return BlockFrequencyInfoImplBase::printBlockFreq(OS, getNode(BB));
+ }
+};
+
+template <class BT>
+void BlockFrequencyInfoImpl<BT>::doFunction(const FunctionT *F,
+ const BranchProbabilityInfoT *BPI,
+ const LoopInfoT *LI) {
+ // Save the parameters.
+ this->BPI = BPI;
+ this->LI = LI;
+ this->F = F;
+
+ // Clean up left-over data structures.
+ BlockFrequencyInfoImplBase::clear();
+ RPOT.clear();
+ Nodes.clear();
+
+ // Initialize.
+ DEBUG(dbgs() << "\nblock-frequency: " << F->getName() << "\n================="
+ << std::string(F->getName().size(), '=') << "\n");
+ initializeRPOT();
+ initializeLoops();
+
+ // Visit loops in post-order to find the local mass distribution, and then do
+ // the full function.
+ computeMassInLoops();
+ computeMassInFunction();
+ unwrapLoops();
+ finalizeMetrics();
+}
+
+template <class BT> void BlockFrequencyInfoImpl<BT>::initializeRPOT() {
+ const BlockT *Entry = F->begin();
+ RPOT.reserve(F->size());
+ std::copy(po_begin(Entry), po_end(Entry), std::back_inserter(RPOT));
+ std::reverse(RPOT.begin(), RPOT.end());
+
+ assert(RPOT.size() - 1 <= BlockNode::getMaxIndex() &&
+ "More nodes in function than Block Frequency Info supports");
+
+ DEBUG(dbgs() << "reverse-post-order-traversal\n");
+ for (rpot_iterator I = rpot_begin(), E = rpot_end(); I != E; ++I) {
+ BlockNode Node = getNode(I);
+ DEBUG(dbgs() << " - " << getIndex(I) << ": " << getBlockName(Node) << "\n");
+ Nodes[*I] = Node;
+ }
+
+ Working.reserve(RPOT.size());
+ for (size_t Index = 0; Index < RPOT.size(); ++Index)
+ Working.emplace_back(Index);
+ Freqs.resize(RPOT.size());
+}
+
+template <class BT> void BlockFrequencyInfoImpl<BT>::initializeLoops() {
+ DEBUG(dbgs() << "loop-detection\n");
+ if (LI->empty())
+ return;
+
+ // Visit loops top down and assign them an index.
+ std::deque<std::pair<const LoopT *, LoopData *>> Q;
+ for (const LoopT *L : *LI)
+ Q.emplace_back(L, nullptr);
+ while (!Q.empty()) {
+ const LoopT *Loop = Q.front().first;
+ LoopData *Parent = Q.front().second;
+ Q.pop_front();
+
+ BlockNode Header = getNode(Loop->getHeader());
+ assert(Header.isValid());
+
+ Loops.emplace_back(Parent, Header);
+ Working[Header.Index].Loop = &Loops.back();
+ DEBUG(dbgs() << " - loop = " << getBlockName(Header) << "\n");
+
+ for (const LoopT *L : *Loop)
+ Q.emplace_back(L, &Loops.back());
+ }
+
+ // Visit nodes in reverse post-order and add them to their deepest containing
+ // loop.
+ for (size_t Index = 0; Index < RPOT.size(); ++Index) {
+ // Loop headers have already been mostly mapped.
+ if (Working[Index].isLoopHeader()) {
+ LoopData *ContainingLoop = Working[Index].getContainingLoop();
+ if (ContainingLoop)
+ ContainingLoop->Nodes.push_back(Index);
+ continue;
+ }
+
+ const LoopT *Loop = LI->getLoopFor(RPOT[Index]);
+ if (!Loop)
+ continue;
+
+ // Add this node to its containing loop's member list.
+ BlockNode Header = getNode(Loop->getHeader());
+ assert(Header.isValid());
+ const auto &HeaderData = Working[Header.Index];
+ assert(HeaderData.isLoopHeader());
+
+ Working[Index].Loop = HeaderData.Loop;
+ HeaderData.Loop->Nodes.push_back(Index);
+ DEBUG(dbgs() << " - loop = " << getBlockName(Header)
+ << ": member = " << getBlockName(Index) << "\n");
+ }
+}
+
+template <class BT> void BlockFrequencyInfoImpl<BT>::computeMassInLoops() {
+ // Visit loops with the deepest first, and the top-level loops last.
+ for (auto L = Loops.rbegin(), E = Loops.rend(); L != E; ++L) {
+ if (computeMassInLoop(*L))
+ continue;
+ auto Next = std::next(L);
+ computeIrreducibleMass(&*L, L.base());
+ L = std::prev(Next);
+ if (computeMassInLoop(*L))
+ continue;
+ llvm_unreachable("unhandled irreducible control flow");
+ }
+}
+
+template <class BT>
+bool BlockFrequencyInfoImpl<BT>::computeMassInLoop(LoopData &Loop) {
+ // Compute mass in loop.
+ DEBUG(dbgs() << "compute-mass-in-loop: " << getLoopName(Loop) << "\n");
+
+ if (Loop.isIrreducible()) {
+ BlockMass Remaining = BlockMass::getFull();
+ for (uint32_t H = 0; H < Loop.NumHeaders; ++H) {
+ auto &Mass = Working[Loop.Nodes[H].Index].getMass();
+ Mass = Remaining * BranchProbability(1, Loop.NumHeaders - H);
+ Remaining -= Mass;
+ }
+ for (const BlockNode &M : Loop.Nodes)
+ if (!propagateMassToSuccessors(&Loop, M))
+ llvm_unreachable("unhandled irreducible control flow");
+ } else {
+ Working[Loop.getHeader().Index].getMass() = BlockMass::getFull();
+ if (!propagateMassToSuccessors(&Loop, Loop.getHeader()))
+ llvm_unreachable("irreducible control flow to loop header!?");
+ for (const BlockNode &M : Loop.members())
+ if (!propagateMassToSuccessors(&Loop, M))
+ // Irreducible backedge.
+ return false;
+ }
+
+ computeLoopScale(Loop);
+ packageLoop(Loop);
+ return true;
+}
+
+template <class BT>
+bool BlockFrequencyInfoImpl<BT>::tryToComputeMassInFunction() {
+ // Compute mass in function.
+ DEBUG(dbgs() << "compute-mass-in-function\n");
+ assert(!Working.empty() && "no blocks in function");
+ assert(!Working[0].isLoopHeader() && "entry block is a loop header");
+
+ Working[0].getMass() = BlockMass::getFull();
+ for (rpot_iterator I = rpot_begin(), IE = rpot_end(); I != IE; ++I) {
+ // Check for nodes that have been packaged.
+ BlockNode Node = getNode(I);
+ if (Working[Node.Index].isPackaged())
+ continue;
+
+ if (!propagateMassToSuccessors(nullptr, Node))
+ return false;
+ }
+ return true;
+}
+
+template <class BT> void BlockFrequencyInfoImpl<BT>::computeMassInFunction() {
+ if (tryToComputeMassInFunction())
+ return;
+ computeIrreducibleMass(nullptr, Loops.begin());
+ if (tryToComputeMassInFunction())
+ return;
+ llvm_unreachable("unhandled irreducible control flow");
+}
+
+/// \note This should be a lambda, but that crashes GCC 4.7.
+namespace bfi_detail {
+template <class BT> struct BlockEdgesAdder {
+ typedef BT BlockT;
+ typedef BlockFrequencyInfoImplBase::LoopData LoopData;
+ typedef GraphTraits<const BlockT *> Successor;
+
+ const BlockFrequencyInfoImpl<BT> &BFI;
+ explicit BlockEdgesAdder(const BlockFrequencyInfoImpl<BT> &BFI)
+ : BFI(BFI) {}
+ void operator()(IrreducibleGraph &G, IrreducibleGraph::IrrNode &Irr,
+ const LoopData *OuterLoop) {
+ const BlockT *BB = BFI.RPOT[Irr.Node.Index];
+ for (auto I = Successor::child_begin(BB), E = Successor::child_end(BB);
+ I != E; ++I)
+ G.addEdge(Irr, BFI.getNode(*I), OuterLoop);
+ }
+};
+}
+template <class BT>
+void BlockFrequencyInfoImpl<BT>::computeIrreducibleMass(
+ LoopData *OuterLoop, std::list<LoopData>::iterator Insert) {
+ DEBUG(dbgs() << "analyze-irreducible-in-";
+ if (OuterLoop) dbgs() << "loop: " << getLoopName(*OuterLoop) << "\n";
+ else dbgs() << "function\n");
+
+ using namespace bfi_detail;
+ // Ideally, addBlockEdges() would be declared here as a lambda, but that
+ // crashes GCC 4.7.
+ BlockEdgesAdder<BT> addBlockEdges(*this);
+ IrreducibleGraph G(*this, OuterLoop, addBlockEdges);
+
+ for (auto &L : analyzeIrreducible(G, OuterLoop, Insert))
+ computeMassInLoop(L);
+
+ if (!OuterLoop)
+ return;
+ updateLoopWithIrreducible(*OuterLoop);
+}
+
+template <class BT>
+bool
+BlockFrequencyInfoImpl<BT>::propagateMassToSuccessors(LoopData *OuterLoop,
+ const BlockNode &Node) {
+ DEBUG(dbgs() << " - node: " << getBlockName(Node) << "\n");
+ // Calculate probability for successors.
+ Distribution Dist;
+ if (auto *Loop = Working[Node.Index].getPackagedLoop()) {
+ assert(Loop != OuterLoop && "Cannot propagate mass in a packaged loop");
+ if (!addLoopSuccessorsToDist(OuterLoop, *Loop, Dist))
+ // Irreducible backedge.
+ return false;
+ } else {
+ const BlockT *BB = getBlock(Node);
+ for (auto SI = Successor::child_begin(BB), SE = Successor::child_end(BB);
+ SI != SE; ++SI)
+ // Do not dereference SI, or getEdgeWeight() is linear in the number of
+ // successors.
+ if (!addToDist(Dist, OuterLoop, Node, getNode(*SI),
+ BPI->getEdgeWeight(BB, SI)))
+ // Irreducible backedge.
+ return false;
+ }
+
+ // Distribute mass to successors, saving exit and backedge data in the
+ // loop header.
+ distributeMass(Node, OuterLoop, Dist);
+ return true;
+}
+
+template <class BT>
+raw_ostream &BlockFrequencyInfoImpl<BT>::print(raw_ostream &OS) const {
+ if (!F)
+ return OS;
+ OS << "block-frequency-info: " << F->getName() << "\n";
+ for (const BlockT &BB : *F)
+ OS << " - " << bfi_detail::getBlockName(&BB)
+ << ": float = " << getFloatingBlockFreq(&BB)
+ << ", int = " << getBlockFreq(&BB).getFrequency() << "\n";
+
+ // Add an extra newline for readability.
+ OS << "\n";
+ return OS;
+}
+}
+
+#undef DEBUG_TYPE
+
+#endif
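
As a minimal sketch of how this template is typically driven for LLVM IR (the helper function below is hypothetical and not part of this header; it assumes BranchProbabilityInfo and LoopInfo have already been computed for the function):

  // Illustrative sketch only. Instantiate the implementation for IR basic
  // blocks, run the full pipeline (RPOT, loop detection, mass distribution,
  // loop scaling), and print the resulting block frequencies.
  #include "llvm/Analysis/BlockFrequencyInfoImpl.h"
  #include "llvm/Analysis/BranchProbabilityInfo.h"
  #include "llvm/Analysis/LoopInfo.h"
  using namespace llvm;

  static void printBlockFrequencies(const Function &F,
                                    const BranchProbabilityInfo &BPI,
                                    const LoopInfo &LI, raw_ostream &OS) {
    BlockFrequencyInfoImpl<BasicBlock> BFI;
    BFI.doFunction(&F, &BPI, &LI);
    BFI.print(OS); // blocks come out in natural iteration order, see print()
  }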
diff --git a/include/llvm/Analysis/BranchProbabilityInfo.h b/include/llvm/Analysis/BranchProbabilityInfo.h
index 4a6a280..4414c84 100644
--- a/include/llvm/Analysis/BranchProbabilityInfo.h
+++ b/include/llvm/Analysis/BranchProbabilityInfo.h
@@ -47,7 +47,7 @@ public:
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnFunction(Function &F) override;
- void print(raw_ostream &OS, const Module *M = 0) const override;
+ void print(raw_ostream &OS, const Module *M = nullptr) const override;
/// \brief Get an edge's probability, relative to other out-edges of the Src.
///
diff --git a/include/llvm/Analysis/CFG.h b/include/llvm/Analysis/CFG.h
index 02e3b45..7f92eda 100644
--- a/include/llvm/Analysis/CFG.h
+++ b/include/llvm/Analysis/CFG.h
@@ -65,8 +65,8 @@ bool isCriticalEdge(const TerminatorInst *TI, unsigned SuccNum,
/// on branchy code but not loops, and LI is most useful on code with loops but
/// does not help on branchy code outside loops.
bool isPotentiallyReachable(const Instruction *From, const Instruction *To,
- const DominatorTree *DT = 0,
- const LoopInfo *LI = 0);
+ const DominatorTree *DT = nullptr,
+ const LoopInfo *LI = nullptr);
/// \brief Determine whether block 'To' is reachable from 'From', returning
/// true if uncertain.
@@ -75,8 +75,8 @@ bool isPotentiallyReachable(const Instruction *From, const Instruction *To,
/// Returns false only if we can prove that once 'From' has been reached then
/// 'To' can not be executed. Conservatively returns true.
bool isPotentiallyReachable(const BasicBlock *From, const BasicBlock *To,
- const DominatorTree *DT = 0,
- const LoopInfo *LI = 0);
+ const DominatorTree *DT = nullptr,
+ const LoopInfo *LI = nullptr);
} // End llvm namespace
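
A brief usage sketch of what the new nullptr defaults buy callers (the wrapper function here is hypothetical): the optional analyses can simply be omitted, and supplying them when available only sharpens the answer.

  #include "llvm/Analysis/CFG.h"
  #include "llvm/Analysis/LoopInfo.h"
  #include "llvm/IR/Dominators.h"
  using namespace llvm;

  // Illustrative sketch only: DT and LI may be null.
  static bool mayReach(const Instruction *From, const Instruction *To,
                       const DominatorTree *DT, const LoopInfo *LI) {
    if (DT || LI)
      return isPotentiallyReachable(From, To, DT, LI);
    return isPotentiallyReachable(From, To); // conservative: true if uncertain
  }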
diff --git a/include/llvm/Analysis/CGSCCPassManager.h b/include/llvm/Analysis/CGSCCPassManager.h
new file mode 100644
index 0000000..09101ae
--- /dev/null
+++ b/include/llvm/Analysis/CGSCCPassManager.h
@@ -0,0 +1,591 @@
+//===- CGSCCPassManager.h - Call graph pass management ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This header provides classes for managing passes over SCCs of the call
+/// graph. These passes form an important component of LLVM's interprocedural
+/// optimizations. Because they operate on the SCCs of the call graph, and they
+/// traverse the graph in post order, they can effectively do pair-wise
+/// interprocedural optimizations for all call edges in the program. At each
+/// call site edge, the callee has already been optimized as much as is
+/// possible. This in turn allows very accurate analysis of it for IPO.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CGSCC_PASS_MANAGER_H
+#define LLVM_ANALYSIS_CGSCC_PASS_MANAGER_H
+
+#include "llvm/IR/PassManager.h"
+#include "llvm/Analysis/LazyCallGraph.h"
+
+namespace llvm {
+
+class CGSCCAnalysisManager;
+
+class CGSCCPassManager {
+public:
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ CGSCCPassManager() {}
+ CGSCCPassManager(CGSCCPassManager &&Arg) : Passes(std::move(Arg.Passes)) {}
+ CGSCCPassManager &operator=(CGSCCPassManager &&RHS) {
+ Passes = std::move(RHS.Passes);
+ return *this;
+ }
+
+ /// \brief Run all of the CGSCC passes in this pass manager over an SCC.
+ PreservedAnalyses run(LazyCallGraph::SCC *C,
+ CGSCCAnalysisManager *AM = nullptr);
+
+ template <typename CGSCCPassT> void addPass(CGSCCPassT Pass) {
+ Passes.emplace_back(new CGSCCPassModel<CGSCCPassT>(std::move(Pass)));
+ }
+
+ static StringRef name() { return "CGSCCPassManager"; }
+
+private:
+ // Pull in the concept type and model template specialized for SCCs.
+ typedef detail::PassConcept<LazyCallGraph::SCC *, CGSCCAnalysisManager>
+ CGSCCPassConcept;
+ template <typename PassT>
+ struct CGSCCPassModel
+ : detail::PassModel<LazyCallGraph::SCC *, CGSCCAnalysisManager, PassT> {
+ CGSCCPassModel(PassT Pass)
+ : detail::PassModel<LazyCallGraph::SCC *, CGSCCAnalysisManager, PassT>(
+ std::move(Pass)) {}
+ };
+
+ CGSCCPassManager(const CGSCCPassManager &) LLVM_DELETED_FUNCTION;
+ CGSCCPassManager &operator=(const CGSCCPassManager &) LLVM_DELETED_FUNCTION;
+
+ std::vector<std::unique_ptr<CGSCCPassConcept>> Passes;
+};
+
+/// \brief A CGSCC analysis manager to coordinate and cache analyses run over
+/// the SCCs of a call graph.
+class CGSCCAnalysisManager : public detail::AnalysisManagerBase<
+ CGSCCAnalysisManager, LazyCallGraph::SCC *> {
+ friend class detail::AnalysisManagerBase<CGSCCAnalysisManager,
+ LazyCallGraph::SCC *>;
+ typedef detail::AnalysisManagerBase<CGSCCAnalysisManager,
+ LazyCallGraph::SCC *> BaseT;
+ typedef BaseT::ResultConceptT ResultConceptT;
+ typedef BaseT::PassConceptT PassConceptT;
+
+public:
+ // Most public APIs are inherited from the CRTP base class.
+
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ CGSCCAnalysisManager() {}
+ CGSCCAnalysisManager(CGSCCAnalysisManager &&Arg)
+ : BaseT(std::move(static_cast<BaseT &>(Arg))),
+ CGSCCAnalysisResults(std::move(Arg.CGSCCAnalysisResults)) {}
+ CGSCCAnalysisManager &operator=(CGSCCAnalysisManager &&RHS) {
+ BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
+ CGSCCAnalysisResults = std::move(RHS.CGSCCAnalysisResults);
+ return *this;
+ }
+
+ /// \brief Returns true if the analysis manager has an empty results cache.
+ bool empty() const;
+
+ /// \brief Clear the CGSCC analysis result cache.
+ ///
+ /// This routine allows cleaning up when the set of SCCs itself has
+ /// potentially changed, and thus we can't even look up a result and
+ /// invalidate it directly. Notably, this does *not* call invalidate
+ /// functions as there is nothing to be done for them.
+ void clear();
+
+private:
+ CGSCCAnalysisManager(const CGSCCAnalysisManager &) LLVM_DELETED_FUNCTION;
+ CGSCCAnalysisManager &
+ operator=(const CGSCCAnalysisManager &) LLVM_DELETED_FUNCTION;
+
+ /// \brief Get a CGSCC analysis result, running the analysis if necessary.
+ ResultConceptT &getResultImpl(void *PassID, LazyCallGraph::SCC *C);
+
+ /// \brief Get a cached CGSCC analysis result or return null.
+ ResultConceptT *getCachedResultImpl(void *PassID,
+ LazyCallGraph::SCC *C) const;
+
+ /// \brief Invalidate a CGSCC analysis result.
+ void invalidateImpl(void *PassID, LazyCallGraph::SCC *C);
+
+ /// \brief Invalidate the results for an SCC.
+ void invalidateImpl(LazyCallGraph::SCC *C, const PreservedAnalyses &PA);
+
+ /// \brief List of CGSCC analysis pass IDs and associated concept pointers.
+ ///
+ /// Requires iterators to be valid across appending new entries and arbitrary
+ /// erases. Provides both the pass ID and concept pointer such that it is
+ /// half of a bijection and provides storage for the actual result concept.
+ typedef std::list<
+ std::pair<void *, std::unique_ptr<detail::AnalysisResultConcept<
+ LazyCallGraph::SCC *>>>> CGSCCAnalysisResultListT;
+
+ /// \brief Map type from SCC pointer to our custom list type.
+ typedef DenseMap<LazyCallGraph::SCC *, CGSCCAnalysisResultListT>
+ CGSCCAnalysisResultListMapT;
+
+ /// \brief Map from SCC to a list of CGSCC analysis results.
+ ///
+ /// Provides linear-time removal of all analysis results for an SCC and
+ /// the ultimate storage for a particular cached analysis result.
+ CGSCCAnalysisResultListMapT CGSCCAnalysisResultLists;
+
+ /// \brief Map type from a pair of analysis ID and SCC pointer to an
+ /// iterator into a particular result list.
+ typedef DenseMap<std::pair<void *, LazyCallGraph::SCC *>,
+ CGSCCAnalysisResultListT::iterator> CGSCCAnalysisResultMapT;
+
+ /// \brief Map from an analysis ID and SCC to a particular cached
+ /// analysis result.
+ CGSCCAnalysisResultMapT CGSCCAnalysisResults;
+};
+
+/// \brief A module analysis which acts as a proxy for a CGSCC analysis
+/// manager.
+///
+/// This primarily proxies invalidation information from the module analysis
+/// manager and module pass manager to a CGSCC analysis manager. You should
+/// never use a CGSCC analysis manager from within (transitively) a module
+/// pass manager unless your parent module pass has received a proxy result
+/// object for it.
+class CGSCCAnalysisManagerModuleProxy {
+public:
+ class Result {
+ public:
+ explicit Result(CGSCCAnalysisManager &CGAM) : CGAM(&CGAM) {}
+ // We have to explicitly define all the special member functions because
+ // MSVC refuses to generate them.
+ Result(const Result &Arg) : CGAM(Arg.CGAM) {}
+ Result(Result &&Arg) : CGAM(std::move(Arg.CGAM)) {}
+ Result &operator=(Result RHS) {
+ std::swap(CGAM, RHS.CGAM);
+ return *this;
+ }
+ ~Result();
+
+ /// \brief Accessor for the \c CGSCCAnalysisManager.
+ CGSCCAnalysisManager &getManager() { return *CGAM; }
+
+ /// \brief Handler for invalidation of the module.
+ ///
+ /// If this analysis itself is preserved, then we assume that the call
+ /// graph of the module hasn't changed and thus we don't need to invalidate
+ /// *all* cached data associated with an \c SCC* in the \c
+ /// CGSCCAnalysisManager.
+ ///
+ /// Regardless of whether this analysis is marked as preserved, all of the
+ /// analyses in the \c CGSCCAnalysisManager are potentially invalidated
+ /// based on the set of preserved analyses.
+ bool invalidate(Module *M, const PreservedAnalyses &PA);
+
+ private:
+ CGSCCAnalysisManager *CGAM;
+ };
+
+ static void *ID() { return (void *)&PassID; }
+
+ explicit CGSCCAnalysisManagerModuleProxy(CGSCCAnalysisManager &CGAM)
+ : CGAM(&CGAM) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ CGSCCAnalysisManagerModuleProxy(
+ const CGSCCAnalysisManagerModuleProxy &Arg)
+ : CGAM(Arg.CGAM) {}
+ CGSCCAnalysisManagerModuleProxy(CGSCCAnalysisManagerModuleProxy &&Arg)
+ : CGAM(std::move(Arg.CGAM)) {}
+ CGSCCAnalysisManagerModuleProxy &
+ operator=(CGSCCAnalysisManagerModuleProxy RHS) {
+ std::swap(CGAM, RHS.CGAM);
+ return *this;
+ }
+
+ /// \brief Run the analysis pass and create our proxy result object.
+ ///
+ /// This doesn't do any interesting work; it is primarily used to insert our
+ /// proxy result object into the module analysis cache so that we can proxy
+ /// invalidation to the CGSCC analysis manager.
+ ///
+ /// In debug builds, it will also assert that the analysis manager is empty
+ /// as no queries should arrive at the CGSCC analysis manager prior to
+ /// this analysis being requested.
+ Result run(Module *M);
+
+private:
+ static char PassID;
+
+ CGSCCAnalysisManager *CGAM;
+};
+
+/// \brief A CGSCC analysis which acts as a proxy for a module analysis
+/// manager.
+///
+/// This primarily provides an accessor to a parent module analysis manager to
+/// CGSCC passes. Only the const interface of the module analysis manager is
+/// provided to indicate that once inside of a CGSCC analysis pass you
+/// cannot request a module analysis to actually run. Instead, the user must
+/// rely on the \c getCachedResult API.
+///
+/// This proxy *doesn't* manage the invalidation in any way. That is handled by
+/// the recursive return path of each layer of the pass manager and the
+/// returned PreservedAnalysis set.
+class ModuleAnalysisManagerCGSCCProxy {
+public:
+ /// \brief Result proxy object for \c ModuleAnalysisManagerCGSCCProxy.
+ class Result {
+ public:
+ explicit Result(const ModuleAnalysisManager &MAM) : MAM(&MAM) {}
+ // We have to explicitly define all the special member functions because
+ // MSVC refuses to generate them.
+ Result(const Result &Arg) : MAM(Arg.MAM) {}
+ Result(Result &&Arg) : MAM(std::move(Arg.MAM)) {}
+ Result &operator=(Result RHS) {
+ std::swap(MAM, RHS.MAM);
+ return *this;
+ }
+
+ const ModuleAnalysisManager &getManager() const { return *MAM; }
+
+ /// \brief Handle invalidation by ignoring it; this pass is immutable.
+ bool invalidate(LazyCallGraph::SCC *) { return false; }
+
+ private:
+ const ModuleAnalysisManager *MAM;
+ };
+
+ static void *ID() { return (void *)&PassID; }
+
+ ModuleAnalysisManagerCGSCCProxy(const ModuleAnalysisManager &MAM)
+ : MAM(&MAM) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ ModuleAnalysisManagerCGSCCProxy(
+ const ModuleAnalysisManagerCGSCCProxy &Arg)
+ : MAM(Arg.MAM) {}
+ ModuleAnalysisManagerCGSCCProxy(ModuleAnalysisManagerCGSCCProxy &&Arg)
+ : MAM(std::move(Arg.MAM)) {}
+ ModuleAnalysisManagerCGSCCProxy &
+ operator=(ModuleAnalysisManagerCGSCCProxy RHS) {
+ std::swap(MAM, RHS.MAM);
+ return *this;
+ }
+
+ /// \brief Run the analysis pass and create our proxy result object.
+ /// Nothing to see here; it just forwards the \c MAM reference into the
+ /// result.
+ Result run(LazyCallGraph::SCC *) { return Result(*MAM); }
+
+private:
+ static char PassID;
+
+ const ModuleAnalysisManager *MAM;
+};
+
+/// \brief The core module pass which does a post-order walk of the SCCs and
+/// runs a CGSCC pass over each one.
+///
+/// Designed to allow composition of a CGSCCPass(Manager) and
+/// a ModulePassManager. Note that this pass must be run with a module analysis
+/// manager as it uses the LazyCallGraph analysis. It will also run the
+/// \c CGSCCAnalysisManagerModuleProxy analysis prior to running the CGSCC
+/// pass over the module to enable a \c CGSCCAnalysisManager to be used
+/// within this run safely.
+template <typename CGSCCPassT> class ModuleToPostOrderCGSCCPassAdaptor {
+public:
+ explicit ModuleToPostOrderCGSCCPassAdaptor(CGSCCPassT Pass)
+ : Pass(std::move(Pass)) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ ModuleToPostOrderCGSCCPassAdaptor(
+ const ModuleToPostOrderCGSCCPassAdaptor &Arg)
+ : Pass(Arg.Pass) {}
+ ModuleToPostOrderCGSCCPassAdaptor(ModuleToPostOrderCGSCCPassAdaptor &&Arg)
+ : Pass(std::move(Arg.Pass)) {}
+ friend void swap(ModuleToPostOrderCGSCCPassAdaptor &LHS,
+ ModuleToPostOrderCGSCCPassAdaptor &RHS) {
+ using std::swap;
+ swap(LHS.Pass, RHS.Pass);
+ }
+ ModuleToPostOrderCGSCCPassAdaptor &
+ operator=(ModuleToPostOrderCGSCCPassAdaptor RHS) {
+ swap(*this, RHS);
+ return *this;
+ }
+
+ /// \brief Runs the CGSCC pass across every SCC in the module.
+ PreservedAnalyses run(Module *M, ModuleAnalysisManager *AM) {
+ assert(AM && "We need analyses to compute the call graph!");
+
+ // Setup the CGSCC analysis manager from its proxy.
+ CGSCCAnalysisManager &CGAM =
+ AM->getResult<CGSCCAnalysisManagerModuleProxy>(M).getManager();
+
+ // Get the call graph for this module.
+ LazyCallGraph &CG = AM->getResult<LazyCallGraphAnalysis>(M);
+
+ PreservedAnalyses PA = PreservedAnalyses::all();
+ for (LazyCallGraph::SCC &C : CG.postorder_sccs()) {
+ PreservedAnalyses PassPA = Pass.run(&C, &CGAM);
+
+ // We know that the CGSCC pass couldn't have invalidated any other
+ // SCC's analyses (that's the contract of a CGSCC pass), so
+ // directly handle the CGSCC analysis manager's invalidation here.
+ // FIXME: This isn't quite correct. We need to handle the case where the
+ // pass updated the CG, particularly some child of the current SCC, and
+ // invalidate its analyses.
+ CGAM.invalidate(&C, PassPA);
+
+ // Then intersect the preserved set so that invalidation of module
+ // analyses will eventually occur when the module pass completes.
+ PA.intersect(std::move(PassPA));
+ }
+
+ // By definition we preserve the proxy. This precludes *any* invalidation
+ // of CGSCC analyses by the proxy, but that's OK because we've taken
+ // care to invalidate analyses in the CGSCC analysis manager
+ // incrementally above.
+ PA.preserve<CGSCCAnalysisManagerModuleProxy>();
+ return PA;
+ }
+
+ static StringRef name() { return "ModuleToPostOrderCGSCCPassAdaptor"; }
+
+private:
+ CGSCCPassT Pass;
+};
+
+/// \brief A function to deduce a CGSCC pass type and wrap it in the
+/// templated adaptor.
+template <typename CGSCCPassT>
+ModuleToPostOrderCGSCCPassAdaptor<CGSCCPassT>
+createModuleToPostOrderCGSCCPassAdaptor(CGSCCPassT Pass) {
+ return std::move(
+ ModuleToPostOrderCGSCCPassAdaptor<CGSCCPassT>(std::move(Pass)));
+}
+
+/// \brief A CGSCC analysis which acts as a proxy for a function analysis
+/// manager.
+///
+/// This primarily proxies invalidation information from the CGSCC analysis
+/// manager and CGSCC pass manager to a function analysis manager. You should
+/// never use a function analysis manager from within (transitively) a CGSCC
+/// pass manager unless your parent CGSCC pass has received a proxy result
+/// object for it.
+class FunctionAnalysisManagerCGSCCProxy {
+public:
+ class Result {
+ public:
+ explicit Result(FunctionAnalysisManager &FAM) : FAM(&FAM) {}
+ // We have to explicitly define all the special member functions because
+ // MSVC refuses to generate them.
+ Result(const Result &Arg) : FAM(Arg.FAM) {}
+ Result(Result &&Arg) : FAM(std::move(Arg.FAM)) {}
+ Result &operator=(Result RHS) {
+ std::swap(FAM, RHS.FAM);
+ return *this;
+ }
+ ~Result();
+
+ /// \brief Accessor for the \c FunctionAnalysisManager.
+ FunctionAnalysisManager &getManager() { return *FAM; }
+
+ /// \brief Handler for invalidation of the SCC.
+ ///
+ /// If this analysis itself is preserved, then we assume that the set of \c
+ /// Function objects in the \c SCC hasn't changed and thus we don't need
+ /// to invalidate *all* cached data associated with a \c Function* in the \c
+ /// FunctionAnalysisManager.
+ ///
+ /// Regardless of whether this analysis is marked as preserved, all of the
+ /// analyses in the \c FunctionAnalysisManager are potentially invalidated
+ /// based on the set of preserved analyses.
+ bool invalidate(LazyCallGraph::SCC *C, const PreservedAnalyses &PA);
+
+ private:
+ FunctionAnalysisManager *FAM;
+ };
+
+ static void *ID() { return (void *)&PassID; }
+
+ explicit FunctionAnalysisManagerCGSCCProxy(FunctionAnalysisManager &FAM)
+ : FAM(&FAM) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ FunctionAnalysisManagerCGSCCProxy(
+ const FunctionAnalysisManagerCGSCCProxy &Arg)
+ : FAM(Arg.FAM) {}
+ FunctionAnalysisManagerCGSCCProxy(FunctionAnalysisManagerCGSCCProxy &&Arg)
+ : FAM(std::move(Arg.FAM)) {}
+ FunctionAnalysisManagerCGSCCProxy &
+ operator=(FunctionAnalysisManagerCGSCCProxy RHS) {
+ std::swap(FAM, RHS.FAM);
+ return *this;
+ }
+
+ /// \brief Run the analysis pass and create our proxy result object.
+ ///
+ /// This doesn't do any interesting work; it is primarily used to insert our
+ /// proxy result object into the module analysis cache so that we can proxy
+ /// invalidation to the function analysis manager.
+ ///
+ /// In debug builds, it will also assert that the analysis manager is empty
+ /// as no queries should arrive at the function analysis manager prior to
+ /// this analysis being requested.
+ Result run(LazyCallGraph::SCC *C);
+
+private:
+ static char PassID;
+
+ FunctionAnalysisManager *FAM;
+};
+
+/// \brief A function analysis which acts as a proxy for a CGSCC analysis
+/// manager.
+///
+/// This primarily provides an accessor to a parent CGSCC analysis manager to
+/// function passes. Only the const interface of the CGSCC analysis manager is
+/// provided to indicate that once inside of a function analysis pass you
+/// cannot request a CGSCC analysis to actually run. Instead, the user must
+/// rely on the \c getCachedResult API.
+///
+/// This proxy *doesn't* manage the invalidation in any way. That is handled by
+/// the recursive return path of each layer of the pass manager and the
+/// returned PreservedAnalysis set.
+class CGSCCAnalysisManagerFunctionProxy {
+public:
+ /// \brief Result proxy object for \c CGSCCAnalysisManagerFunctionProxy.
+ class Result {
+ public:
+ explicit Result(const CGSCCAnalysisManager &CGAM) : CGAM(&CGAM) {}
+ // We have to explicitly define all the special member functions because
+ // MSVC refuses to generate them.
+ Result(const Result &Arg) : CGAM(Arg.CGAM) {}
+ Result(Result &&Arg) : CGAM(std::move(Arg.CGAM)) {}
+ Result &operator=(Result RHS) {
+ std::swap(CGAM, RHS.CGAM);
+ return *this;
+ }
+
+ const CGSCCAnalysisManager &getManager() const { return *CGAM; }
+
+ /// \brief Handle invalidation by ignoring it; this pass is immutable.
+ bool invalidate(Function *) { return false; }
+
+ private:
+ const CGSCCAnalysisManager *CGAM;
+ };
+
+ static void *ID() { return (void *)&PassID; }
+
+ CGSCCAnalysisManagerFunctionProxy(const CGSCCAnalysisManager &CGAM)
+ : CGAM(&CGAM) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ CGSCCAnalysisManagerFunctionProxy(
+ const CGSCCAnalysisManagerFunctionProxy &Arg)
+ : CGAM(Arg.CGAM) {}
+ CGSCCAnalysisManagerFunctionProxy(CGSCCAnalysisManagerFunctionProxy &&Arg)
+ : CGAM(std::move(Arg.CGAM)) {}
+ CGSCCAnalysisManagerFunctionProxy &
+ operator=(CGSCCAnalysisManagerFunctionProxy RHS) {
+ std::swap(CGAM, RHS.CGAM);
+ return *this;
+ }
+
+ /// \brief Run the analysis pass and create our proxy result object.
+ /// Nothing to see here; it just forwards the \c CGAM reference into the
+ /// result.
+ Result run(Function *) { return Result(*CGAM); }
+
+private:
+ static char PassID;
+
+ const CGSCCAnalysisManager *CGAM;
+};
+
+/// \brief Adaptor that maps from an SCC to its functions.
+///
+/// Designed to allow composition of a FunctionPass(Manager) and
+/// a CGSCCPassManager. Note that if this pass is constructed with a pointer
+/// to a \c CGSCCAnalysisManager it will run the
+/// \c FunctionAnalysisManagerCGSCCProxy analysis prior to running the function
+/// pass over the SCC to enable a \c FunctionAnalysisManager to be used
+/// within this run safely.
+template <typename FunctionPassT> class CGSCCToFunctionPassAdaptor {
+public:
+ explicit CGSCCToFunctionPassAdaptor(FunctionPassT Pass)
+ : Pass(std::move(Pass)) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ CGSCCToFunctionPassAdaptor(const CGSCCToFunctionPassAdaptor &Arg)
+ : Pass(Arg.Pass) {}
+ CGSCCToFunctionPassAdaptor(CGSCCToFunctionPassAdaptor &&Arg)
+ : Pass(std::move(Arg.Pass)) {}
+ friend void swap(CGSCCToFunctionPassAdaptor &LHS, CGSCCToFunctionPassAdaptor &RHS) {
+ using std::swap;
+ swap(LHS.Pass, RHS.Pass);
+ }
+ CGSCCToFunctionPassAdaptor &operator=(CGSCCToFunctionPassAdaptor RHS) {
+ swap(*this, RHS);
+ return *this;
+ }
+
+ /// \brief Runs the function pass across every function in the SCC.
+ PreservedAnalyses run(LazyCallGraph::SCC *C, CGSCCAnalysisManager *AM) {
+ FunctionAnalysisManager *FAM = nullptr;
+ if (AM)
+ // Setup the function analysis manager from its proxy.
+ FAM = &AM->getResult<FunctionAnalysisManagerCGSCCProxy>(C).getManager();
+
+ PreservedAnalyses PA = PreservedAnalyses::all();
+ for (LazyCallGraph::Node *N : *C) {
+ PreservedAnalyses PassPA = Pass.run(&N->getFunction(), FAM);
+
+ // We know that the function pass couldn't have invalidated any other
+ // function's analyses (that's the contract of a function pass), so
+ // directly handle the function analysis manager's invalidation here.
+ if (FAM)
+ FAM->invalidate(&N->getFunction(), PassPA);
+
+ // Then intersect the preserved set so that invalidation of module
+ // analyses will eventually occur when the module pass completes.
+ PA.intersect(std::move(PassPA));
+ }
+
+ // By definition we preserve the proxy. This precludes *any* invalidation
+ // of function analyses by the proxy, but that's OK because we've taken
+ // care to invalidate analyses in the function analysis manager
+ // incrementally above.
+ // FIXME: We need to update the call graph here to account for any deleted
+ // edges!
+ PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
+ return PA;
+ }
+
+ static StringRef name() { return "CGSCCToFunctionPassAdaptor"; }
+
+private:
+ FunctionPassT Pass;
+};
+
+/// \brief A function to deduce a function pass type and wrap it in the
+/// templated adaptor.
+template <typename FunctionPassT>
+CGSCCToFunctionPassAdaptor<FunctionPassT>
+createCGSCCToFunctionPassAdaptor(FunctionPassT Pass) {
+ return std::move(CGSCCToFunctionPassAdaptor<FunctionPassT>(std::move(Pass)));
+}
+
+}
+
+#endif
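
To see how the adaptors above compose, here is a minimal sketch (the no-op function pass is a made-up placeholder, and ModulePassManager/FunctionPassManager are assumed to come from llvm/IR/PassManager.h as in the rest of the new pass manager): a function pass is wrapped into a CGSCC pipeline, which is in turn wrapped into a module pipeline that visits SCCs in post-order.

  #include "llvm/Analysis/CGSCCPassManager.h"
  using namespace llvm;

  // Hypothetical function pass used purely for illustration.
  struct NoOpFunctionPass {
    PreservedAnalyses run(Function *, FunctionAnalysisManager *) {
      return PreservedAnalyses::all();
    }
    static StringRef name() { return "NoOpFunctionPass"; }
  };

  static void buildPipeline(ModulePassManager &MPM) {
    FunctionPassManager FPM;
    FPM.addPass(NoOpFunctionPass());
    CGSCCPassManager CGPM;
    CGPM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
    MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM)));
  }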
diff --git a/include/llvm/Analysis/ConstantFolding.h b/include/llvm/Analysis/ConstantFolding.h
index 0018a56..09d45ca 100644
--- a/include/llvm/Analysis/ConstantFolding.h
+++ b/include/llvm/Analysis/ConstantFolding.h
@@ -36,15 +36,16 @@ namespace llvm {
/// Note that this fails if not all of the operands are constant. Otherwise,
/// this function can only fail when attempting to fold instructions like loads
/// and stores, which have no constant expression form.
-Constant *ConstantFoldInstruction(Instruction *I, const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0);
+Constant *ConstantFoldInstruction(Instruction *I,
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr);
/// ConstantFoldConstantExpression - Attempt to fold the constant expression
/// using the specified DataLayout. If successful, the constant result is
/// returned, if not, null is returned.
Constant *ConstantFoldConstantExpression(const ConstantExpr *CE,
- const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0);
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr);
/// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
/// specified operands. If successful, the constant result is returned, if not,
@@ -54,8 +55,8 @@ Constant *ConstantFoldConstantExpression(const ConstantExpr *CE,
///
Constant *ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
ArrayRef<Constant *> Ops,
- const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0);
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr);
/// ConstantFoldCompareInstOperands - Attempt to constant fold a compare
/// instruction (icmp/fcmp) with the specified operands. If it fails, it
@@ -63,8 +64,8 @@ Constant *ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
///
Constant *ConstantFoldCompareInstOperands(unsigned Predicate,
Constant *LHS, Constant *RHS,
- const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0);
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr);
/// ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue
/// instruction with the specified operands and indices. The constant result is
@@ -75,7 +76,8 @@ Constant *ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val,
/// ConstantFoldLoadFromConstPtr - Return the value that a load from C would
/// produce if it is constant and determinable. If this is not determinable,
/// return null.
-Constant *ConstantFoldLoadFromConstPtr(Constant *C, const DataLayout *TD = 0);
+Constant *ConstantFoldLoadFromConstPtr(Constant *C,
+ const DataLayout *TD = nullptr);
/// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
/// getelementptr constantexpr, return the constant value being addressed by the
@@ -96,7 +98,7 @@ bool canConstantFoldCallTo(const Function *F);
/// ConstantFoldCall - Attempt to constant fold a call to the specified function
/// with the specified arguments, returning null if unsuccessful.
Constant *ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
- const TargetLibraryInfo *TLI = 0);
+ const TargetLibraryInfo *TLI = nullptr);
}
#endif
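
A small usage sketch (the helper below is hypothetical): with the defaulted nullptr parameters, folding can be attempted without any of the optional analyses, and a non-null return is the folded constant.

  #include "llvm/Analysis/ConstantFolding.h"
  #include "llvm/IR/Constant.h"
  #include "llvm/IR/Instruction.h"
  using namespace llvm;

  // Illustrative only: try to fold I; DL may be null, TLI defaults to nullptr.
  static bool tryFold(Instruction *I, const DataLayout *DL) {
    if (Constant *C = ConstantFoldInstruction(I, DL)) {
      I->replaceAllUsesWith(C);
      return true;
    }
    return false;
  }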
diff --git a/include/llvm/Analysis/DOTGraphTraitsPass.h b/include/llvm/Analysis/DOTGraphTraitsPass.h
index ff3392a..53c832c 100644
--- a/include/llvm/Analysis/DOTGraphTraitsPass.h
+++ b/include/llvm/Analysis/DOTGraphTraitsPass.h
@@ -16,6 +16,7 @@
#include "llvm/Analysis/CFGPrinter.h"
#include "llvm/Pass.h"
+#include "llvm/Support/FileSystem.h"
namespace llvm {
diff --git a/include/llvm/Analysis/DependenceAnalysis.h b/include/llvm/Analysis/DependenceAnalysis.h
index a142828..279755e 100644
--- a/include/llvm/Analysis/DependenceAnalysis.h
+++ b/include/llvm/Analysis/DependenceAnalysis.h
@@ -73,8 +73,8 @@ namespace llvm {
Instruction *Destination) :
Src(Source),
Dst(Destination),
- NextPredecessor(NULL),
- NextSuccessor(NULL) {}
+ NextPredecessor(nullptr),
+ NextSuccessor(nullptr) {}
virtual ~Dependence() {}
/// Dependence::DVEntry - Each level in the distance/direction vector
@@ -96,7 +96,7 @@ namespace llvm {
bool Splitable : 1; // Splitting the loop will break dependence.
const SCEV *Distance; // NULL implies no distance available.
DVEntry() : Direction(ALL), Scalar(true), PeelFirst(false),
- PeelLast(false), Splitable(false), Distance(NULL) { }
+ PeelLast(false), Splitable(false), Distance(nullptr) { }
};
/// getSrc - Returns the source instruction for this dependence.
@@ -154,7 +154,7 @@ namespace llvm {
/// getDistance - Returns the distance (or NULL) associated with a
/// particular level.
- virtual const SCEV *getDistance(unsigned Level) const { return NULL; }
+ virtual const SCEV *getDistance(unsigned Level) const { return nullptr; }
/// isPeelFirst - Returns true if peeling the first iteration from
/// this loop will break this dependence.
@@ -910,7 +910,8 @@ namespace llvm {
const Constraint &CurConstraint) const;
bool tryDelinearize(const SCEV *SrcSCEV, const SCEV *DstSCEV,
- SmallVectorImpl<Subscript> &Pair) const;
+ SmallVectorImpl<Subscript> &Pair,
+ const SCEV *ElementSize) const;
public:
static char ID; // Class identification, replacement for typeinfo
@@ -921,7 +922,7 @@ namespace llvm {
bool runOnFunction(Function &F) override;
void releaseMemory() override;
void getAnalysisUsage(AnalysisUsage &) const override;
- void print(raw_ostream &, const Module * = 0) const override;
+ void print(raw_ostream &, const Module * = nullptr) const override;
}; // class DependenceAnalysis
/// createDependenceAnalysisPass - This creates an instance of the
diff --git a/include/llvm/Analysis/DominanceFrontier.h b/include/llvm/Analysis/DominanceFrontier.h
index 4dcea2d..0fbaa13 100644
--- a/include/llvm/Analysis/DominanceFrontier.h
+++ b/include/llvm/Analysis/DominanceFrontier.h
@@ -142,7 +142,7 @@ public:
/// print - Convert to human readable form
///
- void print(raw_ostream &OS, const Module* = 0) const override;
+ void print(raw_ostream &OS, const Module* = nullptr) const override;
/// dump - Dump the dominance frontier to dbgs().
void dump() const;
diff --git a/include/llvm/Analysis/IVUsers.h b/include/llvm/Analysis/IVUsers.h
index c6bb494..6038872 100644
--- a/include/llvm/Analysis/IVUsers.h
+++ b/include/llvm/Analysis/IVUsers.h
@@ -169,7 +169,7 @@ public:
return Processed.count(Inst);
}
- void print(raw_ostream &OS, const Module* = 0) const override;
+ void print(raw_ostream &OS, const Module* = nullptr) const override;
/// dump - This method is used for debugging.
void dump() const;
diff --git a/include/llvm/Analysis/InstructionSimplify.h b/include/llvm/Analysis/InstructionSimplify.h
index 775d0df..2367c0b 100644
--- a/include/llvm/Analysis/InstructionSimplify.h
+++ b/include/llvm/Analysis/InstructionSimplify.h
@@ -48,160 +48,166 @@ namespace llvm {
/// SimplifyAddInst - Given operands for an Add, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
- const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// SimplifySubInst - Given operands for a Sub, see if we can
/// fold the result. If not, this returns null.
Value *SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
- const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// Given operands for an FAdd, see if we can fold the result. If not, this
/// returns null.
Value *SimplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF,
- const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// Given operands for an FSub, see if we can fold the result. If not, this
/// returns null.
Value *SimplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF,
- const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// Given operands for an FMul, see if we can fold the result. If not, this
/// returns null.
Value *SimplifyFMulInst(Value *LHS, Value *RHS,
FastMathFlags FMF,
- const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// SimplifyMulInst - Given operands for a Mul, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifyMulInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ Value *SimplifyMulInst(Value *LHS, Value *RHS, const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// SimplifySDivInst - Given operands for an SDiv, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifySDivInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ Value *SimplifySDivInst(Value *LHS, Value *RHS,
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// SimplifyUDivInst - Given operands for a UDiv, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifyUDivInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ Value *SimplifyUDivInst(Value *LHS, Value *RHS,
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// SimplifyFDivInst - Given operands for an FDiv, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifyFDivInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ Value *SimplifyFDivInst(Value *LHS, Value *RHS,
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// SimplifySRemInst - Given operands for an SRem, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifySRemInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ Value *SimplifySRemInst(Value *LHS, Value *RHS,
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// SimplifyURemInst - Given operands for a URem, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifyURemInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ Value *SimplifyURemInst(Value *LHS, Value *RHS,
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// SimplifyFRemInst - Given operands for an FRem, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifyFRemInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ Value *SimplifyFRemInst(Value *LHS, Value *RHS,
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// SimplifyShlInst - Given operands for a Shl, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// SimplifyLShrInst - Given operands for a LShr, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
- const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// SimplifyAShrInst - Given operands for a AShr, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
- const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// SimplifyAndInst - Given operands for an And, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifyAndInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ Value *SimplifyAndInst(Value *LHS, Value *RHS, const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// SimplifyOrInst - Given operands for an Or, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifyOrInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ Value *SimplifyOrInst(Value *LHS, Value *RHS, const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// SimplifyXorInst - Given operands for a Xor, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifyXorInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ Value *SimplifyXorInst(Value *LHS, Value *RHS, const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// SimplifyICmpInst - Given operands for an ICmpInst, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// SimplifyFCmpInst - Given operands for an FCmpInst, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// SimplifySelectInst - Given operands for a SelectInst, see if we can fold
/// the result. If not, this returns null.
Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
- const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// SimplifyGEPInst - Given operands for an GetElementPtrInst, see if we can
/// fold the result. If not, this returns null.
- Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// SimplifyInsertValueInst - Given operands for an InsertValueInst, see if we
/// can fold the result. If not, this returns null.
Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs,
- const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// SimplifyTruncInst - Given operands for an TruncInst, see if we can fold
/// the result. If not, this returns null.
- Value *SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ Value *SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
//=== Helper functions for higher up the class hierarchy.
@@ -209,40 +215,40 @@ namespace llvm {
/// SimplifyCmpInst - Given operands for a CmpInst, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// SimplifyBinOp - Given operands for a BinaryOperator, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
- const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// \brief Given a function and iterators over arguments, see if we can fold
/// the result.
///
/// If this call could not be simplified returns null.
Value *SimplifyCall(Value *V, User::op_iterator ArgBegin,
- User::op_iterator ArgEnd, const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ User::op_iterator ArgEnd, const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// \brief Given a function and set of arguments, see if we can fold the
/// result.
///
/// If this call could not be simplified returns null.
Value *SimplifyCall(Value *V, ArrayRef<Value *> Args,
- const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// SimplifyInstruction - See if we can compute a simplified version of this
/// instruction. If not, this returns null.
- Value *SimplifyInstruction(Instruction *I, const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ Value *SimplifyInstruction(Instruction *I, const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// \brief Replace all uses of 'I' with 'SimpleV' and simplify the uses
@@ -254,9 +260,9 @@ namespace llvm {
///
/// The function returns true if any simplifications were performed.
bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
- const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
/// \brief Recursively attempt to simplify an instruction.
///
@@ -265,9 +271,9 @@ namespace llvm {
/// of the users impacted. It returns true if any simplifications were
/// performed.
bool recursivelySimplifyInstruction(Instruction *I,
- const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0,
- const DominatorTree *DT = 0);
+ const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr);
} // end namespace llvm
#endif
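
The same pattern applies throughout this header; a brief sketch of the now-optional arguments (the sweep below is a hypothetical example, not something this patch adds):

  #include "llvm/Analysis/InstructionSimplify.h"
  #include "llvm/IR/BasicBlock.h"
  using namespace llvm;

  // Illustrative only: simplify each instruction of a block with no
  // DataLayout/TargetLibraryInfo/DominatorTree supplied (all default to
  // nullptr), rewriting uses whenever a simpler value is found.
  static void simplifyBlock(BasicBlock &BB) {
    for (Instruction &I : BB)
      if (Value *V = SimplifyInstruction(&I))
        I.replaceAllUsesWith(V);
  }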
diff --git a/include/llvm/Analysis/IntervalPartition.h b/include/llvm/Analysis/IntervalPartition.h
index 05248bd..274be2b 100644
--- a/include/llvm/Analysis/IntervalPartition.h
+++ b/include/llvm/Analysis/IntervalPartition.h
@@ -48,7 +48,7 @@ class IntervalPartition : public FunctionPass {
public:
static char ID; // Pass identification, replacement for typeid
- IntervalPartition() : FunctionPass(ID), RootInterval(0) {
+ IntervalPartition() : FunctionPass(ID), RootInterval(nullptr) {
initializeIntervalPartitionPass(*PassRegistry::getPassRegistry());
}
@@ -62,7 +62,7 @@ public:
IntervalPartition(IntervalPartition &I, bool);
// print - Show contents in human readable format...
- void print(raw_ostream &O, const Module* = 0) const override;
+ void print(raw_ostream &O, const Module* = nullptr) const override;
// getRootInterval() - Return the root interval that contains the starting
// block of the function.
@@ -77,7 +77,7 @@ public:
// getBlockInterval - Return the interval that a basic block exists in.
inline Interval *getBlockInterval(BasicBlock *BB) {
IntervalMapTy::iterator I = IntervalMap.find(BB);
- return I != IntervalMap.end() ? I->second : 0;
+ return I != IntervalMap.end() ? I->second : nullptr;
}
// getAnalysisUsage - Implement the Pass API
diff --git a/include/llvm/Analysis/LazyCallGraph.h b/include/llvm/Analysis/LazyCallGraph.h
index 74b0c8e..70a4df5 100644
--- a/include/llvm/Analysis/LazyCallGraph.h
+++ b/include/llvm/Analysis/LazyCallGraph.h
@@ -38,8 +38,11 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
@@ -100,6 +103,7 @@ class raw_ostream;
class LazyCallGraph {
public:
class Node;
+ class SCC;
typedef SmallVector<PointerUnion<Function *, Node *>, 4> NodeVectorT;
typedef SmallVectorImpl<PointerUnion<Function *, Node *>> NodeVectorImplT;
@@ -109,67 +113,271 @@ public:
/// be scanned for "calls" or uses of functions and its child information
/// will be constructed. All of these results are accumulated and cached in
/// the graph.
- class iterator : public std::iterator<std::bidirectional_iterator_tag, Node *,
- ptrdiff_t, Node *, Node *> {
+ class iterator
+ : public iterator_adaptor_base<iterator, NodeVectorImplT::iterator,
+ std::forward_iterator_tag, Node> {
friend class LazyCallGraph;
friend class LazyCallGraph::Node;
- typedef std::iterator<std::bidirectional_iterator_tag, Node *, ptrdiff_t,
- Node *, Node *> BaseT;
- /// \brief Nonce type to select the constructor for the end iterator.
- struct IsAtEndT {};
-
- LazyCallGraph &G;
- NodeVectorImplT::iterator NI;
+ LazyCallGraph *G;
+ NodeVectorImplT::iterator E;
- // Build the begin iterator for a node.
- explicit iterator(LazyCallGraph &G, NodeVectorImplT &Nodes)
- : G(G), NI(Nodes.begin()) {}
-
- // Build the end iterator for a node. This is selected purely by overload.
- iterator(LazyCallGraph &G, NodeVectorImplT &Nodes, IsAtEndT /*Nonce*/)
- : G(G), NI(Nodes.end()) {}
+ // Build the iterator for a specific position in a node list.
+ iterator(LazyCallGraph &G, NodeVectorImplT::iterator NI,
+ NodeVectorImplT::iterator E)
+ : iterator_adaptor_base(NI), G(&G), E(E) {
+ while (I != E && I->isNull())
+ ++I;
+ }
public:
- iterator(const iterator &Arg) : G(Arg.G), NI(Arg.NI) {}
- iterator(iterator &&Arg) : G(Arg.G), NI(std::move(Arg.NI)) {}
- iterator &operator=(iterator Arg) {
- std::swap(Arg, *this);
+ iterator() {}
+
+ using iterator_adaptor_base::operator++;
+ iterator &operator++() {
+ do {
+ ++I;
+ } while (I != E && I->isNull());
return *this;
}
- bool operator==(const iterator &Arg) { return NI == Arg.NI; }
- bool operator!=(const iterator &Arg) { return !operator==(Arg); }
-
reference operator*() const {
- if (NI->is<Node *>())
- return NI->get<Node *>();
+ if (I->is<Node *>())
+ return *I->get<Node *>();
- Function *F = NI->get<Function *>();
- Node *ChildN = G.get(*F);
- *NI = ChildN;
+ Function *F = I->get<Function *>();
+ Node &ChildN = G->get(*F);
+ *I = &ChildN;
return ChildN;
}
- pointer operator->() const { return operator*(); }
+ };
- iterator &operator++() {
- ++NI;
- return *this;
+ /// \brief A node in the call graph.
+ ///
+ /// This represents a single node. Its primary roles are to cache the list of
+ /// callees, de-duplicate and provide fast testing of whether a function is
+ /// a callee, and facilitate iteration of child nodes in the graph.
+ class Node {
+ friend class LazyCallGraph;
+ friend class LazyCallGraph::SCC;
+
+ LazyCallGraph *G;
+ Function &F;
+
+ // We provide for the DFS numbering and Tarjan walk lowlink numbers to be
+ // stored directly within the node.
+ int DFSNumber;
+ int LowLink;
+
+ mutable NodeVectorT Callees;
+ DenseMap<Function *, size_t> CalleeIndexMap;
+
+ /// \brief Basic constructor implements the scanning of F into Callees and
+ /// CalleeIndexMap.
+ Node(LazyCallGraph &G, Function &F);
+
+ /// \brief Internal helper to insert a callee.
+ void insertEdgeInternal(Function &Callee);
+
+ /// \brief Internal helper to insert a callee.
+ void insertEdgeInternal(Node &CalleeN);
+
+ /// \brief Internal helper to remove a callee from this node.
+ void removeEdgeInternal(Function &Callee);
+
+ public:
+ typedef LazyCallGraph::iterator iterator;
+
+ Function &getFunction() const {
+ return F;
+ };
+
+ iterator begin() const {
+ return iterator(*G, Callees.begin(), Callees.end());
}
- iterator operator++(int) {
- iterator prev = *this;
- ++*this;
- return prev;
+ iterator end() const { return iterator(*G, Callees.end(), Callees.end()); }
+
+ /// Equality is defined as address equality.
+ bool operator==(const Node &N) const { return this == &N; }
+ bool operator!=(const Node &N) const { return !operator==(N); }
+ };
+
+ /// \brief An SCC of the call graph.
+ ///
+ /// This represents a Strongly Connected Component of the call graph as
+ /// a collection of call graph nodes. While the order of nodes in the SCC is
+ /// stable, it is not in any particular order.
+ class SCC {
+ friend class LazyCallGraph;
+ friend class LazyCallGraph::Node;
+
+ LazyCallGraph *G;
+ SmallPtrSet<SCC *, 1> ParentSCCs;
+ SmallVector<Node *, 1> Nodes;
+
+ SCC(LazyCallGraph &G) : G(&G) {}
+
+ void insert(Node &N);
+
+ void
+ internalDFS(SmallVectorImpl<std::pair<Node *, Node::iterator>> &DFSStack,
+ SmallVectorImpl<Node *> &PendingSCCStack, Node *N,
+ SmallVectorImpl<SCC *> &ResultSCCs);
+
+ public:
+ typedef SmallVectorImpl<Node *>::const_iterator iterator;
+ typedef pointee_iterator<SmallPtrSet<SCC *, 1>::const_iterator> parent_iterator;
+
+ iterator begin() const { return Nodes.begin(); }
+ iterator end() const { return Nodes.end(); }
+
+ parent_iterator parent_begin() const { return ParentSCCs.begin(); }
+ parent_iterator parent_end() const { return ParentSCCs.end(); }
+
+ iterator_range<parent_iterator> parents() const {
+ return iterator_range<parent_iterator>(parent_begin(), parent_end());
}
- iterator &operator--() {
- --NI;
- return *this;
+ /// \brief Test if this SCC is a parent of \a C.
+ bool isParentOf(const SCC &C) const { return C.isChildOf(*this); }
+
+ /// \brief Test if this SCC is an ancestor of \a C.
+ bool isAncestorOf(const SCC &C) const { return C.isDescendantOf(*this); }
+
+ /// \brief Test if this SCC is a child of \a C.
+ bool isChildOf(const SCC &C) const {
+ return ParentSCCs.count(const_cast<SCC *>(&C));
}
- iterator operator--(int) {
- iterator next = *this;
- --*this;
- return next;
+
+ /// \brief Test if this SCC is a descendant of \a C.
+ bool isDescendantOf(const SCC &C) const;
+
+ ///@{
+ /// \name Mutation API
+ ///
+ /// These methods provide the core API for updating the call graph in the
+ /// presence of (potentially still in-flight) DFS-found SCCs.
+ ///
+ /// Note that these methods sometimes have complex runtimes, so be careful
+ /// how you call them.
+
+ /// \brief Insert an edge from one node in this SCC to another in this SCC.
+ ///
+ /// By the definition of an SCC, this does not change the nature or make-up
+ /// of any SCCs.
+ void insertIntraSCCEdge(Node &CallerN, Node &CalleeN);
+
+ /// \brief Insert an edge whose tail is in this SCC and head is in some
+ /// child SCC.
+ ///
+ /// There must be an existing path from the caller to the callee. This
+ /// operation is inexpensive and does not change the set of SCCs in the
+ /// graph.
+ void insertOutgoingEdge(Node &CallerN, Node &CalleeN);
+
+ /// \brief Insert an edge whose tail is in a descendant SCC and head is in
+ /// this SCC.
+ ///
+ /// There must be an existing path from the callee to the caller in this
+ /// case. NB! This has the potential to be a very expensive function. It
+ /// inherently forms a cycle in the prior SCC DAG and we have to merge SCCs
+ /// to resolve that cycle. But finding all of the SCCs which participate in
+ /// the cycle can in the worst case require traversing every SCC in the
+ /// graph. Every attempt is made to avoid that, but passes must still
+ /// exercise caution calling this routine repeatedly.
+ ///
+ /// FIXME: We could possibly optimize this quite a bit for cases where the
+ /// caller and callee are very nearby in the graph. See comments in the
+ /// implementation for details, but that use case might impact users.
+ SmallVector<SCC *, 1> insertIncomingEdge(Node &CallerN, Node &CalleeN);
+
+ /// \brief Remove an edge whose source is in this SCC and target is *not*.
+ ///
+ /// This removes an inter-SCC edge. All inter-SCC edges originating from
+ /// this SCC have been fully explored by any in-flight DFS SCC formation,
+ /// so this is always safe to call once you have the source SCC.
+ ///
+ /// This operation does not change the set of SCCs or the members of the
+ /// SCCs and so is very inexpensive. It may change the connectivity graph
+ /// of the SCCs though, so be careful calling this while iterating over
+ /// them.
+ void removeInterSCCEdge(Node &CallerN, Node &CalleeN);
+
+ /// \brief Remove an edge which is entirely within this SCC.
+ ///
+ /// Both the \a Caller and the \a Callee must be within this SCC. Removing
+ /// such an edge may break cycles that form this SCC and thus this
+ /// operation may change the SCC graph significantly. In particular, this
+ /// operation will re-form new SCCs based on the remaining connectivity of
+ /// the graph. The following invariants are guaranteed to hold after
+ /// calling this method:
+ ///
+ /// 1) This SCC is still an SCC in the graph.
+ /// 2) This SCC will be the parent of any new SCCs. Thus, this SCC is
+ /// preserved as the root of any new SCC directed graph formed.
+ /// 3) No SCC other than this SCC has its member set changed (this is
+ /// inherent in the definition of removing such an edge).
+ /// 4) All of the parent links of the SCC graph will be updated to reflect
+ /// the new SCC structure.
+ /// 5) All SCCs formed out of this SCC, excluding this SCC, will be
+ /// returned in a vector.
+ /// 6) The order of the SCCs in the vector will be a valid postorder
+ /// traversal of the new SCCs.
+ ///
+ /// These invariants are very important to ensure that we can build
+ /// optimization pipelines on top of the CGSCC pass manager which
+ /// intelligently update the SCC graph without invalidating other parts of
+ /// the SCC graph.
+ ///
+ /// The runtime complexity of this method is, in the worst case, O(V+E)
+ /// where V is the number of nodes in this SCC and E is the number of edges
+ /// leaving the nodes in this SCC. Note that E includes both edges within
+ /// this SCC and edges from this SCC to child SCCs. Some effort has been
+ /// made to minimize the overhead of common cases such as self-edges and
+ /// edge removals which result in a spanning tree with no more cycles.
+ SmallVector<SCC *, 1> removeIntraSCCEdge(Node &CallerN, Node &CalleeN);
+
+ ///@}
+ };
+
+ /// \brief A post-order depth-first SCC iterator over the call graph.
+ ///
+ /// This iterator triggers the Tarjan DFS-based formation of the SCC DAG for
+ /// the call graph, walking it lazily in depth-first post-order. That is, it
+ /// always visits SCCs for a callee prior to visiting the SCC for a caller
+ /// (when they are in different SCCs).
+ class postorder_scc_iterator
+ : public iterator_facade_base<postorder_scc_iterator,
+ std::forward_iterator_tag, SCC> {
+ friend class LazyCallGraph;
+ friend class LazyCallGraph::Node;
+
+ /// \brief Nonce type to select the constructor for the end iterator.
+ struct IsAtEndT {};
+
+ LazyCallGraph *G;
+ SCC *C;
+
+ // Build the begin iterator for a node.
+ postorder_scc_iterator(LazyCallGraph &G) : G(&G) {
+ C = G.getNextSCCInPostOrder();
+ }
+
+ // Build the end iterator for a node. This is selected purely by overload.
+ postorder_scc_iterator(LazyCallGraph &G, IsAtEndT /*Nonce*/)
+ : G(&G), C(nullptr) {}
+
+ public:
+ bool operator==(const postorder_scc_iterator &Arg) const {
+ return G == Arg.G && C == Arg.C;
+ }
+
+ reference operator*() const { return *C; }
+
+ using iterator_facade_base::operator++;
+ postorder_scc_iterator &operator++() {
+ C = G->getNextSCCInPostOrder();
+ return *this;
}
};
@@ -180,44 +388,75 @@ public:
/// requested during traversal.
LazyCallGraph(Module &M);
- /// \brief Copy constructor.
- ///
- /// This does a deep copy of the graph. It does no verification that the
- /// graph remains valid for the module. It is also relatively expensive.
- LazyCallGraph(const LazyCallGraph &G);
-
- /// \brief Move constructor.
- ///
- /// This is a deep move. It leaves G in an undefined but destroyable state.
- /// Any other operation on G is likely to fail.
LazyCallGraph(LazyCallGraph &&G);
+ LazyCallGraph &operator=(LazyCallGraph &&RHS);
+
+ iterator begin() {
+ return iterator(*this, EntryNodes.begin(), EntryNodes.end());
+ }
+ iterator end() { return iterator(*this, EntryNodes.end(), EntryNodes.end()); }
- /// \brief Copy and move assignment.
- LazyCallGraph &operator=(LazyCallGraph RHS) {
- std::swap(*this, RHS);
- return *this;
+ postorder_scc_iterator postorder_scc_begin() {
+ return postorder_scc_iterator(*this);
+ }
+ postorder_scc_iterator postorder_scc_end() {
+ return postorder_scc_iterator(*this, postorder_scc_iterator::IsAtEndT());
}
- iterator begin() { return iterator(*this, EntryNodes); }
- iterator end() { return iterator(*this, EntryNodes, iterator::IsAtEndT()); }
+ iterator_range<postorder_scc_iterator> postorder_sccs() {
+ return iterator_range<postorder_scc_iterator>(postorder_scc_begin(),
+ postorder_scc_end());
+ }
/// \brief Lookup a function in the graph which has already been scanned and
/// added.
Node *lookup(const Function &F) const { return NodeMap.lookup(&F); }
+ /// \brief Lookup a function's SCC in the graph.
+ ///
+ /// \returns null if the function hasn't been assigned an SCC via the SCC
+ /// iterator walk.
+ SCC *lookupSCC(Node &N) const { return SCCMap.lookup(&N); }
+
/// \brief Get a graph node for a given function, scanning it to populate the
/// graph data as necessary.
- Node *get(Function &F) {
+ Node &get(Function &F) {
Node *&N = NodeMap[&F];
if (N)
- return N;
+ return *N;
return insertInto(F, N);
}
-private:
- Module &M;
+ ///@{
+ /// \name Pre-SCC Mutation API
+ ///
+ /// These methods are only valid to call prior to forming any SCCs for this
+ /// call graph. They can be used to update the core node-graph during
+ /// a node-based inorder traversal that precedes any SCC-based traversal.
+ ///
+ /// Once you begin manipulating a call graph's SCCs, you must perform all
+ /// mutation of the graph via the SCC methods.
+
+ /// \brief Update the call graph after inserting a new edge.
+ void insertEdge(Node &Caller, Function &Callee);
+
+ /// \brief Update the call graph after inserting a new edge.
+ void insertEdge(Function &Caller, Function &Callee) {
+ return insertEdge(get(Caller), Callee);
+ }
+
+ /// \brief Update the call graph after deleting an edge.
+ void removeEdge(Node &Caller, Function &Callee);
+
+ /// \brief Update the call graph after deleting an edge.
+ void removeEdge(Function &Caller, Function &Callee) {
+ return removeEdge(get(Caller), Callee);
+ }
+
+ ///@}
+private:
/// \brief Allocator that holds all the call graph nodes.
SpecificBumpPtrAllocator<Node> BPA;
@@ -230,56 +469,46 @@ private:
/// escape at the module scope.
NodeVectorT EntryNodes;
- /// \brief Set of the entry nodes to the graph.
- SmallPtrSet<Function *, 4> EntryNodeSet;
-
- /// \brief Helper to insert a new function, with an already looked-up entry in
- /// the NodeMap.
- Node *insertInto(Function &F, Node *&MappedN);
+ /// \brief Map of the entry nodes in the graph to their indices in
+ /// \c EntryNodes.
+ DenseMap<Function *, size_t> EntryIndexMap;
- /// \brief Helper to copy a node from another graph into this one.
- Node *copyInto(const Node &OtherN);
+ /// \brief Allocator that holds all the call graph SCCs.
+ SpecificBumpPtrAllocator<SCC> SCCBPA;
- /// \brief Helper to move a node from another graph into this one.
- Node *moveInto(Node &&OtherN);
-};
+ /// \brief Maps Function -> SCC for fast lookup.
+ DenseMap<Node *, SCC *> SCCMap;
-/// \brief A node in the call graph.
-///
-/// This represents a single node. It's primary roles are to cache the list of
-/// callees, de-duplicate and provide fast testing of whether a function is
-/// a callee, and facilitate iteration of child nodes in the graph.
-class LazyCallGraph::Node {
- friend class LazyCallGraph;
+ /// \brief The leaf SCCs of the graph.
+ ///
+ /// These are all of the SCCs which have no children.
+ SmallVector<SCC *, 4> LeafSCCs;
- LazyCallGraph &G;
- Function &F;
- mutable NodeVectorT Callees;
- SmallPtrSet<Function *, 4> CalleeSet;
+ /// \brief Stack of nodes in the DFS walk.
+ SmallVector<std::pair<Node *, iterator>, 4> DFSStack;
- /// \brief Basic constructor implements the scanning of F into Callees and
- /// CalleeSet.
- Node(LazyCallGraph &G, Function &F);
+ /// \brief Set of entry nodes not-yet-processed into SCCs.
+ SmallVector<Function *, 4> SCCEntryNodes;
- /// \brief Constructor used when copying a node from one graph to another.
- Node(LazyCallGraph &G, const Node &OtherN);
+ /// \brief Stack of nodes the DFS has walked but not yet put into a SCC.
+ SmallVector<Node *, 4> PendingSCCStack;
- /// \brief Constructor used when moving a node from one graph to another.
- Node(LazyCallGraph &G, Node &&OtherN);
+ /// \brief Counter for the next DFS number to assign.
+ int NextDFSNumber;
-public:
- typedef LazyCallGraph::iterator iterator;
+ /// \brief Helper to insert a new function, with an already looked-up entry in
+ /// the NodeMap.
+ Node &insertInto(Function &F, Node *&MappedN);
- Function &getFunction() const {
- return F;
- };
+ /// \brief Helper to update pointers back to the graph object during moves.
+ void updateGraphPtrs();
- iterator begin() const { return iterator(G, Callees); }
- iterator end() const { return iterator(G, Callees, iterator::IsAtEndT()); }
+ /// \brief Helper to form a new SCC out of the top of a DFSStack-like
+ /// structure.
+ SCC *formSCC(Node *RootN, SmallVectorImpl<Node *> &NodeStack);
- /// Equality is defined as address equality.
- bool operator==(const Node &N) const { return this == &N; }
- bool operator!=(const Node &N) const { return !operator==(N); }
+ /// \brief Retrieve the next SCC in the post-order walk of the call graph.
+ SCC *getNextSCCInPostOrder();
};
// Provide GraphTraits specializations for call graphs.
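Editor's note: as a quick orientation for the reworked LazyCallGraph API above, here is a minimal, hedged sketch of how a client might drive the lazy post-order SCC walk. It uses only declarations visible in this hunk (the LazyCallGraph(Module &) constructor, postorder_sccs(), SCC iteration over Node *, and Node::getFunction()); the function name printSCCsBottomUp is illustrative and not part of the patch.

    #include "llvm/Analysis/LazyCallGraph.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    // Walk SCCs bottom-up: callee SCCs are always visited before caller SCCs.
    static void printSCCsBottomUp(Module &M) {
      LazyCallGraph CG(M);  // Nodes and edges are scanned lazily on demand.
      for (LazyCallGraph::SCC &C : CG.postorder_sccs()) {
        outs() << "SCC:";
        for (LazyCallGraph::Node *N : C)
          outs() << " " << N->getFunction().getName();
        outs() << "\n";
      }
    }

The postorder_sccs() range itself triggers the Tarjan DFS lazily, so SCCs are only formed as the loop advances.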
diff --git a/include/llvm/Analysis/LazyValueInfo.h b/include/llvm/Analysis/LazyValueInfo.h
index a4cb806..2fe7386 100644
--- a/include/llvm/Analysis/LazyValueInfo.h
+++ b/include/llvm/Analysis/LazyValueInfo.h
@@ -33,10 +33,10 @@ class LazyValueInfo : public FunctionPass {
void operator=(const LazyValueInfo&) LLVM_DELETED_FUNCTION;
public:
static char ID;
- LazyValueInfo() : FunctionPass(ID), PImpl(0) {
+ LazyValueInfo() : FunctionPass(ID), PImpl(nullptr) {
initializeLazyValueInfoPass(*PassRegistry::getPassRegistry());
}
- ~LazyValueInfo() { assert(PImpl == 0 && "releaseMemory not called"); }
+ ~LazyValueInfo() { assert(!PImpl && "releaseMemory not called"); }
/// Tristate - This is used to return true/false/dunno results.
enum Tristate {
diff --git a/include/llvm/Analysis/LibCallAliasAnalysis.h b/include/llvm/Analysis/LibCallAliasAnalysis.h
index 481015e..4c03c92 100644
--- a/include/llvm/Analysis/LibCallAliasAnalysis.h
+++ b/include/llvm/Analysis/LibCallAliasAnalysis.h
@@ -27,7 +27,7 @@ namespace llvm {
LibCallInfo *LCI;
- explicit LibCallAliasAnalysis(LibCallInfo *LC = 0)
+ explicit LibCallAliasAnalysis(LibCallInfo *LC = nullptr)
: FunctionPass(ID), LCI(LC) {
initializeLibCallAliasAnalysisPass(*PassRegistry::getPassRegistry());
}
diff --git a/include/llvm/Analysis/LibCallSemantics.h b/include/llvm/Analysis/LibCallSemantics.h
index 0f0bc23..8bd747f 100644
--- a/include/llvm/Analysis/LibCallSemantics.h
+++ b/include/llvm/Analysis/LibCallSemantics.h
@@ -130,7 +130,7 @@ namespace llvm {
mutable const LibCallLocationInfo *Locations;
mutable unsigned NumLocations;
public:
- LibCallInfo() : Impl(0), Locations(0), NumLocations(0) {}
+ LibCallInfo() : Impl(nullptr), Locations(nullptr), NumLocations(0) {}
virtual ~LibCallInfo();
//===------------------------------------------------------------------===//
diff --git a/include/llvm/Analysis/Loads.h b/include/llvm/Analysis/Loads.h
index ebcb762..25c5928 100644
--- a/include/llvm/Analysis/Loads.h
+++ b/include/llvm/Analysis/Loads.h
@@ -27,7 +27,8 @@ class MDNode;
/// specified pointer, we do a quick local scan of the basic block containing
/// ScanFrom, to determine if the address is already accessed.
bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
- unsigned Align, const DataLayout *TD = 0);
+ unsigned Align,
+ const DataLayout *TD = nullptr);
/// FindAvailableLoadedValue - Scan the ScanBB block backwards (starting at
/// the instruction before ScanFrom) checking to see if we have the value at
@@ -49,8 +50,8 @@ bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
Value *FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
BasicBlock::iterator &ScanFrom,
unsigned MaxInstsToScan = 6,
- AliasAnalysis *AA = 0,
- MDNode **TBAATag = 0);
+ AliasAnalysis *AA = nullptr,
+ MDNode **TBAATag = nullptr);
}
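Editor's note: a small usage sketch (not part of the patch) for FindAvailableLoadedValue with the new nullptr defaults shown above; forwardAvailableValue and its surrounding setup are assumed names for illustration.

    #include "llvm/Analysis/Loads.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Try to replace a load with a value already available earlier in its block.
    static bool forwardAvailableValue(LoadInst *LI) {
      BasicBlock::iterator ScanFrom = LI;           // scan backwards from LI
      Value *V = FindAvailableLoadedValue(LI->getPointerOperand(), LI->getParent(),
                                          ScanFrom, /*MaxInstsToScan=*/6);
      if (!V)                                       // AA and TBAATag default to nullptr
        return false;
      LI->replaceAllUsesWith(V);
      return true;
    }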
diff --git a/include/llvm/Analysis/LoopInfo.h b/include/llvm/Analysis/LoopInfo.h
index aeeea3c..bef03e9 100644
--- a/include/llvm/Analysis/LoopInfo.h
+++ b/include/llvm/Analysis/LoopInfo.h
@@ -79,7 +79,7 @@ class LoopBase {
operator=(const LoopBase<BlockT, LoopT> &) LLVM_DELETED_FUNCTION;
public:
/// Loop ctor - This creates an empty loop.
- LoopBase() : ParentLoop(0) {}
+ LoopBase() : ParentLoop(nullptr) {}
~LoopBase() {
for (size_t i = 0, e = SubLoops.size(); i != e; ++i)
delete SubLoops[i];
@@ -106,7 +106,7 @@ public:
///
bool contains(const LoopT *L) const {
if (L == this) return true;
- if (L == 0) return false;
+ if (!L) return false;
return contains(L->getParentLoop());
}
@@ -265,7 +265,7 @@ public:
/// updates the loop depth of the new child.
///
void addChildLoop(LoopT *NewChild) {
- assert(NewChild->ParentLoop == 0 && "NewChild already has a parent!");
+ assert(!NewChild->ParentLoop && "NewChild already has a parent!");
NewChild->ParentLoop = static_cast<LoopT *>(this);
SubLoops.push_back(NewChild);
}
@@ -278,7 +278,7 @@ public:
LoopT *Child = *I;
assert(Child->ParentLoop == this && "Child is not a child of this loop!");
SubLoops.erase(SubLoops.begin()+(I-begin()));
- Child->ParentLoop = 0;
+ Child->ParentLoop = nullptr;
return Child;
}
@@ -333,7 +333,7 @@ public:
protected:
friend class LoopInfoBase<BlockT, LoopT>;
- explicit LoopBase(BlockT *BB) : ParentLoop(0) {
+ explicit LoopBase(BlockT *BB) : ParentLoop(nullptr) {
Blocks.push_back(BB);
DenseBlockSet.insert(BB);
}
@@ -372,7 +372,7 @@ public:
/// If null, the terminator of the loop preheader is used.
///
bool makeLoopInvariant(Value *V, bool &Changed,
- Instruction *InsertPt = 0) const;
+ Instruction *InsertPt = nullptr) const;
/// makeLoopInvariant - If the given instruction is inside of the
/// loop and it can be hoisted, do so to make it trivially loop-invariant.
@@ -384,7 +384,7 @@ public:
/// If null, the terminator of the loop preheader is used.
///
bool makeLoopInvariant(Instruction *I, bool &Changed,
- Instruction *InsertPt = 0) const;
+ Instruction *InsertPt = nullptr) const;
/// getCanonicalInductionVariable - Check to see if the loop has a canonical
/// induction variable: an integer recurrence that starts at 0 and increments
@@ -453,6 +453,31 @@ public:
void dump() const;
+ /// \brief Return the debug location of the start of this loop.
+ /// This looks for a BB terminating instruction with a known debug
+ /// location by looking at the preheader and header blocks. If it
+ /// cannot find a terminating instruction with location information,
+ /// it returns an unknown location.
+ DebugLoc getStartLoc() const {
+ DebugLoc StartLoc;
+ BasicBlock *HeadBB;
+
+ // Try the pre-header first.
+ if ((HeadBB = getLoopPreheader()) != nullptr) {
+ StartLoc = HeadBB->getTerminator()->getDebugLoc();
+ if (!StartLoc.isUnknown())
+ return StartLoc;
+ }
+
+ // If we have no pre-header or there are no instructions with debug
+ // info in it, try the header.
+ HeadBB = getHeader();
+ if (HeadBB)
+ StartLoc = HeadBB->getTerminator()->getDebugLoc();
+
+ return StartLoc;
+ }
+
private:
friend class LoopInfoBase<BasicBlock, Loop>;
explicit Loop(BasicBlock *BB) : LoopBase<BasicBlock, Loop>(BB) {}
@@ -531,7 +556,7 @@ public:
LoopT *removeLoop(iterator I) {
assert(I != end() && "Cannot remove end iterator!");
LoopT *L = *I;
- assert(L->getParentLoop() == 0 && "Not a top-level loop!");
+ assert(!L->getParentLoop() && "Not a top-level loop!");
TopLevelLoops.erase(TopLevelLoops.begin() + (I-begin()));
return L;
}
@@ -555,14 +580,14 @@ public:
std::find(TopLevelLoops.begin(), TopLevelLoops.end(), OldLoop);
assert(I != TopLevelLoops.end() && "Old loop not at top level!");
*I = NewLoop;
- assert(NewLoop->ParentLoop == 0 && OldLoop->ParentLoop == 0 &&
+ assert(!NewLoop->ParentLoop && !OldLoop->ParentLoop &&
"Loops already embedded into a subloop!");
}
/// addTopLevelLoop - This adds the specified loop to the collection of
/// top-level loops.
void addTopLevelLoop(LoopT *New) {
- assert(New->getParentLoop() == 0 && "Loop already in subloop!");
+ assert(!New->getParentLoop() && "Loop already in subloop!");
TopLevelLoops.push_back(New);
}
@@ -583,7 +608,7 @@ public:
static bool isNotAlreadyContainedIn(const LoopT *SubLoop,
const LoopT *ParentLoop) {
- if (SubLoop == 0) return true;
+ if (!SubLoop) return true;
if (SubLoop == ParentLoop) return false;
return isNotAlreadyContainedIn(SubLoop->getParentLoop(), ParentLoop);
}
@@ -660,7 +685,7 @@ public:
void releaseMemory() override { LI.releaseMemory(); }
- void print(raw_ostream &O, const Module* M = 0) const override;
+ void print(raw_ostream &O, const Module* M = nullptr) const override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
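Editor's note: a brief sketch (not from the patch) of how the new Loop::getStartLoc() might be consumed, e.g. for diagnostics; reportLoopLocation is an illustrative name.

    #include "llvm/Analysis/LoopInfo.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    // Print the source location where a loop starts, if debug info is present.
    static void reportLoopLocation(const Loop *L) {
      DebugLoc DL = L->getStartLoc();               // preheader first, then header
      if (DL.isUnknown())
        outs() << "loop at <unknown location>\n";
      else
        outs() << "loop at line " << DL.getLine() << ", column " << DL.getCol()
               << "\n";
    }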
diff --git a/include/llvm/Analysis/LoopInfoImpl.h b/include/llvm/Analysis/LoopInfoImpl.h
index dd2dc28..948be0f 100644
--- a/include/llvm/Analysis/LoopInfoImpl.h
+++ b/include/llvm/Analysis/LoopInfoImpl.h
@@ -53,7 +53,7 @@ BlockT *LoopBase<BlockT, LoopT>::getExitingBlock() const {
getExitingBlocks(ExitingBlocks);
if (ExitingBlocks.size() == 1)
return ExitingBlocks[0];
- return 0;
+ return nullptr;
}
/// getExitBlocks - Return all of the successor blocks of this loop. These
@@ -80,7 +80,7 @@ BlockT *LoopBase<BlockT, LoopT>::getExitBlock() const {
getExitBlocks(ExitBlocks);
if (ExitBlocks.size() == 1)
return ExitBlocks[0];
- return 0;
+ return nullptr;
}
/// getExitEdges - Return all pairs of (_inside_block_,_outside_block_).
@@ -108,14 +108,14 @@ template<class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getLoopPreheader() const {
// Keep track of nodes outside the loop branching to the header...
BlockT *Out = getLoopPredecessor();
- if (!Out) return 0;
+ if (!Out) return nullptr;
// Make sure there is only one exit out of the preheader.
typedef GraphTraits<BlockT*> BlockTraits;
typename BlockTraits::ChildIteratorType SI = BlockTraits::child_begin(Out);
++SI;
if (SI != BlockTraits::child_end(Out))
- return 0; // Multiple exits from the block, must not be a preheader.
+ return nullptr; // Multiple exits from the block, must not be a preheader.
// The predecessor has exactly one successor, so it is a preheader.
return Out;
@@ -129,7 +129,7 @@ BlockT *LoopBase<BlockT, LoopT>::getLoopPreheader() const {
template<class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getLoopPredecessor() const {
// Keep track of nodes outside the loop branching to the header...
- BlockT *Out = 0;
+ BlockT *Out = nullptr;
// Loop over the predecessors of the header node...
BlockT *Header = getHeader();
@@ -140,7 +140,7 @@ BlockT *LoopBase<BlockT, LoopT>::getLoopPredecessor() const {
typename InvBlockTraits::NodeType *N = *PI;
if (!contains(N)) { // If the block is not in the loop...
if (Out && Out != N)
- return 0; // Multiple predecessors outside the loop
+ return nullptr; // Multiple predecessors outside the loop
Out = N;
}
}
@@ -160,11 +160,11 @@ BlockT *LoopBase<BlockT, LoopT>::getLoopLatch() const {
InvBlockTraits::child_begin(Header);
typename InvBlockTraits::ChildIteratorType PE =
InvBlockTraits::child_end(Header);
- BlockT *Latch = 0;
+ BlockT *Latch = nullptr;
for (; PI != PE; ++PI) {
typename InvBlockTraits::NodeType *N = *PI;
if (contains(N)) {
- if (Latch) return 0;
+ if (Latch) return nullptr;
Latch = N;
}
}
@@ -188,7 +188,7 @@ addBasicBlockToLoop(BlockT *NewBB, LoopInfoBase<BlockT, LoopT> &LIB) {
assert((Blocks.empty() || LIB[getHeader()] == this) &&
"Incorrect LI specified for this loop!");
assert(NewBB && "Cannot add a null basic block to the loop!");
- assert(LIB[NewBB] == 0 && "BasicBlock already in the loop!");
+ assert(!LIB[NewBB] && "BasicBlock already in the loop!");
LoopT *L = static_cast<LoopT *>(this);
@@ -210,12 +210,12 @@ template<class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::
replaceChildLoopWith(LoopT *OldChild, LoopT *NewChild) {
assert(OldChild->ParentLoop == this && "This loop is already broken!");
- assert(NewChild->ParentLoop == 0 && "NewChild already has a parent!");
+ assert(!NewChild->ParentLoop && "NewChild already has a parent!");
typename std::vector<LoopT *>::iterator I =
std::find(SubLoops.begin(), SubLoops.end(), OldChild);
assert(I != SubLoops.end() && "OldChild not in loop!");
*I = NewChild;
- OldChild->ParentLoop = 0;
+ OldChild->ParentLoop = nullptr;
NewChild->ParentLoop = static_cast<LoopT *>(this);
}
@@ -270,11 +270,10 @@ void LoopBase<BlockT, LoopT>::verifyLoop() const {
// though it is permitted if the predecessor is not itself actually
// reachable.
BlockT *EntryBB = BB->getParent()->begin();
- for (df_iterator<BlockT *> NI = df_begin(EntryBB),
- NE = df_end(EntryBB); NI != NE; ++NI)
- for (unsigned i = 0, e = OutsideLoopPreds.size(); i != e; ++i)
- assert(*NI != OutsideLoopPreds[i] &&
- "Loop has multiple entry points!");
+ for (BlockT *CB : depth_first(EntryBB))
+ for (unsigned i = 0, e = OutsideLoopPreds.size(); i != e; ++i)
+ assert(CB != OutsideLoopPreds[i] &&
+ "Loop has multiple entry points!");
}
assert(HasInsideLoopPreds && "Loop block has no in-loop predecessors!");
assert(HasInsideLoopSuccs && "Loop block has no in-loop successors!");
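Editor's note: the verifyLoop() hunk above swaps an explicit df_iterator begin/end pair for the range-based depth_first() helper. A tiny standalone sketch of that idiom follows (countReachableBlocks is an illustrative name, and the CFG GraphTraits include reflects the layout assumed at this revision):

    #include "llvm/ADT/DepthFirstIterator.h"
    #include "llvm/IR/Function.h"
    #include "llvm/Support/CFG.h"
    using namespace llvm;

    // Count the basic blocks reachable from a function's entry block.
    static unsigned countReachableBlocks(Function &F) {
      unsigned N = 0;
      for (BasicBlock *BB : depth_first(&F.getEntryBlock())) {
        (void)BB;                                   // only the count matters here
        ++N;
      }
      return N;
    }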
diff --git a/include/llvm/Analysis/MemoryBuiltins.h b/include/llvm/Analysis/MemoryBuiltins.h
index ff4bc22..d414680 100644
--- a/include/llvm/Analysis/MemoryBuiltins.h
+++ b/include/llvm/Analysis/MemoryBuiltins.h
@@ -233,7 +233,7 @@ class ObjectSizeOffsetEvaluator
bool RoundToAlign;
SizeOffsetEvalType unknown() {
- return std::make_pair((Value*)0, (Value*)0);
+ return std::make_pair(nullptr, nullptr);
}
SizeOffsetEvalType compute_(Value *V);
diff --git a/include/llvm/Analysis/MemoryDependenceAnalysis.h b/include/llvm/Analysis/MemoryDependenceAnalysis.h
index 123d435..1c4441b 100644
--- a/include/llvm/Analysis/MemoryDependenceAnalysis.h
+++ b/include/llvm/Analysis/MemoryDependenceAnalysis.h
@@ -97,7 +97,7 @@ namespace llvm {
PairTy Value;
explicit MemDepResult(PairTy V) : Value(V) {}
public:
- MemDepResult() : Value(0, Invalid) {}
+ MemDepResult() : Value(nullptr, Invalid) {}
/// get methods: These are static ctor methods for creating various
/// MemDepResult kinds.
@@ -155,7 +155,7 @@ namespace llvm {
/// getInst() - If this is a normal dependency, return the instruction that
/// is depended on. Otherwise, return null.
Instruction *getInst() const {
- if (Value.getInt() == Other) return NULL;
+ if (Value.getInt() == Other) return nullptr;
return Value.getPointer();
}
@@ -285,7 +285,8 @@ namespace llvm {
/// pointer. May be null if there are no tags or conflicting tags.
const MDNode *TBAATag;
- NonLocalPointerInfo() : Size(AliasAnalysis::UnknownSize), TBAATag(0) {}
+ NonLocalPointerInfo()
+ : Size(AliasAnalysis::UnknownSize), TBAATag(nullptr) {}
};
/// CachedNonLocalPointerInfo - This map stores the cached results of doing
@@ -401,7 +402,7 @@ namespace llvm {
bool isLoad,
BasicBlock::iterator ScanIt,
BasicBlock *BB,
- Instruction *QueryInst = 0);
+ Instruction *QueryInst = nullptr);
/// getLoadLoadClobberFullWidthSize - This is a little bit of analysis that
diff --git a/include/llvm/Analysis/PHITransAddr.h b/include/llvm/Analysis/PHITransAddr.h
index 6d70edd..69f5907 100644
--- a/include/llvm/Analysis/PHITransAddr.h
+++ b/include/llvm/Analysis/PHITransAddr.h
@@ -45,7 +45,8 @@ class PHITransAddr {
/// InstInputs - The inputs for our symbolic address.
SmallVector<Instruction*, 4> InstInputs;
public:
- PHITransAddr(Value *addr, const DataLayout *DL) : Addr(addr), DL(DL), TLI(0) {
+ PHITransAddr(Value *addr, const DataLayout *DL)
+ : Addr(addr), DL(DL), TLI(nullptr) {
// If the address is an instruction, the whole thing is considered an input.
if (Instruction *I = dyn_cast<Instruction>(Addr))
InstInputs.push_back(I);
diff --git a/include/llvm/Analysis/PtrUseVisitor.h b/include/llvm/Analysis/PtrUseVisitor.h
index 572d5d7..6e61fc3 100644
--- a/include/llvm/Analysis/PtrUseVisitor.h
+++ b/include/llvm/Analysis/PtrUseVisitor.h
@@ -48,13 +48,13 @@ public:
/// analysis and whether the visit completed or aborted early.
class PtrInfo {
public:
- PtrInfo() : AbortedInfo(0, false), EscapedInfo(0, false) {}
+ PtrInfo() : AbortedInfo(nullptr, false), EscapedInfo(nullptr, false) {}
/// \brief Reset the pointer info, clearing all state.
void reset() {
- AbortedInfo.setPointer(0);
+ AbortedInfo.setPointer(nullptr);
AbortedInfo.setInt(false);
- EscapedInfo.setPointer(0);
+ EscapedInfo.setPointer(nullptr);
EscapedInfo.setInt(false);
}
@@ -76,14 +76,14 @@ public:
/// \brief Mark the visit as aborted. Intended for use in a void return.
/// \param I The instruction which caused the visit to abort, if available.
- void setAborted(Instruction *I = 0) {
+ void setAborted(Instruction *I = nullptr) {
AbortedInfo.setInt(true);
AbortedInfo.setPointer(I);
}
/// \brief Mark the pointer as escaped. Intended for use in a void return.
/// \param I The instruction which escapes the pointer, if available.
- void setEscaped(Instruction *I = 0) {
+ void setEscaped(Instruction *I = nullptr) {
EscapedInfo.setInt(true);
EscapedInfo.setPointer(I);
}
@@ -92,7 +92,7 @@ public:
/// for use in a void return.
/// \param I The instruction which both escapes the pointer and aborts the
/// visit, if available.
- void setEscapedAndAborted(Instruction *I = 0) {
+ void setEscapedAndAborted(Instruction *I = nullptr) {
setEscaped(I);
setAborted(I);
}
diff --git a/include/llvm/Analysis/RegionInfo.h b/include/llvm/Analysis/RegionInfo.h
index 4d55408..82a788d 100644
--- a/include/llvm/Analysis/RegionInfo.h
+++ b/include/llvm/Analysis/RegionInfo.h
@@ -33,6 +33,7 @@
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Support/Allocator.h"
#include <map>
+#include <memory>
namespace llvm {
@@ -213,7 +214,7 @@ class Region : public RegionNode {
// (The entry BasicBlock is part of RegionNode)
BasicBlock *exit;
- typedef std::vector<Region*> RegionSet;
+ typedef std::vector<std::unique_ptr<Region>> RegionSet;
// The subregions of this region.
RegionSet children;
@@ -246,7 +247,7 @@ public:
/// @param Parent The surrounding region or NULL if this is a top level
/// region.
Region(BasicBlock *Entry, BasicBlock *Exit, RegionInfo* RI,
- DominatorTree *DT, Region *Parent = 0);
+ DominatorTree *DT, Region *Parent = nullptr);
/// Delete the Region and all its subregions.
~Region();
@@ -311,7 +312,7 @@ public:
/// @brief Check if a Region is the TopLevel region.
///
/// The toplevel region represents the whole function.
- bool isTopLevelRegion() const { return exit == NULL; }
+ bool isTopLevelRegion() const { return exit == nullptr; }
/// @brief Return a new (non-canonical) region, that is obtained by joining
/// this region with its predecessors.
@@ -515,7 +516,7 @@ public:
}
// Construct the end iterator.
- block_iterator_wrapper() : super(df_end<pointer>((BasicBlock *)0)) {}
+ block_iterator_wrapper() : super(df_end<pointer>((BasicBlock *)nullptr)) {}
/*implicit*/ block_iterator_wrapper(super I) : super(I) {}
diff --git a/include/llvm/Analysis/ScalarEvolution.h b/include/llvm/Analysis/ScalarEvolution.h
index 06489d8..0570826 100644
--- a/include/llvm/Analysis/ScalarEvolution.h
+++ b/include/llvm/Analysis/ScalarEvolution.h
@@ -210,7 +210,7 @@ namespace llvm {
void deleted() override;
void allUsesReplacedWith(Value *New) override;
public:
- SCEVCallbackVH(Value *V, ScalarEvolution *SE = 0);
+ SCEVCallbackVH(Value *V, ScalarEvolution *SE = nullptr);
};
friend class SCEVCallbackVH;
@@ -291,7 +291,7 @@ namespace llvm {
const SCEV *ExactNotTaken;
PointerIntPair<ExitNotTakenInfo*, 1> NextExit;
- ExitNotTakenInfo() : ExitingBlock(0), ExactNotTaken(0) {}
+ ExitNotTakenInfo() : ExitingBlock(nullptr), ExactNotTaken(nullptr) {}
/// isCompleteList - Return true if all loop exits are computable.
bool isCompleteList() const {
@@ -321,7 +321,7 @@ namespace llvm {
const SCEV *Max;
public:
- BackedgeTakenInfo() : Max(0) {}
+ BackedgeTakenInfo() : Max(nullptr) {}
/// Initialize BackedgeTakenInfo from a list of exact exit counts.
BackedgeTakenInfo(
@@ -894,10 +894,19 @@ namespace llvm {
/// indirect operand.
bool hasOperand(const SCEV *S, const SCEV *Op) const;
+ /// Return the size of an element read or written by Inst.
+ const SCEV *getElementSize(Instruction *Inst);
+
+ /// Compute the array dimensions Sizes from the set of Terms extracted from
+ /// the memory access function of this SCEVAddRecExpr.
+ void findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
+ SmallVectorImpl<const SCEV *> &Sizes,
+ const SCEV *ElementSize) const;
+
bool runOnFunction(Function &F) override;
void releaseMemory() override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
- void print(raw_ostream &OS, const Module* = 0) const override;
+ void print(raw_ostream &OS, const Module* = nullptr) const override;
void verifyAnalysis() const override;
private:
diff --git a/include/llvm/Analysis/ScalarEvolutionExpander.h b/include/llvm/Analysis/ScalarEvolutionExpander.h
index 9162735..b9bef97 100644
--- a/include/llvm/Analysis/ScalarEvolutionExpander.h
+++ b/include/llvm/Analysis/ScalarEvolutionExpander.h
@@ -92,7 +92,7 @@ namespace llvm {
public:
/// SCEVExpander - Construct a SCEVExpander in "canonical" mode.
explicit SCEVExpander(ScalarEvolution &se, const char *name)
- : SE(se), IVName(name), IVIncInsertLoop(0), IVIncInsertPos(0),
+ : SE(se), IVName(name), IVIncInsertLoop(nullptr), IVIncInsertPos(nullptr),
CanonicalMode(true), LSRMode(false),
Builder(se.getContext(), TargetFolder(se.DL)) {
#ifndef NDEBUG
@@ -131,7 +131,7 @@ namespace llvm {
/// representative. Return the number of phis eliminated.
unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT,
SmallVectorImpl<WeakVH> &DeadInsts,
- const TargetTransformInfo *TTI = NULL);
+ const TargetTransformInfo *TTI = nullptr);
/// expandCodeFor - Insert code to directly compute the specified SCEV
/// expression into the program. The inserted code is inserted into the
@@ -219,7 +219,7 @@ namespace llvm {
/// expression into the program. The inserted code is inserted into the
/// SCEVExpander's current insertion point. If a type is specified, the
/// result will be expanded to have that type, with a cast if necessary.
- Value *expandCodeFor(const SCEV *SH, Type *Ty = 0);
+ Value *expandCodeFor(const SCEV *SH, Type *Ty = nullptr);
/// getRelevantLoop - Determine the most "relevant" loop for the given SCEV.
const Loop *getRelevantLoop(const SCEV *);
diff --git a/include/llvm/Analysis/ScalarEvolutionExpressions.h b/include/llvm/Analysis/ScalarEvolutionExpressions.h
index ed8c133..01b034f 100644
--- a/include/llvm/Analysis/ScalarEvolutionExpressions.h
+++ b/include/llvm/Analysis/ScalarEvolutionExpressions.h
@@ -14,6 +14,7 @@
#ifndef LLVM_ANALYSIS_SCALAREVOLUTIONEXPRESSIONS_H
#define LLVM_ANALYSIS_SCALAREVOLUTIONEXPRESSIONS_H
+#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Support/ErrorHandling.h"
@@ -151,8 +152,12 @@ namespace llvm {
}
typedef const SCEV *const *op_iterator;
+ typedef iterator_range<op_iterator> op_range;
op_iterator op_begin() const { return Operands; }
op_iterator op_end() const { return Operands + NumOperands; }
+ op_range operands() const {
+ return make_range(op_begin(), op_end());
+ }
Type *getType() const { return getOperand(0)->getType(); }
@@ -352,12 +357,83 @@ namespace llvm {
return S->getSCEVType() == scAddRecExpr;
}
- /// Splits the SCEV into two vectors of SCEVs representing the subscripts
- /// and sizes of an array access. Returns the remainder of the
- /// delinearization that is the offset start of the array.
- const SCEV *delinearize(ScalarEvolution &SE,
- SmallVectorImpl<const SCEV *> &Subscripts,
- SmallVectorImpl<const SCEV *> &Sizes) const;
+ /// Collect parametric terms occurring in step expressions.
+ void collectParametricTerms(ScalarEvolution &SE,
+ SmallVectorImpl<const SCEV *> &Terms) const;
+
+ /// Return in Subscripts the access functions for each dimension in Sizes.
+ void computeAccessFunctions(ScalarEvolution &SE,
+ SmallVectorImpl<const SCEV *> &Subscripts,
+ SmallVectorImpl<const SCEV *> &Sizes) const;
+
+ /// Split this SCEVAddRecExpr into two vectors of SCEVs representing the
+ /// subscripts and sizes of an array access.
+ ///
+ /// The delinearization is a 3 step process: the first two steps compute the
+ /// sizes of each subscript and the third step computes the access functions
+ /// for the delinearized array:
+ ///
+ /// 1. Find the terms in the step functions
+ /// 2. Compute the array size
+ /// 3. Compute the access function: divide the SCEV by the array size
+ /// starting with the innermost dimensions found in step 2. The Quotient
+ /// is the SCEV to be divided in the next step of the recursion. The
+ /// Remainder is the subscript of the innermost dimension. Loop over all
+ /// array dimensions computed in step 2.
+ ///
+ /// To compute a uniform array size for several memory accesses to the same
+ /// object, one can collect in step 1 all the step terms for all the memory
+ /// accesses, and compute in step 2 a unique array shape. This guarantees
+ /// that the array shape will be the same across all memory accesses.
+ ///
+ /// FIXME: We could derive the result of steps 1 and 2 from a description of
+ /// the array shape given in metadata.
+ ///
+ /// Example:
+ ///
+ /// A[][n][m]
+ ///
+ /// for i
+ /// for j
+ /// for k
+ /// A[j+k][2i][5i] =
+ ///
+ /// The initial SCEV:
+ ///
+ /// A[{{{0,+,2*m+5}_i, +, n*m}_j, +, n*m}_k]
+ ///
+ /// 1. Find the different terms in the step functions:
+ /// -> [2*m, 5, n*m, n*m]
+ ///
+ /// 2. Compute the array size: sort and unique them
+ /// -> [n*m, 2*m, 5]
+ /// find the GCD of all the terms = 1
+ /// divide by the GCD and erase constant terms
+ /// -> [n*m, 2*m]
+ /// GCD = m
+ /// divide by GCD -> [n, 2]
+ /// remove constant terms
+ /// -> [n]
+ /// size of the array is A[unknown][n][m]
+ ///
+ /// 3. Compute the access function
+ /// a. Divide {{{0,+,2*m+5}_i, +, n*m}_j, +, n*m}_k by the innermost size m
+ /// Quotient: {{{0,+,2}_i, +, n}_j, +, n}_k
+ /// Remainder: {{{0,+,5}_i, +, 0}_j, +, 0}_k
+ /// The remainder is the subscript of the innermost array dimension: [5i].
+ ///
+ /// b. Divide Quotient: {{{0,+,2}_i, +, n}_j, +, n}_k by next outer size n
+ /// Quotient: {{{0,+,0}_i, +, 1}_j, +, 1}_k
+ /// Remainder: {{{0,+,2}_i, +, 0}_j, +, 0}_k
+ /// The Remainder is the subscript of the next array dimension: [2i].
+ ///
+ /// The subscript of the outermost dimension is the Quotient: [j+k].
+ ///
+ /// Overall, we have: A[][n][m], and the access function: A[j+k][2i][5i].
+ void delinearize(ScalarEvolution &SE,
+ SmallVectorImpl<const SCEV *> &Subscripts,
+ SmallVectorImpl<const SCEV *> &Sizes,
+ const SCEV *ElementSize) const;
};
//===--------------------------------------------------------------------===//
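Editor's note: to make the three-step delinearization documented above concrete from a client's perspective, here is a hedged sketch using only the interfaces introduced in this change (SCEVAddRecExpr::delinearize and ScalarEvolution::getElementSize); printDelinearization and its parameters are illustrative assumptions.

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Analysis/ScalarEvolution.h"
    #include "llvm/Analysis/ScalarEvolutionExpressions.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    // Print the recovered subscripts of a memory access, e.g. [j+k][2*i][5*i].
    static void printDelinearization(ScalarEvolution &SE, Instruction *Inst,
                                     Value *Ptr) {
      const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Ptr));
      if (!AR)
        return;
      SmallVector<const SCEV *, 4> Subscripts, Sizes;
      AR->delinearize(SE, Subscripts, Sizes, SE.getElementSize(Inst));
      for (unsigned i = 0, e = Subscripts.size(); i != e; ++i)
        outs() << "[" << *Subscripts[i] << "]";
      outs() << "\n";
    }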
diff --git a/include/llvm/Analysis/SparsePropagation.h b/include/llvm/Analysis/SparsePropagation.h
index 76c8ccf..65ff2f6 100644
--- a/include/llvm/Analysis/SparsePropagation.h
+++ b/include/llvm/Analysis/SparsePropagation.h
@@ -82,7 +82,7 @@ public:
/// constant value, return it. Otherwise return null. The returned value
/// must be in the same LLVM type as Val.
virtual Constant *GetConstant(LatticeVal LV, Value *Val, SparseSolver &SS) {
- return 0;
+ return nullptr;
}
/// ComputeArgument - Given a formal argument value, compute and return a
diff --git a/include/llvm/Analysis/TargetTransformInfo.h b/include/llvm/Analysis/TargetTransformInfo.h
index 2ac6ffa..79fe1dc 100644
--- a/include/llvm/Analysis/TargetTransformInfo.h
+++ b/include/llvm/Analysis/TargetTransformInfo.h
@@ -105,7 +105,7 @@ public:
/// The returned cost is defined in terms of \c TargetCostConstants, see its
/// comments for a detailed explanation of the cost values.
virtual unsigned getOperationCost(unsigned Opcode, Type *Ty,
- Type *OpTy = 0) const;
+ Type *OpTy = nullptr) const;
/// \brief Estimate the cost of a GEP operation when lowered.
///
@@ -356,7 +356,7 @@ public:
/// The index and subtype parameters are used by the subvector insertion and
/// extraction shuffle kinds.
virtual unsigned getShuffleCost(ShuffleKind Kind, Type *Tp, int Index = 0,
- Type *SubTp = 0) const;
+ Type *SubTp = nullptr) const;
/// \return The expected cost of cast instructions, such as bitcast, trunc,
/// zext, etc.
@@ -369,7 +369,7 @@ public:
/// \returns The expected cost of compare and select instructions.
virtual unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
- Type *CondTy = 0) const;
+ Type *CondTy = nullptr) const;
/// \return The expected cost of vector Insert and Extract.
/// Use -1 to indicate that there is no information on the index value.
diff --git a/include/llvm/Analysis/ValueTracking.h b/include/llvm/Analysis/ValueTracking.h
index 0392f98..ce78967 100644
--- a/include/llvm/Analysis/ValueTracking.h
+++ b/include/llvm/Analysis/ValueTracking.h
@@ -27,24 +27,22 @@ namespace llvm {
class MDNode;
class TargetLibraryInfo;
- /// ComputeMaskedBits - Determine which of the bits specified in Mask are
- /// known to be either zero or one and return them in the KnownZero/KnownOne
- /// bit sets. This code only analyzes bits in Mask, in order to short-circuit
- /// processing.
+ /// Determine which bits of V are known to be either zero or one and return
+ /// them in the KnownZero/KnownOne bit sets.
///
/// This function is defined on values with integer type, values with pointer
/// type (but only if TD is non-null), and vectors of integers. In the case
- /// where V is a vector, the mask, known zero, and known one values are the
+ /// where V is a vector, the known zero and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
- void ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
- const DataLayout *TD = 0, unsigned Depth = 0);
- void computeMaskedBitsLoad(const MDNode &Ranges, APInt &KnownZero);
+ void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
+ const DataLayout *TD = nullptr, unsigned Depth = 0);
+ void computeKnownBitsLoad(const MDNode &Ranges, APInt &KnownZero);
/// ComputeSignBit - Determine whether the sign bit is known to be zero or
- /// one. Convenience wrapper around ComputeMaskedBits.
+ /// one. Convenience wrapper around computeKnownBits.
void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
- const DataLayout *TD = 0, unsigned Depth = 0);
+ const DataLayout *TD = nullptr, unsigned Depth = 0);
/// isKnownToBeAPowerOfTwo - Return true if the given value is known to have
/// exactly one bit set when defined. For vectors return true if every
@@ -57,7 +55,8 @@ namespace llvm {
/// when defined. For vectors return true if every element is known to be
/// non-zero when defined. Supports values with integer or pointer type and
/// vectors of integers.
- bool isKnownNonZero(Value *V, const DataLayout *TD = 0, unsigned Depth = 0);
+ bool isKnownNonZero(Value *V, const DataLayout *TD = nullptr,
+ unsigned Depth = 0);
/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
/// this predicate to simplify operations downstream. Mask is known to be
@@ -69,7 +68,7 @@ namespace llvm {
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
bool MaskedValueIsZero(Value *V, const APInt &Mask,
- const DataLayout *TD = 0, unsigned Depth = 0);
+ const DataLayout *TD = nullptr, unsigned Depth = 0);
/// ComputeNumSignBits - Return the number of times the sign bit of the
@@ -80,7 +79,7 @@ namespace llvm {
///
/// 'Op' must have a scalar integer type.
///
- unsigned ComputeNumSignBits(Value *Op, const DataLayout *TD = 0,
+ unsigned ComputeNumSignBits(Value *Op, const DataLayout *TD = nullptr,
unsigned Depth = 0);
/// ComputeMultiple - This function computes the integer multiple of Base that
@@ -112,7 +111,7 @@ namespace llvm {
/// insertvalues when a part of a nested struct is extracted.
Value *FindInsertedValue(Value *V,
ArrayRef<unsigned> idx_range,
- Instruction *InsertBefore = 0);
+ Instruction *InsertBefore = nullptr);
/// GetPointerBaseWithConstantOffset - Analyze the specified pointer to see if
/// it can be expressed as a base pointer plus a constant offset. Return the
@@ -143,10 +142,10 @@ namespace llvm {
/// being addressed. Note that the returned value has pointer type if the
/// specified value does. If the MaxLookup value is non-zero, it limits the
/// number of instructions to be stripped off.
- Value *GetUnderlyingObject(Value *V, const DataLayout *TD = 0,
+ Value *GetUnderlyingObject(Value *V, const DataLayout *TD = nullptr,
unsigned MaxLookup = 6);
static inline const Value *
- GetUnderlyingObject(const Value *V, const DataLayout *TD = 0,
+ GetUnderlyingObject(const Value *V, const DataLayout *TD = nullptr,
unsigned MaxLookup = 6) {
return GetUnderlyingObject(const_cast<Value *>(V), TD, MaxLookup);
}
@@ -156,7 +155,7 @@ namespace llvm {
/// multiple objects.
void GetUnderlyingObjects(Value *V,
SmallVectorImpl<Value *> &Objects,
- const DataLayout *TD = 0,
+ const DataLayout *TD = nullptr,
unsigned MaxLookup = 6);
/// onlyUsedByLifetimeMarkers - Return true if the only users of this pointer
@@ -182,12 +181,12 @@ namespace llvm {
/// However, this method can return true for instructions that read memory;
/// for such instructions, moving them may change the resulting value.
bool isSafeToSpeculativelyExecute(const Value *V,
- const DataLayout *TD = 0);
+ const DataLayout *TD = nullptr);
/// isKnownNonNull - Return true if this pointer couldn't possibly be null by
/// its definition. This returns true for allocas, non-extern-weak globals
/// and byval arguments.
- bool isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI = 0);
+ bool isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI = nullptr);
} // end namespace llvm
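Editor's note: a small hedged sketch of the renamed computeKnownBits() entry point; the helper lowBitsKnownZero and its use case are illustrative, not part of the patch, and it assumes V has (a vector of) integer type.

    #include "llvm/ADT/APInt.h"
    #include "llvm/Analysis/ValueTracking.h"
    #include "llvm/IR/Type.h"
    #include "llvm/IR/Value.h"
    using namespace llvm;

    // Return true if the low N bits of V are known to be zero (e.g. to reason
    // about divisibility). DL may be null, matching the new nullptr default.
    static bool lowBitsKnownZero(Value *V, unsigned N, const DataLayout *DL) {
      unsigned BitWidth = V->getType()->getScalarSizeInBits();
      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
      computeKnownBits(V, KnownZero, KnownOne, DL);
      return KnownZero.countTrailingOnes() >= N;
    }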
diff --git a/include/llvm/Bitcode/BitstreamReader.h b/include/llvm/Bitcode/BitstreamReader.h
index fcbf426..6f478b7 100644
--- a/include/llvm/Bitcode/BitstreamReader.h
+++ b/include/llvm/Bitcode/BitstreamReader.h
@@ -111,7 +111,7 @@ public:
i != e; ++i)
if (BlockInfoRecords[i].BlockID == BlockID)
return &BlockInfoRecords[i];
- return 0;
+ return nullptr;
}
BlockInfo &getOrCreateBlockInfo(unsigned BlockID) {
@@ -200,9 +200,9 @@ class BitstreamCursor {
public:
- BitstreamCursor() : BitStream(0), NextChar(0) {
- }
- BitstreamCursor(const BitstreamCursor &RHS) : BitStream(0), NextChar(0) {
+ BitstreamCursor() : BitStream(nullptr), NextChar(0) {}
+ BitstreamCursor(const BitstreamCursor &RHS)
+ : BitStream(nullptr), NextChar(0) {
operator=(RHS);
}
@@ -490,7 +490,7 @@ public:
/// EnterSubBlock - Having read the ENTER_SUBBLOCK abbrevid, enter
/// the block, and return true if the block has an error.
- bool EnterSubBlock(unsigned BlockID, unsigned *NumWordsP = 0);
+ bool EnterSubBlock(unsigned BlockID, unsigned *NumWordsP = nullptr);
bool ReadBlockEnd() {
if (BlockScope.empty()) return true;
@@ -541,7 +541,7 @@ public:
void skipRecord(unsigned AbbrevID);
unsigned readRecord(unsigned AbbrevID, SmallVectorImpl<uint64_t> &Vals,
- StringRef *Blob = 0);
+ StringRef *Blob = nullptr);
//===--------------------------------------------------------------------===//
// Abbrev Processing
diff --git a/include/llvm/Bitcode/BitstreamWriter.h b/include/llvm/Bitcode/BitstreamWriter.h
index ef88a88..dcfebd9 100644
--- a/include/llvm/Bitcode/BitstreamWriter.h
+++ b/include/llvm/Bitcode/BitstreamWriter.h
@@ -204,7 +204,7 @@ public:
i != e; ++i)
if (BlockInfoRecords[i].BlockID == BlockID)
return &BlockInfoRecords[i];
- return 0;
+ return nullptr;
}
void EnterSubblock(unsigned BlockID, unsigned CodeLen) {
@@ -347,7 +347,7 @@ private:
EmitAbbreviatedField(EltEnc, (unsigned char)BlobData[i]);
// Know that blob data is consumed for assertion below.
- BlobData = 0;
+ BlobData = nullptr;
} else {
// Emit a vbr6 to indicate the number of elements present.
EmitVBR(static_cast<uint32_t>(Vals.size()-RecordIdx), 6);
@@ -378,7 +378,7 @@ private:
WriteByte((unsigned char)BlobData[i]);
// Know that blob data is consumed for assertion below.
- BlobData = 0;
+ BlobData = nullptr;
} else {
for (unsigned e = Vals.size(); RecordIdx != e; ++RecordIdx) {
assert(isUInt<8>(Vals[RecordIdx]) &&
@@ -397,7 +397,7 @@ private:
}
}
assert(RecordIdx == Vals.size() && "Not all record operands emitted!");
- assert(BlobData == 0 &&
+ assert(BlobData == nullptr &&
"Blob data specified for record that doesn't use it!");
}
diff --git a/include/llvm/Bitcode/LLVMBitCodes.h b/include/llvm/Bitcode/LLVMBitCodes.h
index 7e6831b..04c08ab 100644
--- a/include/llvm/Bitcode/LLVMBitCodes.h
+++ b/include/llvm/Bitcode/LLVMBitCodes.h
@@ -311,7 +311,7 @@ namespace bitc {
// 32 is unused.
FUNC_CODE_DEBUG_LOC_AGAIN = 33, // DEBUG_LOC_AGAIN
- FUNC_CODE_INST_CALL = 34, // CALL: [attr, fnty, fnid, args...]
+ FUNC_CODE_INST_CALL = 34, // CALL: [attr, cc, fnty, fnid, args...]
FUNC_CODE_DEBUG_LOC = 35, // DEBUG_LOC: [Line,Col,ScopeVal, IAVal]
FUNC_CODE_INST_FENCE = 36, // FENCE: [ordering, synchscope]
@@ -371,7 +371,8 @@ namespace bitc {
ATTR_KIND_BUILTIN = 35,
ATTR_KIND_COLD = 36,
ATTR_KIND_OPTIMIZE_NONE = 37,
- ATTR_KIND_IN_ALLOCA = 38
+ ATTR_KIND_IN_ALLOCA = 38,
+ ATTR_KIND_NON_NULL = 39
};
} // End bitc namespace
diff --git a/include/llvm/Bitcode/ReaderWriter.h b/include/llvm/Bitcode/ReaderWriter.h
index 0918e92..4c194a6 100644
--- a/include/llvm/Bitcode/ReaderWriter.h
+++ b/include/llvm/Bitcode/ReaderWriter.h
@@ -39,7 +39,7 @@ namespace llvm {
Module *getStreamedBitcodeModule(const std::string &name,
DataStreamer *streamer,
LLVMContext &Context,
- std::string *ErrMsg = 0);
+ std::string *ErrMsg = nullptr);
/// getBitcodeTargetTriple - Read the header of the specified bitcode
/// buffer and extract just the triple information. If successful,
@@ -48,7 +48,7 @@ namespace llvm {
/// if ErrMsg is non-null.
std::string getBitcodeTargetTriple(MemoryBuffer *Buffer,
LLVMContext &Context,
- std::string *ErrMsg = 0);
+ std::string *ErrMsg = nullptr);
/// Read the specified bitcode file, returning the module.
/// This method *never* takes ownership of Buffer.
diff --git a/include/llvm/CMakeLists.txt b/include/llvm/CMakeLists.txt
index 0f5c63d..ca4fd13 100644
--- a/include/llvm/CMakeLists.txt
+++ b/include/llvm/CMakeLists.txt
@@ -12,3 +12,9 @@ if( MSVC_IDE OR XCODE )
set_target_properties(llvm_headers_do_not_build PROPERTIES FOLDER "Misc"
EXCLUDE_FROM_DEFAULT_BUILD ON)
endif()
+
+# If we're doing an out-of-tree build, copy a module map for generated
+# header files into the build area.
+if (NOT "${CMAKE_SOURCE_DIR}" STREQUAL "${CMAKE_BINARY_DIR}")
+ configure_file(module.modulemap.build module.modulemap COPYONLY)
+endif (NOT "${CMAKE_SOURCE_DIR}" STREQUAL "${CMAKE_BINARY_DIR}")
diff --git a/include/llvm/CodeGen/Analysis.h b/include/llvm/CodeGen/Analysis.h
index 5f2bbd6..c3aefd4 100644
--- a/include/llvm/CodeGen/Analysis.h
+++ b/include/llvm/CodeGen/Analysis.h
@@ -54,7 +54,7 @@ inline unsigned ComputeLinearIndex(Type *Ty,
///
void ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
SmallVectorImpl<EVT> &ValueVTs,
- SmallVectorImpl<uint64_t> *Offsets = 0,
+ SmallVectorImpl<uint64_t> *Offsets = nullptr,
uint64_t StartingOffset = 0);
/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
diff --git a/include/llvm/CodeGen/AsmPrinter.h b/include/llvm/CodeGen/AsmPrinter.h
index d96d810..b53fb42 100644
--- a/include/llvm/CodeGen/AsmPrinter.h
+++ b/include/llvm/CodeGen/AsmPrinter.h
@@ -23,501 +23,490 @@
#include "llvm/Support/ErrorHandling.h"
namespace llvm {
- class AsmPrinterHandler;
- class BlockAddress;
- class ByteStreamer;
- class GCStrategy;
- class Constant;
- class ConstantArray;
- class GCMetadataPrinter;
- class GlobalValue;
- class GlobalVariable;
- class MachineBasicBlock;
- class MachineFunction;
- class MachineInstr;
- class MachineLocation;
- class MachineLoopInfo;
- class MachineLoop;
- class MachineConstantPoolValue;
- class MachineJumpTableInfo;
- class MachineModuleInfo;
- class MCAsmInfo;
- class MCCFIInstruction;
- class MCContext;
- class MCInst;
- class MCInstrInfo;
- class MCSection;
- class MCStreamer;
- class MCSubtargetInfo;
- class MCSymbol;
- class MDNode;
- class DwarfDebug;
- class DwarfException;
- class Mangler;
- class TargetLoweringObjectFile;
- class DataLayout;
- class TargetMachine;
-
- /// AsmPrinter - This class is intended to be used as a driving class for all
- /// asm writers.
- class AsmPrinter : public MachineFunctionPass {
- public:
- /// Target machine description.
- ///
- TargetMachine &TM;
-
- /// Target Asm Printer information.
- ///
- const MCAsmInfo *MAI;
-
- const MCInstrInfo *MII;
- /// OutContext - This is the context for the output file that we are
- /// streaming. This owns all of the global MC-related objects for the
- /// generated translation unit.
- MCContext &OutContext;
-
- /// OutStreamer - This is the MCStreamer object for the file we are
- /// generating. This contains the transient state for the current
- /// translation unit that we are generating (such as the current section
- /// etc).
- MCStreamer &OutStreamer;
-
- /// The current machine function.
- const MachineFunction *MF;
-
- /// MMI - This is a pointer to the current MachineModuleInfo.
- MachineModuleInfo *MMI;
-
- /// Name-mangler for global names.
- ///
- Mangler *Mang;
-
- /// The symbol for the current function. This is recalculated at the
- /// beginning of each call to runOnMachineFunction().
- ///
- MCSymbol *CurrentFnSym;
-
- /// The symbol used to represent the start of the current function for the
- /// purpose of calculating its size (e.g. using the .size directive). By
- /// default, this is equal to CurrentFnSym.
- MCSymbol *CurrentFnSymForSize;
-
- private:
- // GCMetadataPrinters - The garbage collection metadata printer table.
- void *GCMetadataPrinters; // Really a DenseMap.
-
- /// VerboseAsm - Emit comments in assembly output if this is true.
- ///
- bool VerboseAsm;
- static char ID;
-
- /// If VerboseAsm is set, a pointer to the loop info for this
- /// function.
- MachineLoopInfo *LI;
-
- struct HandlerInfo {
- AsmPrinterHandler *Handler;
- const char *TimerName, *TimerGroupName;
- HandlerInfo(AsmPrinterHandler *Handler, const char *TimerName,
- const char *TimerGroupName)
- : Handler(Handler), TimerName(TimerName),
- TimerGroupName(TimerGroupName) {}
- };
- /// Handlers - a vector of all debug/EH info emitters we should use.
- /// This vector maintains ownership of the emitters.
- SmallVector<HandlerInfo, 1> Handlers;
-
- /// DD - If the target supports dwarf debug info, this pointer is non-null.
- DwarfDebug *DD;
-
- protected:
- explicit AsmPrinter(TargetMachine &TM, MCStreamer &Streamer);
-
- public:
- virtual ~AsmPrinter();
-
- DwarfDebug *getDwarfDebug() { return DD; }
-
- /// isVerbose - Return true if assembly output should contain comments.
- ///
- bool isVerbose() const { return VerboseAsm; }
-
- /// getFunctionNumber - Return a unique ID for the current function.
- ///
- unsigned getFunctionNumber() const;
-
- /// getObjFileLowering - Return information about object file lowering.
- const TargetLoweringObjectFile &getObjFileLowering() const;
-
- /// getDataLayout - Return information about data layout.
- const DataLayout &getDataLayout() const;
-
- /// getSubtargetInfo - Return information about subtarget.
- const MCSubtargetInfo &getSubtargetInfo() const;
-
- void EmitToStreamer(MCStreamer &S, const MCInst &Inst);
-
- /// getTargetTriple - Return the target triple string.
- StringRef getTargetTriple() const;
-
- /// getCurrentSection() - Return the current section we are emitting to.
- const MCSection *getCurrentSection() const;
-
- void getNameWithPrefix(SmallVectorImpl<char> &Name,
- const GlobalValue *GV) const;
-
- MCSymbol *getSymbol(const GlobalValue *GV) const;
-
- //===------------------------------------------------------------------===//
- // MachineFunctionPass Implementation.
- //===------------------------------------------------------------------===//
-
- /// getAnalysisUsage - Record analysis usage.
- ///
- void getAnalysisUsage(AnalysisUsage &AU) const override;
-
- /// doInitialization - Set up the AsmPrinter when we are working on a new
- /// module. If your pass overrides this, it must make sure to explicitly
- /// call this implementation.
- bool doInitialization(Module &M) override;
+class AsmPrinterHandler;
+class BlockAddress;
+class ByteStreamer;
+class GCStrategy;
+class Constant;
+class ConstantArray;
+class GCMetadataPrinter;
+class GlobalValue;
+class GlobalVariable;
+class MachineBasicBlock;
+class MachineFunction;
+class MachineInstr;
+class MachineLocation;
+class MachineLoopInfo;
+class MachineLoop;
+class MachineConstantPoolValue;
+class MachineJumpTableInfo;
+class MachineModuleInfo;
+class MCAsmInfo;
+class MCCFIInstruction;
+class MCContext;
+class MCInst;
+class MCInstrInfo;
+class MCSection;
+class MCStreamer;
+class MCSubtargetInfo;
+class MCSymbol;
+class MDNode;
+class DwarfDebug;
+class DwarfException;
+class Mangler;
+class TargetLoweringObjectFile;
+class DataLayout;
+class TargetMachine;
+
+/// This class is intended to be used as a driving class for all asm writers.
+class AsmPrinter : public MachineFunctionPass {
+public:
+ /// Target machine description.
+ ///
+ TargetMachine &TM;
+
+ /// Target Asm Printer information.
+ ///
+ const MCAsmInfo *MAI;
+
+ const MCInstrInfo *MII;
+ /// This is the context for the output file that we are streaming. This owns
+ /// all of the global MC-related objects for the generated translation unit.
+ MCContext &OutContext;
+
+ /// This is the MCStreamer object for the file we are generating. This
+ /// contains the transient state for the current translation unit that we are
+ /// generating (such as the current section etc).
+ MCStreamer &OutStreamer;
+
+ /// The current machine function.
+ const MachineFunction *MF;
+
+ /// This is a pointer to the current MachineModuleInfo.
+ MachineModuleInfo *MMI;
+
+ /// Name-mangler for global names.
+ ///
+ Mangler *Mang;
+
+ /// The symbol for the current function. This is recalculated at the beginning
+ /// of each call to runOnMachineFunction().
+ ///
+ MCSymbol *CurrentFnSym;
+
+ /// The symbol used to represent the start of the current function for the
+ /// purpose of calculating its size (e.g. using the .size directive). By
+ /// default, this is equal to CurrentFnSym.
+ MCSymbol *CurrentFnSymForSize;
+
+private:
+ // The garbage collection metadata printer table.
+ void *GCMetadataPrinters; // Really a DenseMap.
+
+ /// Emit comments in assembly output if this is true.
+ ///
+ bool VerboseAsm;
+ static char ID;
+
+ /// If VerboseAsm is set, a pointer to the loop info for this function.
+ MachineLoopInfo *LI;
+
+ struct HandlerInfo {
+ AsmPrinterHandler *Handler;
+ const char *TimerName, *TimerGroupName;
+ HandlerInfo(AsmPrinterHandler *Handler, const char *TimerName,
+ const char *TimerGroupName)
+ : Handler(Handler), TimerName(TimerName),
+ TimerGroupName(TimerGroupName) {}
+ };
+ /// A vector of all debug/EH info emitters we should use. This vector
+ /// maintains ownership of the emitters.
+ SmallVector<HandlerInfo, 1> Handlers;
- /// doFinalization - Shut down the asmprinter. If you override this in your
- /// pass, you must make sure to call it explicitly.
- bool doFinalization(Module &M) override;
-
- /// runOnMachineFunction - Emit the specified function out to the
- /// OutStreamer.
- bool runOnMachineFunction(MachineFunction &MF) override {
- SetupMachineFunction(MF);
- EmitFunctionHeader();
- EmitFunctionBody();
- return false;
- }
+ /// If the target supports dwarf debug info, this pointer is non-null.
+ DwarfDebug *DD;
- //===------------------------------------------------------------------===//
- // Coarse grained IR lowering routines.
- //===------------------------------------------------------------------===//
-
- /// SetupMachineFunction - This should be called when a new MachineFunction
- /// is being processed from runOnMachineFunction.
- void SetupMachineFunction(MachineFunction &MF);
+protected:
+ explicit AsmPrinter(TargetMachine &TM, MCStreamer &Streamer);
- /// EmitFunctionHeader - This method emits the header for the current
- /// function.
- void EmitFunctionHeader();
+public:
+ virtual ~AsmPrinter();
- /// EmitFunctionBody - This method emits the body and trailer for a
- /// function.
- void EmitFunctionBody();
+ DwarfDebug *getDwarfDebug() { return DD; }
- void emitCFIInstruction(const MachineInstr &MI);
-
- enum CFIMoveType {
- CFI_M_None,
- CFI_M_EH,
- CFI_M_Debug
- };
- CFIMoveType needsCFIMoves();
+ /// Return true if assembly output should contain comments.
+ ///
+ bool isVerbose() const { return VerboseAsm; }
- bool needsSEHMoves();
+ /// Return a unique ID for the current function.
+ ///
+ unsigned getFunctionNumber() const;
- /// EmitConstantPool - Print to the current output stream assembly
- /// representations of the constants in the constant pool MCP. This is
- /// used to print out constants which have been "spilled to memory" by
- /// the code generator.
- ///
- virtual void EmitConstantPool();
+ /// Return information about object file lowering.
+ const TargetLoweringObjectFile &getObjFileLowering() const;
- /// EmitJumpTableInfo - Print assembly representations of the jump tables
- /// used by the current function to the current output stream.
- ///
- void EmitJumpTableInfo();
+ /// Return information about data layout.
+ const DataLayout &getDataLayout() const;
- /// EmitGlobalVariable - Emit the specified global variable to the .s file.
- virtual void EmitGlobalVariable(const GlobalVariable *GV);
+ /// Return information about subtarget.
+ const MCSubtargetInfo &getSubtargetInfo() const;
- /// EmitSpecialLLVMGlobal - Check to see if the specified global is a
- /// special global used by LLVM. If so, emit it and return true, otherwise
- /// do nothing and return false.
- bool EmitSpecialLLVMGlobal(const GlobalVariable *GV);
+ void EmitToStreamer(MCStreamer &S, const MCInst &Inst);
- /// EmitAlignment - Emit an alignment directive to the specified power of
- /// two boundary. For example, if you pass in 3 here, you will get an 8
- /// byte alignment. If a global value is specified, and if that global has
- /// an explicit alignment requested, it will override the alignment request
- /// if required for correctness.
- ///
- void EmitAlignment(unsigned NumBits, const GlobalValue *GV = 0) const;
-
- /// EmitBasicBlockStart - This method prints the label for the specified
- /// MachineBasicBlock, an alignment (if present) and a comment describing
- /// it if appropriate.
- void EmitBasicBlockStart(const MachineBasicBlock *MBB) const;
-
- /// \brief Print a general LLVM constant to the .s file.
- void EmitGlobalConstant(const Constant *CV);
-
-
- //===------------------------------------------------------------------===//
- // Overridable Hooks
- //===------------------------------------------------------------------===//
-
- // Targets can, or in the case of EmitInstruction, must implement these to
- // customize output.
-
- /// EmitStartOfAsmFile - This virtual method can be overridden by targets
- /// that want to emit something at the start of their file.
- virtual void EmitStartOfAsmFile(Module &) {}
-
- /// EmitEndOfAsmFile - This virtual method can be overridden by targets that
- /// want to emit something at the end of their file.
- virtual void EmitEndOfAsmFile(Module &) {}
-
- /// EmitFunctionBodyStart - Targets can override this to emit stuff before
- /// the first basic block in the function.
- virtual void EmitFunctionBodyStart() {}
-
- /// EmitFunctionBodyEnd - Targets can override this to emit stuff after
- /// the last basic block in the function.
- virtual void EmitFunctionBodyEnd() {}
-
- /// EmitInstruction - Targets should implement this to emit instructions.
- virtual void EmitInstruction(const MachineInstr *) {
- llvm_unreachable("EmitInstruction not implemented");
- }
-
- /// GetCPISymbol - Return the symbol for the specified constant pool entry.
- virtual MCSymbol *GetCPISymbol(unsigned CPID) const;
-
- virtual void EmitFunctionEntryLabel();
-
- virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV);
-
- /// EmitXXStructor - Targets can override this to change how global
- /// constants that are part of a C++ static/global constructor list are
- /// emitted.
- virtual void EmitXXStructor(const Constant *CV) {
- EmitGlobalConstant(CV);
- }
-
- /// isBlockOnlyReachableByFallthough - Return true if the basic block has
- /// exactly one predecessor and the control transfer mechanism between
- /// the predecessor and this block is a fall-through.
- virtual bool
- isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const;
-
- /// emitImplicitDef - Targets can override this to customize the output of
- /// IMPLICIT_DEF instructions in verbose mode.
- virtual void emitImplicitDef(const MachineInstr *MI) const;
-
- //===------------------------------------------------------------------===//
- // Symbol Lowering Routines.
- //===------------------------------------------------------------------===//
- public:
-
- /// GetTempSymbol - Return the MCSymbol corresponding to the assembler
- /// temporary label with the specified stem and unique ID.
- MCSymbol *GetTempSymbol(Twine Name, unsigned ID) const;
-
- /// GetTempSymbol - Return an assembler temporary label with the specified
- /// stem.
- MCSymbol *GetTempSymbol(Twine Name) const;
-
- /// Return the MCSymbol for a private symbol with global value name as its
- /// base, with the specified suffix.
- MCSymbol *getSymbolWithGlobalValueBase(const GlobalValue *GV,
- StringRef Suffix) const;
-
- /// GetExternalSymbolSymbol - Return the MCSymbol for the specified
- /// ExternalSymbol.
- MCSymbol *GetExternalSymbolSymbol(StringRef Sym) const;
-
- /// GetJTISymbol - Return the symbol for the specified jump table entry.
- MCSymbol *GetJTISymbol(unsigned JTID, bool isLinkerPrivate = false) const;
-
- /// GetJTSetSymbol - Return the symbol for the specified jump table .set
- /// FIXME: privatize to AsmPrinter.
- MCSymbol *GetJTSetSymbol(unsigned UID, unsigned MBBID) const;
-
- /// GetBlockAddressSymbol - Return the MCSymbol used to satisfy BlockAddress
- /// uses of the specified basic block.
- MCSymbol *GetBlockAddressSymbol(const BlockAddress *BA) const;
- MCSymbol *GetBlockAddressSymbol(const BasicBlock *BB) const;
-
- //===------------------------------------------------------------------===//
- // Emission Helper Routines.
- //===------------------------------------------------------------------===//
- public:
- /// printOffset - This is just convenient handler for printing offsets.
- void printOffset(int64_t Offset, raw_ostream &OS) const;
-
- /// EmitInt8 - Emit a byte directive and value.
- ///
- void EmitInt8(int Value) const;
-
- /// EmitInt16 - Emit a short directive and value.
- ///
- void EmitInt16(int Value) const;
-
- /// EmitInt32 - Emit a long directive and value.
- ///
- void EmitInt32(int Value) const;
-
- /// EmitLabelDifference - Emit something like ".long Hi-Lo" where the size
- /// in bytes of the directive is specified by Size and Hi/Lo specify the
- /// labels. This implicitly uses .set if it is available.
- void EmitLabelDifference(const MCSymbol *Hi, const MCSymbol *Lo,
- unsigned Size) const;
-
- /// EmitLabelOffsetDifference - Emit something like ".long Hi+Offset-Lo"
- /// where the size in bytes of the directive is specified by Size and Hi/Lo
- /// specify the labels. This implicitly uses .set if it is available.
- void EmitLabelOffsetDifference(const MCSymbol *Hi, uint64_t Offset,
- const MCSymbol *Lo, unsigned Size) const;
-
- /// EmitLabelPlusOffset - Emit something like ".long Label+Offset"
- /// where the size in bytes of the directive is specified by Size and Label
- /// specifies the label. This implicitly uses .set if it is available.
- void EmitLabelPlusOffset(const MCSymbol *Label, uint64_t Offset,
- unsigned Size,
- bool IsSectionRelative = false) const;
-
- /// EmitLabelReference - Emit something like ".long Label"
- /// where the size in bytes of the directive is specified by Size and Label
- /// specifies the label.
- void EmitLabelReference(const MCSymbol *Label, unsigned Size,
- bool IsSectionRelative = false) const {
- EmitLabelPlusOffset(Label, 0, Size, IsSectionRelative);
- }
-
- //===------------------------------------------------------------------===//
- // Dwarf Emission Helper Routines
- //===------------------------------------------------------------------===//
-
- /// EmitSLEB128 - emit the specified signed leb128 value.
- void EmitSLEB128(int64_t Value, const char *Desc = 0) const;
-
- /// EmitULEB128 - emit the specified unsigned leb128 value.
- void EmitULEB128(uint64_t Value, const char *Desc = 0,
- unsigned PadTo = 0) const;
-
- /// EmitCFAByte - Emit a .byte 42 directive for a DW_CFA_xxx value.
- void EmitCFAByte(unsigned Val) const;
-
- /// EmitEncodingByte - Emit a .byte 42 directive that corresponds to an
- /// encoding. If verbose assembly output is enabled, we output comments
- /// describing the encoding. Desc is a string saying what the encoding is
- /// specifying (e.g. "LSDA").
- void EmitEncodingByte(unsigned Val, const char *Desc = 0) const;
-
- /// GetSizeOfEncodedValue - Return the size of the encoding in bytes.
- unsigned GetSizeOfEncodedValue(unsigned Encoding) const;
-
- /// EmitReference - Emit reference to a ttype global with a specified encoding.
- void EmitTTypeReference(const GlobalValue *GV, unsigned Encoding) const;
-
- /// EmitSectionOffset - Emit the 4-byte offset of Label from the start of
- /// its section. This can be done with a special directive if the target
- /// supports it (e.g. cygwin) or by emitting it as an offset from a label at
- /// the start of the section.
- ///
- /// SectionLabel is a temporary label emitted at the start of the section
- /// that Label lives in.
- void EmitSectionOffset(const MCSymbol *Label,
- const MCSymbol *SectionLabel) const;
-
- /// getISAEncoding - Get the value for DW_AT_APPLE_isa. Zero if no isa
- /// encoding specified.
- virtual unsigned getISAEncoding() { return 0; }
-
- /// EmitDwarfRegOp - Emit dwarf register operation.
- virtual void EmitDwarfRegOp(ByteStreamer &BS, const MachineLocation &MLoc,
- bool Indirect) const;
-
- //===------------------------------------------------------------------===//
- // Dwarf Lowering Routines
- //===------------------------------------------------------------------===//
-
- /// \brief Emit frame instruction to describe the layout of the frame.
- void emitCFIInstruction(const MCCFIInstruction &Inst) const;
-
- //===------------------------------------------------------------------===//
- // Inline Asm Support
- //===------------------------------------------------------------------===//
- public:
- // These are hooks that targets can override to implement inline asm
- // support. These should probably be moved out of AsmPrinter someday.
-
- /// PrintSpecial - Print information related to the specified machine instr
- /// that is independent of the operand, and may be independent of the instr
- /// itself. This can be useful for portably encoding the comment character
- /// or other bits of target-specific knowledge into the asmstrings. The
- /// syntax used is ${:comment}. Targets can override this to add support
- /// for their own strange codes.
- virtual void PrintSpecial(const MachineInstr *MI, raw_ostream &OS,
- const char *Code) const;
-
- /// PrintAsmOperand - Print the specified operand of MI, an INLINEASM
- /// instruction, using the specified assembler variant. Targets should
- /// override this to format as appropriate. This method can return true if
- /// the operand is erroneous.
- virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &OS);
-
- /// PrintAsmMemoryOperand - Print the specified operand of MI, an INLINEASM
- /// instruction, using the specified assembler variant as an address.
- /// Targets should override this to format as appropriate. This method can
- /// return true if the operand is erroneous.
- virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant,
- const char *ExtraCode, raw_ostream &OS);
-
- /// Let the target do anything it needs to do after emitting inlineasm.
- /// This callback can be used restore the original mode in case the
- /// inlineasm contains directives to switch modes.
- /// \p StartInfo - the original subtarget info before inline asm
- /// \p EndInfo - the final subtarget info after parsing the inline asm,
- /// or NULL if the value is unknown.
- virtual void emitInlineAsmEnd(const MCSubtargetInfo &StartInfo,
- const MCSubtargetInfo *EndInfo) const;
-
- private:
- /// Private state for PrintSpecial()
- // Assign a unique ID to this machine instruction.
- mutable const MachineInstr *LastMI;
- mutable unsigned LastFn;
- mutable unsigned Counter;
- mutable unsigned SetCounter;
-
- /// EmitInlineAsm - Emit a blob of inline asm to the output streamer.
- void EmitInlineAsm(StringRef Str, const MDNode *LocMDNode = 0,
- InlineAsm::AsmDialect AsmDialect =
- InlineAsm::AD_ATT) const;
-
- /// EmitInlineAsm - This method formats and emits the specified machine
- /// instruction that is an inline asm.
- void EmitInlineAsm(const MachineInstr *MI) const;
-
- //===------------------------------------------------------------------===//
- // Internal Implementation Details
- //===------------------------------------------------------------------===//
-
- /// EmitVisibility - This emits visibility information about symbol, if
- /// this is suported by the target.
- void EmitVisibility(MCSymbol *Sym, unsigned Visibility,
- bool IsDefinition = true) const;
-
- void EmitLinkage(const GlobalValue *GV, MCSymbol *GVSym) const;
-
- void EmitJumpTableEntry(const MachineJumpTableInfo *MJTI,
- const MachineBasicBlock *MBB, unsigned uid) const;
- void EmitLLVMUsedList(const ConstantArray *InitList);
- /// Emit llvm.ident metadata in an '.ident' directive.
- void EmitModuleIdents(Module &M);
- void EmitXXStructorList(const Constant *List, bool isCtor);
- GCMetadataPrinter *GetOrCreateGCPrinter(GCStrategy *C);
- };
+ /// Return the target triple string.
+ StringRef getTargetTriple() const;
+
+ /// Return the current section we are emitting to.
+ const MCSection *getCurrentSection() const;
+
+ void getNameWithPrefix(SmallVectorImpl<char> &Name,
+ const GlobalValue *GV) const;
+
+ MCSymbol *getSymbol(const GlobalValue *GV) const;
+
+ //===------------------------------------------------------------------===//
+ // MachineFunctionPass Implementation.
+ //===------------------------------------------------------------------===//
+
+ /// Record analysis usage.
+ ///
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ /// Set up the AsmPrinter when we are working on a new module. If your pass
+ /// overrides this, it must make sure to explicitly call this implementation.
+ bool doInitialization(Module &M) override;
+
+ /// Shut down the asmprinter. If you override this in your pass, you must make
+ /// sure to call it explicitly.
+ bool doFinalization(Module &M) override;
+
+ /// Emit the specified function out to the OutStreamer.
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ SetupMachineFunction(MF);
+ EmitFunctionHeader();
+ EmitFunctionBody();
+ return false;
+ }
+
+ //===------------------------------------------------------------------===//
+ // Coarse grained IR lowering routines.
+ //===------------------------------------------------------------------===//
+
+ /// This should be called when a new MachineFunction is being processed from
+ /// runOnMachineFunction.
+ void SetupMachineFunction(MachineFunction &MF);
+
+ /// This method emits the header for the current function.
+ void EmitFunctionHeader();
+
+ /// This method emits the body and trailer for a function.
+ void EmitFunctionBody();
+
+ void emitCFIInstruction(const MachineInstr &MI);
+
+ enum CFIMoveType { CFI_M_None, CFI_M_EH, CFI_M_Debug };
+ CFIMoveType needsCFIMoves();
+
+ bool needsSEHMoves();
+
+ /// Print to the current output stream assembly representations of the
+ /// constants in the constant pool MCP. This is used to print out constants
+ /// which have been "spilled to memory" by the code generator.
+ ///
+ virtual void EmitConstantPool();
+
+ /// Print assembly representations of the jump tables used by the current
+ /// function to the current output stream.
+ ///
+ void EmitJumpTableInfo();
+
+ /// Emit the specified global variable to the .s file.
+ virtual void EmitGlobalVariable(const GlobalVariable *GV);
+
+ /// Check to see if the specified global is a special global used by LLVM. If
+ /// so, emit it and return true, otherwise do nothing and return false.
+ bool EmitSpecialLLVMGlobal(const GlobalVariable *GV);
+
+ /// Emit an alignment directive to the specified power of two boundary. For
+ /// example, if you pass in 3 here, you will get an 8 byte alignment. If a
+ /// global value is specified, and if that global has an explicit alignment
+ /// requested, it will override the alignment request if required for
+ /// correctness.
+ ///
+ void EmitAlignment(unsigned NumBits, const GlobalObject *GO = nullptr) const;
+
+ /// This method prints the label for the specified MachineBasicBlock, an
+ /// alignment (if present) and a comment describing it if appropriate.
+ void EmitBasicBlockStart(const MachineBasicBlock &MBB) const;
+
+ /// \brief Print a general LLVM constant to the .s file.
+ void EmitGlobalConstant(const Constant *CV);
+
+ //===------------------------------------------------------------------===//
+ // Overridable Hooks
+ //===------------------------------------------------------------------===//
+
+ // Targets can, or in the case of EmitInstruction, must implement these to
+ // customize output.
+
+ /// This virtual method can be overridden by targets that want to emit
+ /// something at the start of their file.
+ virtual void EmitStartOfAsmFile(Module &) {}
+
+ /// This virtual method can be overridden by targets that want to emit
+ /// something at the end of their file.
+ virtual void EmitEndOfAsmFile(Module &) {}
+
+ /// Targets can override this to emit stuff before the first basic block in
+ /// the function.
+ virtual void EmitFunctionBodyStart() {}
+
+ /// Targets can override this to emit stuff after the last basic block in the
+ /// function.
+ virtual void EmitFunctionBodyEnd() {}
+
+ /// Targets should implement this to emit instructions.
+ virtual void EmitInstruction(const MachineInstr *) {
+ llvm_unreachable("EmitInstruction not implemented");
+ }
+
+ /// Return the symbol for the specified constant pool entry.
+ virtual MCSymbol *GetCPISymbol(unsigned CPID) const;
+
+ virtual void EmitFunctionEntryLabel();
+
+ virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV);
+
+ /// Targets can override this to change how global constants that are part of
+ /// a C++ static/global constructor list are emitted.
+ virtual void EmitXXStructor(const Constant *CV) { EmitGlobalConstant(CV); }
+
+ /// Return true if the basic block has exactly one predecessor and the control
+ /// transfer mechanism between the predecessor and this block is a
+ /// fall-through.
+ virtual bool
+ isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const;
+
+ /// Targets can override this to customize the output of IMPLICIT_DEF
+ /// instructions in verbose mode.
+ virtual void emitImplicitDef(const MachineInstr *MI) const;
+
+ //===------------------------------------------------------------------===//
+ // Symbol Lowering Routines.
+ //===------------------------------------------------------------------===//
+public:
+ /// Return the MCSymbol corresponding to the assembler temporary label with
+ /// the specified stem and unique ID.
+ MCSymbol *GetTempSymbol(Twine Name, unsigned ID) const;
+
+ /// Return an assembler temporary label with the specified stem.
+ MCSymbol *GetTempSymbol(Twine Name) const;
+
+ /// Return the MCSymbol for a private symbol with global value name as its
+ /// base, with the specified suffix.
+ MCSymbol *getSymbolWithGlobalValueBase(const GlobalValue *GV,
+ StringRef Suffix) const;
+
+ /// Return the MCSymbol for the specified ExternalSymbol.
+ MCSymbol *GetExternalSymbolSymbol(StringRef Sym) const;
+
+ /// Return the symbol for the specified jump table entry.
+ MCSymbol *GetJTISymbol(unsigned JTID, bool isLinkerPrivate = false) const;
+
+ /// Return the symbol for the specified jump table .set
+ /// FIXME: privatize to AsmPrinter.
+ MCSymbol *GetJTSetSymbol(unsigned UID, unsigned MBBID) const;
+
+ /// Return the MCSymbol used to satisfy BlockAddress uses of the specified
+ /// basic block.
+ MCSymbol *GetBlockAddressSymbol(const BlockAddress *BA) const;
+ MCSymbol *GetBlockAddressSymbol(const BasicBlock *BB) const;
+
+ //===------------------------------------------------------------------===//
+ // Emission Helper Routines.
+ //===------------------------------------------------------------------===//
+public:
+  /// This is just a convenient handler for printing offsets.
+ void printOffset(int64_t Offset, raw_ostream &OS) const;
+
+ /// Emit a byte directive and value.
+ ///
+ void EmitInt8(int Value) const;
+
+ /// Emit a short directive and value.
+ ///
+ void EmitInt16(int Value) const;
+
+ /// Emit a long directive and value.
+ ///
+ void EmitInt32(int Value) const;
+
+ /// Emit something like ".long Hi-Lo" where the size in bytes of the directive
+ /// is specified by Size and Hi/Lo specify the labels. This implicitly uses
+ /// .set if it is available.
+ void EmitLabelDifference(const MCSymbol *Hi, const MCSymbol *Lo,
+ unsigned Size) const;
+
+ /// Emit something like ".long Hi+Offset-Lo" where the size in bytes of the
+ /// directive is specified by Size and Hi/Lo specify the labels. This
+ /// implicitly uses .set if it is available.
+ void EmitLabelOffsetDifference(const MCSymbol *Hi, uint64_t Offset,
+ const MCSymbol *Lo, unsigned Size) const;
+
+ /// Emit something like ".long Label+Offset" where the size in bytes of the
+ /// directive is specified by Size and Label specifies the label. This
+ /// implicitly uses .set if it is available.
+ void EmitLabelPlusOffset(const MCSymbol *Label, uint64_t Offset,
+ unsigned Size, bool IsSectionRelative = false) const;
+
+ /// Emit something like ".long Label" where the size in bytes of the directive
+ /// is specified by Size and Label specifies the label.
+ void EmitLabelReference(const MCSymbol *Label, unsigned Size,
+ bool IsSectionRelative = false) const {
+ EmitLabelPlusOffset(Label, 0, Size, IsSectionRelative);
+ }
+
+ //===------------------------------------------------------------------===//
+ // Dwarf Emission Helper Routines
+ //===------------------------------------------------------------------===//
+
+ /// Emit the specified signed leb128 value.
+ void EmitSLEB128(int64_t Value, const char *Desc = nullptr) const;
+
+ /// Emit the specified unsigned leb128 value.
+ void EmitULEB128(uint64_t Value, const char *Desc = nullptr,
+ unsigned PadTo = 0) const;
+
+ /// Emit a .byte 42 directive for a DW_CFA_xxx value.
+ void EmitCFAByte(unsigned Val) const;
+
+ /// Emit a .byte 42 directive that corresponds to an encoding. If verbose
+ /// assembly output is enabled, we output comments describing the encoding.
+ /// Desc is a string saying what the encoding is specifying (e.g. "LSDA").
+ void EmitEncodingByte(unsigned Val, const char *Desc = nullptr) const;
+
+ /// Return the size of the encoding in bytes.
+ unsigned GetSizeOfEncodedValue(unsigned Encoding) const;
+
+ /// Emit reference to a ttype global with a specified encoding.
+ void EmitTTypeReference(const GlobalValue *GV, unsigned Encoding) const;
+
+ /// Emit the 4-byte offset of Label from the start of its section. This can
+ /// be done with a special directive if the target supports it (e.g. cygwin)
+ /// or by emitting it as an offset from a label at the start of the section.
+ ///
+ /// SectionLabel is a temporary label emitted at the start of the section
+ /// that Label lives in.
+ void EmitSectionOffset(const MCSymbol *Label,
+ const MCSymbol *SectionLabel) const;
+
+ /// Get the value for DW_AT_APPLE_isa. Zero if no isa encoding specified.
+ virtual unsigned getISAEncoding() { return 0; }
+
+ /// \brief Emit a partial DWARF register operation.
+ /// \param MLoc the register
+ /// \param PieceSize size and
+ /// \param PieceOffset offset of the piece in bits, if this is one
+ /// piece of an aggregate value.
+ ///
+  /// If size and offset are zero, an operation for the entire
+ /// register is emitted: Some targets do not provide a DWARF
+ /// register number for every register. If this is the case, this
+ /// function will attempt to emit a DWARF register by emitting a
+ /// piece of a super-register or by piecing together multiple
+ /// subregisters that alias the register.
+ void EmitDwarfRegOpPiece(ByteStreamer &BS, const MachineLocation &MLoc,
+ unsigned PieceSize = 0,
+ unsigned PieceOffset = 0) const;
+
+ /// Emit dwarf register operation.
+ /// \param Indirect whether this is a register-indirect address
+ virtual void EmitDwarfRegOp(ByteStreamer &BS, const MachineLocation &MLoc,
+ bool Indirect) const;
+
+ //===------------------------------------------------------------------===//
+ // Dwarf Lowering Routines
+ //===------------------------------------------------------------------===//
+
+ /// \brief Emit frame instruction to describe the layout of the frame.
+ void emitCFIInstruction(const MCCFIInstruction &Inst) const;
+
+ //===------------------------------------------------------------------===//
+ // Inline Asm Support
+ //===------------------------------------------------------------------===//
+public:
+ // These are hooks that targets can override to implement inline asm
+ // support. These should probably be moved out of AsmPrinter someday.
+
+ /// Print information related to the specified machine instr that is
+ /// independent of the operand, and may be independent of the instr itself.
+ /// This can be useful for portably encoding the comment character or other
+ /// bits of target-specific knowledge into the asmstrings. The syntax used is
+ /// ${:comment}. Targets can override this to add support for their own
+ /// strange codes.
+ virtual void PrintSpecial(const MachineInstr *MI, raw_ostream &OS,
+ const char *Code) const;
+
+ /// Print the specified operand of MI, an INLINEASM instruction, using the
+ /// specified assembler variant. Targets should override this to format as
+ /// appropriate. This method can return true if the operand is erroneous.
+ virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &OS);
+
+ /// Print the specified operand of MI, an INLINEASM instruction, using the
+ /// specified assembler variant as an address. Targets should override this to
+ /// format as appropriate. This method can return true if the operand is
+ /// erroneous.
+ virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &OS);
+
+ /// Let the target do anything it needs to do after emitting inlineasm.
+  /// This callback can be used to restore the original mode in case the
+ /// inlineasm contains directives to switch modes.
+ /// \p StartInfo - the original subtarget info before inline asm
+ /// \p EndInfo - the final subtarget info after parsing the inline asm,
+ /// or NULL if the value is unknown.
+ virtual void emitInlineAsmEnd(const MCSubtargetInfo &StartInfo,
+ const MCSubtargetInfo *EndInfo) const;
+
+private:
+ /// Private state for PrintSpecial()
+ // Assign a unique ID to this machine instruction.
+ mutable const MachineInstr *LastMI;
+ mutable unsigned LastFn;
+ mutable unsigned Counter;
+ mutable unsigned SetCounter;
+
+ /// Emit a blob of inline asm to the output streamer.
+ void
+ EmitInlineAsm(StringRef Str, const MDNode *LocMDNode = nullptr,
+ InlineAsm::AsmDialect AsmDialect = InlineAsm::AD_ATT) const;
+
+ /// This method formats and emits the specified machine instruction that is an
+ /// inline asm.
+ void EmitInlineAsm(const MachineInstr *MI) const;
+
+ //===------------------------------------------------------------------===//
+ // Internal Implementation Details
+ //===------------------------------------------------------------------===//
+
+  /// This emits visibility information about the symbol, if this is supported
+  /// by the target.
+ void EmitVisibility(MCSymbol *Sym, unsigned Visibility,
+ bool IsDefinition = true) const;
+
+ void EmitLinkage(const GlobalValue *GV, MCSymbol *GVSym) const;
+
+ void EmitJumpTableEntry(const MachineJumpTableInfo *MJTI,
+ const MachineBasicBlock *MBB, unsigned uid) const;
+ void EmitLLVMUsedList(const ConstantArray *InitList);
+ /// Emit llvm.ident metadata in an '.ident' directive.
+ void EmitModuleIdents(Module &M);
+ void EmitXXStructorList(const Constant *List, bool isCtor);
+ GCMetadataPrinter *GetOrCreateGCPrinter(GCStrategy &C);
+};
}
#endif
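
The reflowed comments above spell out the AsmPrinter contract: targets must override EmitInstruction(), may override hooks such as EmitStartOfAsmFile(), and hand lowered MCInsts to EmitToStreamer(). A minimal sketch of how those pieces fit together (not part of this patch): the class name and the lowerToMCInst() helper are invented for illustration, while the overridden hooks, OutStreamer, isVerbose() and EmitToStreamer() are the interfaces declared in the header above.

#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCStreamer.h"

namespace llvm {

class MyTargetAsmPrinter : public AsmPrinter {
public:
  MyTargetAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
      : AsmPrinter(TM, Streamer) {}

  // Required hook: lower each MachineInstr to an MCInst and hand it to the
  // streamer through the base-class helper.
  void EmitInstruction(const MachineInstr *MI) override {
    MCInst TmpInst;
    lowerToMCInst(MI, TmpInst); // hypothetical target-specific lowering
    EmitToStreamer(OutStreamer, TmpInst);
  }

  // Optional hook: attach a comment near the top of the .s file when verbose
  // assembly output is enabled.
  void EmitStartOfAsmFile(Module &) override {
    if (isVerbose())
      OutStreamer.AddComment("lowered by MyTargetAsmPrinter");
  }

private:
  void lowerToMCInst(const MachineInstr *MI, MCInst &OutMI); // assumed helper
};

} // end namespace llvm
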
diff --git a/include/llvm/CodeGen/CallingConvLower.h b/include/llvm/CodeGen/CallingConvLower.h
index 50bbb0d..04af4bd 100644
--- a/include/llvm/CodeGen/CallingConvLower.h
+++ b/include/llvm/CodeGen/CallingConvLower.h
@@ -112,6 +112,23 @@ public:
return Ret;
}
+ // There is no need to differentiate between a pending CCValAssign and other
+ // kinds, as they are stored in a different list.
+ static CCValAssign getPending(unsigned ValNo, MVT ValVT, MVT LocVT,
+ LocInfo HTP) {
+ return getReg(ValNo, ValVT, 0, LocVT, HTP);
+ }
+
+ void convertToReg(unsigned RegNo) {
+ Loc = RegNo;
+ isMem = false;
+ }
+
+ void convertToMem(unsigned Offset) {
+ Loc = Offset;
+ isMem = true;
+ }
+
unsigned getValNo() const { return ValNo; }
MVT getValVT() const { return ValVT; }
@@ -164,6 +181,7 @@ private:
unsigned StackOffset;
SmallVector<uint32_t, 16> UsedRegs;
+ SmallVector<CCValAssign, 4> PendingLocs;
// ByValInfo and SmallVector<ByValInfo, 4> ByValRegs:
//
@@ -279,7 +297,7 @@ public:
/// getFirstUnallocated - Return the first unallocated register in the set, or
/// NumRegs if they are all allocated.
- unsigned getFirstUnallocated(const uint16_t *Regs, unsigned NumRegs) const {
+ unsigned getFirstUnallocated(const MCPhysReg *Regs, unsigned NumRegs) const {
for (unsigned i = 0; i != NumRegs; ++i)
if (!isAllocated(Regs[i]))
return i;
@@ -306,7 +324,7 @@ public:
/// AllocateReg - Attempt to allocate one of the specified registers. If none
/// are available, return zero. Otherwise, return the first one available,
/// marking it and any aliases as allocated.
- unsigned AllocateReg(const uint16_t *Regs, unsigned NumRegs) {
+ unsigned AllocateReg(const MCPhysReg *Regs, unsigned NumRegs) {
unsigned FirstUnalloc = getFirstUnallocated(Regs, NumRegs);
if (FirstUnalloc == NumRegs)
return 0; // Didn't find the reg.
@@ -317,8 +335,33 @@ public:
return Reg;
}
+ /// AllocateRegBlock - Attempt to allocate a block of RegsRequired consecutive
+ /// registers. If this is not possible, return zero. Otherwise, return the first
+  /// register of the block that was allocated, marking the entire block as allocated.
+ unsigned AllocateRegBlock(const uint16_t *Regs, unsigned NumRegs, unsigned RegsRequired) {
+ for (unsigned StartIdx = 0; StartIdx <= NumRegs - RegsRequired; ++StartIdx) {
+ bool BlockAvailable = true;
+ // Check for already-allocated regs in this block
+ for (unsigned BlockIdx = 0; BlockIdx < RegsRequired; ++BlockIdx) {
+ if (isAllocated(Regs[StartIdx + BlockIdx])) {
+ BlockAvailable = false;
+ break;
+ }
+ }
+ if (BlockAvailable) {
+ // Mark the entire block as allocated
+ for (unsigned BlockIdx = 0; BlockIdx < RegsRequired; ++BlockIdx) {
+ MarkAllocated(Regs[StartIdx + BlockIdx]);
+ }
+ return Regs[StartIdx];
+ }
+ }
+ // No block was available
+ return 0;
+ }
+
/// Version of AllocateReg with list of registers to be shadowed.
- unsigned AllocateReg(const uint16_t *Regs, const uint16_t *ShadowRegs,
+ unsigned AllocateReg(const MCPhysReg *Regs, const MCPhysReg *ShadowRegs,
unsigned NumRegs) {
unsigned FirstUnalloc = getFirstUnallocated(Regs, NumRegs);
if (FirstUnalloc == NumRegs)
@@ -351,7 +394,7 @@ public:
/// Version of AllocateStack with list of extra registers to be shadowed.
/// Note that, unlike AllocateReg, this shadows ALL of the shadow registers.
unsigned AllocateStack(unsigned Size, unsigned Align,
- const uint16_t *ShadowRegs, unsigned NumShadowRegs) {
+ const MCPhysReg *ShadowRegs, unsigned NumShadowRegs) {
for (unsigned i = 0; i < NumShadowRegs; ++i)
MarkAllocated(ShadowRegs[i]);
return AllocateStack(Size, Align);
@@ -411,6 +454,11 @@ public:
ParmContext getCallOrPrologue() const { return CallOrPrologue; }
+ // Get list of pending assignments
+ SmallVectorImpl<llvm::CCValAssign> &getPendingLocs() {
+ return PendingLocs;
+ }
+
private:
/// MarkAllocated - Mark a register and all of its aliases as allocated.
void MarkAllocated(unsigned Reg);
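
The new AllocateRegBlock() helper above is intended to be called from a target's custom calling-convention handlers when a value must live in consecutive registers. A minimal sketch of such a handler (not part of this patch): the handler name and the register numbers are placeholders, while AllocateRegBlock(), AllocateStack(), addLoc() and the CCValAssign::getCustomReg()/getCustomMem() factories come from this header.

#include "llvm/CodeGen/CallingConvLower.h"

using namespace llvm;

// Assign a 64-bit value to two consecutive 32-bit argument registers when a
// free pair exists, otherwise to an 8-byte, 8-aligned stack slot.
static bool CC_MyTarget_i64_RegPair(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                    CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  // Placeholder register numbers standing in for MyTarget::R0..R3.
  static const uint16_t ArgRegs[] = { 1, 2, 3, 4 };

  if (unsigned Reg = State.AllocateRegBlock(ArgRegs, 4, 2)) {
    // Reg is the first register of the allocated pair; Reg+1 is implied.
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  unsigned Offset = State.AllocateStack(8, 8);
  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true; // the value has been assigned either way
}
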
diff --git a/include/llvm/CodeGen/CommandFlags.h b/include/llvm/CodeGen/CommandFlags.h
index 02a4bb5..2956ad8 100644
--- a/include/llvm/CodeGen/CommandFlags.h
+++ b/include/llvm/CodeGen/CommandFlags.h
@@ -16,6 +16,7 @@
#ifndef LLVM_CODEGEN_COMMANDFLAGS_H
#define LLVM_CODEGEN_COMMANDFLAGS_H
+#include "llvm/MC/MCTargetOptionsCommandFlags.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"
@@ -69,11 +70,6 @@ CMModel("code-model",
"Large code model"),
clEnumValEnd));
-cl::opt<bool>
-RelaxAll("mc-relax-all",
- cl::desc("When used with filetype=obj, "
- "relax all fixups in the emitted object file"));
-
cl::opt<TargetMachine::CodeGenFileType>
FileType("filetype", cl::init(TargetMachine::CGFT_AssemblyFile),
cl::desc("Choose a file type (not all types are supported by all targets):"),
@@ -86,12 +82,6 @@ FileType("filetype", cl::init(TargetMachine::CGFT_AssemblyFile),
"Emit nothing, for performance testing"),
clEnumValEnd));
-cl::opt<bool> DisableCFI("disable-cfi", cl::Hidden,
- cl::desc("Do not use .cfi_* directives"));
-
-cl::opt<bool> EnableDwarfDirectory("enable-dwarf-directory", cl::Hidden,
- cl::desc("Use .file directives with an explicit directory."));
-
cl::opt<bool>
DisableRedZone("disable-red-zone",
cl::desc("Do not emit code that uses the red zone."),
@@ -190,11 +180,6 @@ EnablePIE("enable-pie",
cl::init(false));
cl::opt<bool>
-SegmentedStacks("segmented-stacks",
- cl::desc("Use segmented stacks if possible."),
- cl::init(false));
-
-cl::opt<bool>
UseInitArray("use-init-array",
cl::desc("Use .init_array instead of .ctors."),
cl::init(false));
@@ -208,6 +193,15 @@ cl::opt<std::string> StartAfter("start-after",
cl::value_desc("pass-name"),
cl::init(""));
+cl::opt<bool> DataSections("data-sections",
+ cl::desc("Emit data into separate sections"),
+ cl::init(false));
+
+cl::opt<bool>
+FunctionSections("function-sections",
+ cl::desc("Emit functions into separate sections"),
+ cl::init(false));
+
// Common utility function tightly tied to the options listed here. Initializes
// a TargetOptions object with CodeGen flags and returns it.
static inline TargetOptions InitTargetOptionsFromCodeGenFlags() {
@@ -229,8 +223,12 @@ static inline TargetOptions InitTargetOptionsFromCodeGenFlags() {
Options.StackAlignmentOverride = OverrideStackAlignment;
Options.TrapFuncName = TrapFuncName;
Options.PositionIndependentExecutable = EnablePIE;
- Options.EnableSegmentedStacks = SegmentedStacks;
Options.UseInitArray = UseInitArray;
+ Options.DataSections = DataSections;
+ Options.FunctionSections = FunctionSections;
+
+ Options.MCOptions = InitMCTargetOptionsFromFlags();
+
return Options;
}
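
After this change a driver no longer declares the dropped MC-level flags itself; it parses the command line and folds everything into one TargetOptions via InitTargetOptionsFromCodeGenFlags(), which now also pulls in InitMCTargetOptionsFromFlags(). A minimal, hypothetical llc-style sketch of that flow (target selection and error handling omitted):

#include "llvm/CodeGen/CommandFlags.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"

// CommandFlags.h declares the cl::opt flag variables and
// InitTargetOptionsFromCodeGenFlags() at global scope, so they are visible
// directly after the include.
int main(int argc, char **argv) {
  cl::ParseCommandLineOptions(argc, argv, "CommandFlags sketch\n");

  // -relocation-model, -data-sections, -function-sections, the MC-level flags
  // (via InitMCTargetOptionsFromFlags), etc. all land in one struct.
  TargetOptions Options = InitTargetOptionsFromCodeGenFlags();

  if (Options.FunctionSections)
    errs() << "emitting each function into its own section\n";
  if (Options.DataSections)
    errs() << "emitting data into separate sections\n";
  return 0;
}
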
diff --git a/include/llvm/CodeGen/EdgeBundles.h b/include/llvm/CodeGen/EdgeBundles.h
index 2899fe1..c31fad2 100644
--- a/include/llvm/CodeGen/EdgeBundles.h
+++ b/include/llvm/CodeGen/EdgeBundles.h
@@ -59,11 +59,6 @@ private:
void getAnalysisUsage(AnalysisUsage&) const override;
};
-/// Specialize WriteGraph, the standard implementation won't work.
-raw_ostream &WriteGraph(raw_ostream &O, const EdgeBundles &G,
- bool ShortNames = false,
- const Twine &Title = "");
-
} // end namespace llvm
#endif
diff --git a/include/llvm/CodeGen/FastISel.h b/include/llvm/CodeGen/FastISel.h
index aeffbd4..bfeede2 100644
--- a/include/llvm/CodeGen/FastISel.h
+++ b/include/llvm/CodeGen/FastISel.h
@@ -343,6 +343,12 @@ protected:
unsigned createResultReg(const TargetRegisterClass *RC);
+ /// Try to constrain Op so that it is usable by argument OpNum of the provided
+ /// MCInstrDesc. If this fails, create a new virtual register in the correct
+ /// class and COPY the value there.
+ unsigned constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
+ unsigned OpNum);
+
/// Emit a constant in a register using target-specific logic, such as
/// constant pool loads.
virtual unsigned TargetMaterializeConstant(const Constant* C) {
diff --git a/include/llvm/CodeGen/FunctionLoweringInfo.h b/include/llvm/CodeGen/FunctionLoweringInfo.h
index 06e7aaa..9636b51 100644
--- a/include/llvm/CodeGen/FunctionLoweringInfo.h
+++ b/include/llvm/CodeGen/FunctionLoweringInfo.h
@@ -153,11 +153,11 @@ public:
/// register is a PHI destination and the PHI's LiveOutInfo is not valid.
const LiveOutInfo *GetLiveOutRegInfo(unsigned Reg) {
if (!LiveOutRegInfo.inBounds(Reg))
- return NULL;
+ return nullptr;
const LiveOutInfo *LOI = &LiveOutRegInfo[Reg];
if (!LOI->IsValid)
- return NULL;
+ return nullptr;
return LOI;
}
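
Callers of GetLiveOutRegInfo() are expected to treat the nullptr return exactly as they treated NULL before. A small sketch of the intended pattern, assuming the LiveOutInfo field names stay as they are today:

#include "llvm/CodeGen/FunctionLoweringInfo.h"

using namespace llvm;

// Sketch: query the live-out info recorded for a PHI destination register,
// falling back gracefully when GetLiveOutRegInfo() returns nullptr.
static unsigned knownSignBits(FunctionLoweringInfo &FuncInfo, unsigned Reg) {
  if (const FunctionLoweringInfo::LiveOutInfo *LOI =
          FuncInfo.GetLiveOutRegInfo(Reg))
    return LOI->NumSignBits; // info computed while visiting the PHI
  return 1;                  // nullptr: nothing is known about Reg
}
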
diff --git a/include/llvm/CodeGen/GCMetadata.h b/include/llvm/CodeGen/GCMetadata.h
index ea94542..ddcc823 100644
--- a/include/llvm/CodeGen/GCMetadata.h
+++ b/include/llvm/CodeGen/GCMetadata.h
@@ -38,6 +38,8 @@
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
+#include <memory>
+
namespace llvm {
class AsmPrinter;
class GCStrategy;
@@ -163,7 +165,7 @@ namespace llvm {
///
class GCModuleInfo : public ImmutablePass {
typedef StringMap<GCStrategy*> strategy_map_type;
- typedef std::vector<GCStrategy*> list_type;
+ typedef std::vector<std::unique_ptr<GCStrategy>> list_type;
typedef DenseMap<const Function*,GCFunctionInfo*> finfo_map_type;
strategy_map_type StrategyMap;
@@ -178,7 +180,6 @@ namespace llvm {
static char ID;
GCModuleInfo();
- ~GCModuleInfo();
/// clear - Resets the pass. Any pass, which uses GCModuleInfo, should
/// call it in doFinalization().
diff --git a/include/llvm/CodeGen/GCStrategy.h b/include/llvm/CodeGen/GCStrategy.h
index dfc26d7..81e1f85 100644
--- a/include/llvm/CodeGen/GCStrategy.h
+++ b/include/llvm/CodeGen/GCStrategy.h
@@ -54,7 +54,7 @@ namespace llvm {
/// be abstractly described.
class GCStrategy {
public:
- typedef std::vector<GCFunctionInfo*> list_type;
+ typedef std::vector<std::unique_ptr<GCFunctionInfo>> list_type;
typedef list_type::iterator iterator;
private:
@@ -77,7 +77,7 @@ namespace llvm {
public:
GCStrategy();
- virtual ~GCStrategy();
+ virtual ~GCStrategy() {}
/// getName - The name of the GC strategy, for debugging.
diff --git a/include/llvm/CodeGen/ISDOpcodes.h b/include/llvm/CodeGen/ISDOpcodes.h
index 89b0908..49891b2 100644
--- a/include/llvm/CodeGen/ISDOpcodes.h
+++ b/include/llvm/CodeGen/ISDOpcodes.h
@@ -72,6 +72,11 @@ namespace ISD {
/// the parent's frame or return address, and so on.
FRAMEADDR, RETURNADDR,
+ /// READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on
+ /// the DAG, which implements the named register global variables extension.
+ READ_REGISTER,
+ WRITE_REGISTER,
+
/// FRAME_TO_ARGS_OFFSET - This node represents offset from frame pointer to
/// first (possible) on-stack argument. This is needed for correct stack
/// adjustment during unwind.
diff --git a/include/llvm/CodeGen/JITCodeEmitter.h b/include/llvm/CodeGen/JITCodeEmitter.h
index bb0df2e..dc2a027 100644
--- a/include/llvm/CodeGen/JITCodeEmitter.h
+++ b/include/llvm/CodeGen/JITCodeEmitter.h
@@ -260,7 +260,7 @@ public:
// Check for buffer overflow.
if (Size >= (uintptr_t)(BufferEnd-CurBufferPtr)) {
CurBufferPtr = BufferEnd;
- Result = 0;
+ Result = nullptr;
} else {
// Allocate the space.
Result = CurBufferPtr;
@@ -334,7 +334,9 @@ public:
/// getLabelLocations - Return the label locations map of the label IDs to
/// their address.
- virtual DenseMap<MCSymbol*, uintptr_t> *getLabelLocations() { return 0; }
+ virtual DenseMap<MCSymbol*, uintptr_t> *getLabelLocations() {
+ return nullptr;
+ }
};
} // End llvm namespace
diff --git a/include/llvm/CodeGen/LatencyPriorityQueue.h b/include/llvm/CodeGen/LatencyPriorityQueue.h
index d566da8..cf601ae 100644
--- a/include/llvm/CodeGen/LatencyPriorityQueue.h
+++ b/include/llvm/CodeGen/LatencyPriorityQueue.h
@@ -62,7 +62,7 @@ namespace llvm {
}
void releaseState() override {
- SUnits = 0;
+ SUnits = nullptr;
}
unsigned getLatency(unsigned NodeNum) const {
diff --git a/include/llvm/CodeGen/LexicalScopes.h b/include/llvm/CodeGen/LexicalScopes.h
index e0593f8..31d6872 100644
--- a/include/llvm/CodeGen/LexicalScopes.h
+++ b/include/llvm/CodeGen/LexicalScopes.h
@@ -21,16 +21,17 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/ValueHandle.h"
#include <utility>
+#include <unordered_map>
namespace llvm {
class MachineInstr;
class MachineBasicBlock;
class MachineFunction;
-class LexicalScope;
//===----------------------------------------------------------------------===//
/// InsnRange - This is used to track range of instructions with identical
@@ -39,13 +40,103 @@ class LexicalScope;
typedef std::pair<const MachineInstr *, const MachineInstr *> InsnRange;
//===----------------------------------------------------------------------===//
+/// LexicalScope - This class is used to track scope information.
+///
+class LexicalScope {
+
+public:
+ LexicalScope(LexicalScope *P, const MDNode *D, const MDNode *I, bool A)
+ : Parent(P), Desc(D), InlinedAtLocation(I), AbstractScope(A),
+ LastInsn(nullptr), FirstInsn(nullptr), DFSIn(0), DFSOut(0) {
+ if (Parent)
+ Parent->addChild(this);
+ }
+
+ // Accessors.
+ LexicalScope *getParent() const { return Parent; }
+ const MDNode *getDesc() const { return Desc; }
+ const MDNode *getInlinedAt() const { return InlinedAtLocation; }
+ const MDNode *getScopeNode() const { return Desc; }
+ bool isAbstractScope() const { return AbstractScope; }
+ SmallVectorImpl<LexicalScope *> &getChildren() { return Children; }
+ SmallVectorImpl<InsnRange> &getRanges() { return Ranges; }
+
+ /// addChild - Add a child scope.
+ void addChild(LexicalScope *S) { Children.push_back(S); }
+
+ /// openInsnRange - This scope covers instruction range starting from MI.
+ void openInsnRange(const MachineInstr *MI) {
+ if (!FirstInsn)
+ FirstInsn = MI;
+
+ if (Parent)
+ Parent->openInsnRange(MI);
+ }
+
+ /// extendInsnRange - Extend the current instruction range covered by
+ /// this scope.
+ void extendInsnRange(const MachineInstr *MI) {
+ assert(FirstInsn && "MI Range is not open!");
+ LastInsn = MI;
+ if (Parent)
+ Parent->extendInsnRange(MI);
+ }
+
+ /// closeInsnRange - Create a range based on FirstInsn and LastInsn collected
+ /// until now. This is used when a new scope is encountered while walking
+ /// machine instructions.
+ void closeInsnRange(LexicalScope *NewScope = nullptr) {
+ assert(LastInsn && "Last insn missing!");
+ Ranges.push_back(InsnRange(FirstInsn, LastInsn));
+ FirstInsn = nullptr;
+ LastInsn = nullptr;
+ // If Parent dominates NewScope then do not close Parent's instruction
+ // range.
+ if (Parent && (!NewScope || !Parent->dominates(NewScope)))
+ Parent->closeInsnRange(NewScope);
+ }
+
+ /// dominates - Return true if current scope dominates given lexical scope.
+ bool dominates(const LexicalScope *S) const {
+ if (S == this)
+ return true;
+ if (DFSIn < S->getDFSIn() && DFSOut > S->getDFSOut())
+ return true;
+ return false;
+ }
+
+ // Depth First Search support to walk and manipulate LexicalScope hierarchy.
+ unsigned getDFSOut() const { return DFSOut; }
+ void setDFSOut(unsigned O) { DFSOut = O; }
+ unsigned getDFSIn() const { return DFSIn; }
+ void setDFSIn(unsigned I) { DFSIn = I; }
+
+ /// dump - print lexical scope.
+ void dump(unsigned Indent = 0) const;
+
+private:
+ LexicalScope *Parent; // Parent to this scope.
+ AssertingVH<const MDNode> Desc; // Debug info descriptor.
+ AssertingVH<const MDNode> InlinedAtLocation; // Location at which this
+ // scope is inlined.
+ bool AbstractScope; // Abstract Scope
+ SmallVector<LexicalScope *, 4> Children; // Scopes defined in scope.
+ // Contents not owned.
+ SmallVector<InsnRange, 4> Ranges;
+
+ const MachineInstr *LastInsn; // Last instruction of this scope.
+ const MachineInstr *FirstInsn; // First instruction of this scope.
+  unsigned DFSIn, DFSOut;                     // In & Out Depth used to determine
+ // scope nesting.
+};
+
+//===----------------------------------------------------------------------===//
/// LexicalScopes - This class provides interface to collect and use lexical
/// scoping information from machine instruction.
///
class LexicalScopes {
public:
- LexicalScopes() : MF(NULL), CurrentFnLexicalScope(NULL) {}
- ~LexicalScopes();
+ LexicalScopes() : MF(nullptr), CurrentFnLexicalScope(nullptr) {}
  /// initialize - Scan machine function and construct lexical scope nest, resets
/// the instance if necessary.
@@ -55,7 +146,7 @@ public:
void reset();
/// empty - Return true if there is any lexical scope information available.
- bool empty() { return CurrentFnLexicalScope == NULL; }
+ bool empty() { return CurrentFnLexicalScope == nullptr; }
/// isCurrentFunctionScope - Return true if given lexical scope represents
/// current function.
@@ -87,20 +178,20 @@ public:
return AbstractScopesList;
}
- /// findAbstractScope - Find an abstract scope or return NULL.
+ /// findAbstractScope - Find an abstract scope or return null.
LexicalScope *findAbstractScope(const MDNode *N) {
- return AbstractScopeMap.lookup(N);
+ auto I = AbstractScopeMap.find(N);
+ return I != AbstractScopeMap.end() ? &I->second : nullptr;
}
/// findInlinedScope - Find an inlined scope for the given DebugLoc or return
/// NULL.
- LexicalScope *findInlinedScope(DebugLoc DL) {
- return InlinedLexicalScopeMap.lookup(DL);
- }
+ LexicalScope *findInlinedScope(DebugLoc DL);
- /// findLexicalScope - Find regular lexical scope or return NULL.
+ /// findLexicalScope - Find regular lexical scope or return null.
LexicalScope *findLexicalScope(const MDNode *N) {
- return LexicalScopeMap.lookup(N);
+ auto I = LexicalScopeMap.find(N);
+ return I != LexicalScopeMap.end() ? &I->second : nullptr;
}
/// dump - Print data structures to dbgs().
@@ -132,17 +223,19 @@ private:
private:
const MachineFunction *MF;
- /// LexicalScopeMap - Tracks the scopes in the current function. Owns the
- /// contained LexicalScope*s.
- DenseMap<const MDNode *, LexicalScope *> LexicalScopeMap;
+ /// LexicalScopeMap - Tracks the scopes in the current function.
+ // Use an unordered_map to ensure value pointer validity over insertion.
+ std::unordered_map<const MDNode *, LexicalScope> LexicalScopeMap;
/// InlinedLexicalScopeMap - Tracks inlined function scopes in current
/// function.
- DenseMap<DebugLoc, LexicalScope *> InlinedLexicalScopeMap;
+ std::unordered_map<std::pair<const MDNode *, const MDNode *>, LexicalScope,
+ pair_hash<const MDNode *, const MDNode *>>
+ InlinedLexicalScopeMap;
  /// AbstractScopeMap - These scopes are not included in LexicalScopeMap.
- /// AbstractScopes owns its LexicalScope*s.
- DenseMap<const MDNode *, LexicalScope *> AbstractScopeMap;
+ // Use an unordered_map to ensure value pointer validity over insertion.
+ std::unordered_map<const MDNode *, LexicalScope> AbstractScopeMap;
/// AbstractScopesList - Tracks abstract scopes constructed while processing
/// a function.
@@ -153,97 +246,6 @@ private:
LexicalScope *CurrentFnLexicalScope;
};
-//===----------------------------------------------------------------------===//
-/// LexicalScope - This class is used to track scope information.
-///
-class LexicalScope {
-
-public:
- LexicalScope(LexicalScope *P, const MDNode *D, const MDNode *I, bool A)
- : Parent(P), Desc(D), InlinedAtLocation(I), AbstractScope(A), LastInsn(0),
- FirstInsn(0), DFSIn(0), DFSOut(0) {
- if (Parent)
- Parent->addChild(this);
- }
-
- // Accessors.
- LexicalScope *getParent() const { return Parent; }
- const MDNode *getDesc() const { return Desc; }
- const MDNode *getInlinedAt() const { return InlinedAtLocation; }
- const MDNode *getScopeNode() const { return Desc; }
- bool isAbstractScope() const { return AbstractScope; }
- SmallVectorImpl<LexicalScope *> &getChildren() { return Children; }
- SmallVectorImpl<InsnRange> &getRanges() { return Ranges; }
-
- /// addChild - Add a child scope.
- void addChild(LexicalScope *S) { Children.push_back(S); }
-
- /// openInsnRange - This scope covers instruction range starting from MI.
- void openInsnRange(const MachineInstr *MI) {
- if (!FirstInsn)
- FirstInsn = MI;
-
- if (Parent)
- Parent->openInsnRange(MI);
- }
-
- /// extendInsnRange - Extend the current instruction range covered by
- /// this scope.
- void extendInsnRange(const MachineInstr *MI) {
- assert(FirstInsn && "MI Range is not open!");
- LastInsn = MI;
- if (Parent)
- Parent->extendInsnRange(MI);
- }
-
- /// closeInsnRange - Create a range based on FirstInsn and LastInsn collected
- /// until now. This is used when a new scope is encountered while walking
- /// machine instructions.
- void closeInsnRange(LexicalScope *NewScope = NULL) {
- assert(LastInsn && "Last insn missing!");
- Ranges.push_back(InsnRange(FirstInsn, LastInsn));
- FirstInsn = NULL;
- LastInsn = NULL;
- // If Parent dominates NewScope then do not close Parent's instruction
- // range.
- if (Parent && (!NewScope || !Parent->dominates(NewScope)))
- Parent->closeInsnRange(NewScope);
- }
-
- /// dominates - Return true if current scope dominates given lexical scope.
- bool dominates(const LexicalScope *S) const {
- if (S == this)
- return true;
- if (DFSIn < S->getDFSIn() && DFSOut > S->getDFSOut())
- return true;
- return false;
- }
-
- // Depth First Search support to walk and manipulate LexicalScope hierarchy.
- unsigned getDFSOut() const { return DFSOut; }
- void setDFSOut(unsigned O) { DFSOut = O; }
- unsigned getDFSIn() const { return DFSIn; }
- void setDFSIn(unsigned I) { DFSIn = I; }
-
- /// dump - print lexical scope.
- void dump(unsigned Indent = 0) const;
-
-private:
- LexicalScope *Parent; // Parent to this scope.
- AssertingVH<const MDNode> Desc; // Debug info descriptor.
- AssertingVH<const MDNode> InlinedAtLocation; // Location at which this
- // scope is inlined.
- bool AbstractScope; // Abstract Scope
- SmallVector<LexicalScope *, 4> Children; // Scopes defined in scope.
- // Contents not owned.
- SmallVector<InsnRange, 4> Ranges;
-
- const MachineInstr *LastInsn; // Last instruction of this scope.
- const MachineInstr *FirstInsn; // First instruction of this scope.
- unsigned DFSIn, DFSOut; // In & Out Depth use to determine
- // scope nesting.
-};
-
} // end llvm namespace
#endif
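The hunk above replaces a DenseMap<const MDNode *, LexicalScope *> keyed by scope descriptor with a std::unordered_map that stores LexicalScope by value; as the new comment notes, unordered_map never relocates its elements on insertion, so pointers to the mapped scopes stay valid where DenseMap would have moved them during a rehash. A minimal standalone sketch of that guarantee, using a hypothetical Scope struct rather than the LLVM class:

#include <cassert>
#include <string>
#include <unordered_map>

struct Scope { std::string Name; };          // stand-in for LexicalScope

int main() {
  std::unordered_map<int, Scope> Scopes;
  Scope *Outer = &Scopes[0];                 // pointer into the map's storage
  Outer->Name = "outer";

  for (int i = 1; i < 10000; ++i)            // force several rehashes
    Scopes[i].Name = "inner";

  // unordered_map guarantees reference stability across insertions, so the
  // pointer taken before the loop still refers to the same element.
  assert(Outer == &Scopes[0] && Outer->Name == "outer");
  return 0;
}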
diff --git a/include/llvm/CodeGen/LinkAllCodegenComponents.h b/include/llvm/CodeGen/LinkAllCodegenComponents.h
index 916c0f2..372c294 100644
--- a/include/llvm/CodeGen/LinkAllCodegenComponents.h
+++ b/include/llvm/CodeGen/LinkAllCodegenComponents.h
@@ -40,12 +40,15 @@ namespace {
llvm::linkErlangGC();
llvm::linkShadowStackGC();
- (void) llvm::createBURRListDAGScheduler(NULL, llvm::CodeGenOpt::Default);
- (void) llvm::createSourceListDAGScheduler(NULL,llvm::CodeGenOpt::Default);
- (void) llvm::createHybridListDAGScheduler(NULL,llvm::CodeGenOpt::Default);
- (void) llvm::createFastDAGScheduler(NULL, llvm::CodeGenOpt::Default);
- (void) llvm::createDefaultScheduler(NULL, llvm::CodeGenOpt::Default);
- (void) llvm::createVLIWDAGScheduler(NULL, llvm::CodeGenOpt::Default);
+ (void) llvm::createBURRListDAGScheduler(nullptr,
+ llvm::CodeGenOpt::Default);
+ (void) llvm::createSourceListDAGScheduler(nullptr,
+ llvm::CodeGenOpt::Default);
+ (void) llvm::createHybridListDAGScheduler(nullptr,
+ llvm::CodeGenOpt::Default);
+ (void) llvm::createFastDAGScheduler(nullptr, llvm::CodeGenOpt::Default);
+ (void) llvm::createDefaultScheduler(nullptr, llvm::CodeGenOpt::Default);
+ (void) llvm::createVLIWDAGScheduler(nullptr, llvm::CodeGenOpt::Default);
}
} ForceCodegenLinking; // Force link by creating a global definition.
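The NULL/0-to-nullptr substitutions that make up most of this patch are not purely cosmetic: nullptr has its own type (std::nullptr_t), so it cannot be mistaken for an integer during overload resolution or template deduction. A small standalone illustration with a hypothetical overload set, not LLVM code:

#include <cstdio>

static void report(int)          { std::puts("int overload"); }
static void report(const char *) { std::puts("pointer overload"); }

int main() {
  report(0);        // binds to report(int): 0 is an integer literal
  report(nullptr);  // unambiguously binds to report(const char *)
  return 0;
}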
diff --git a/include/llvm/CodeGen/LiveInterval.h b/include/llvm/CodeGen/LiveInterval.h
index 41d126a..6629e60 100644
--- a/include/llvm/CodeGen/LiveInterval.h
+++ b/include/llvm/CodeGen/LiveInterval.h
@@ -116,13 +116,13 @@ namespace llvm {
/// Return the value leaving the instruction, if any. This can be a
/// live-through value, or a live def. A dead def returns NULL.
VNInfo *valueOut() const {
- return isDeadDef() ? 0 : LateVal;
+ return isDeadDef() ? nullptr : LateVal;
}
/// Return the value defined by this instruction, if any. This includes
/// dead defs, it is the value created by the instruction's def operands.
VNInfo *valueDefined() const {
- return EarlyVal == LateVal ? 0 : LateVal;
+ return EarlyVal == LateVal ? nullptr : LateVal;
}
/// Return the end point of the last live range segment to interact with
@@ -154,7 +154,7 @@ namespace llvm {
SlotIndex end; // End point of the interval (exclusive)
VNInfo *valno; // identifier for the value contained in this segment.
- Segment() : valno(0) {}
+ Segment() : valno(nullptr) {}
Segment(SlotIndex S, SlotIndex E, VNInfo *V)
: start(S), end(E), valno(V) {
@@ -336,20 +336,20 @@ namespace llvm {
/// is none.
const Segment *getSegmentContaining(SlotIndex Idx) const {
const_iterator I = FindSegmentContaining(Idx);
- return I == end() ? 0 : &*I;
+ return I == end() ? nullptr : &*I;
}
/// Return the live segment that contains the specified index, or null if
/// there is none.
Segment *getSegmentContaining(SlotIndex Idx) {
iterator I = FindSegmentContaining(Idx);
- return I == end() ? 0 : &*I;
+ return I == end() ? nullptr : &*I;
}
/// getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
VNInfo *getVNInfoAt(SlotIndex Idx) const {
const_iterator I = FindSegmentContaining(Idx);
- return I == end() ? 0 : I->valno;
+ return I == end() ? nullptr : I->valno;
}
/// getVNInfoBefore - Return the VNInfo that is live up to but not
@@ -357,7 +357,7 @@ namespace llvm {
/// used by an instruction at this SlotIndex position.
VNInfo *getVNInfoBefore(SlotIndex Idx) const {
const_iterator I = FindSegmentContaining(Idx.getPrevSlot());
- return I == end() ? 0 : I->valno;
+ return I == end() ? nullptr : I->valno;
}
/// Return an iterator to the segment that contains the specified index, or
@@ -443,13 +443,13 @@ namespace llvm {
const_iterator I = find(Idx.getBaseIndex());
const_iterator E = end();
if (I == E)
- return LiveQueryResult(0, 0, SlotIndex(), false);
+ return LiveQueryResult(nullptr, nullptr, SlotIndex(), false);
// Is this an instruction live-in segment?
// If Idx is the start index of a basic block, include live-in segments
// that start at Idx.getBaseIndex().
- VNInfo *EarlyVal = 0;
- VNInfo *LateVal = 0;
+ VNInfo *EarlyVal = nullptr;
+ VNInfo *LateVal = nullptr;
SlotIndex EndPoint;
bool Kill = false;
if (I->start <= Idx.getBaseIndex()) {
@@ -466,7 +466,7 @@ namespace llvm {
// predecessor.
// Such a value is not live-in.
if (EarlyVal->def == Idx.getBaseIndex())
- EarlyVal = 0;
+ EarlyVal = nullptr;
}
// I now points to the segment that may be live-through, or defined by
// this instr. Ignore segments starting after the current instr.
@@ -597,7 +597,7 @@ namespace llvm {
public:
/// Create a LiveRangeUpdater for adding segments to LR.
/// LR will temporarily be in an invalid state until flush() is called.
- LiveRangeUpdater(LiveRange *lr = 0) : LR(lr) {}
+ LiveRangeUpdater(LiveRange *lr = nullptr) : LR(lr) {}
~LiveRangeUpdater() { flush(); }
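With the LiveRange query helpers above now returning nullptr instead of 0, the natural call-site pattern is to test the returned pointer directly. A hedged sketch against the accessors shown in this hunk; the function name and the isPHIDef() check are illustrative only:

#include "llvm/CodeGen/LiveInterval.h"
using namespace llvm;

// True if Idx lies inside LR and the value covering it is a real definition
// rather than a PHI-def introduced at a block boundary.
static bool coveredByRealDef(const LiveRange &LR, SlotIndex Idx) {
  if (const VNInfo *VNI = LR.getVNInfoAt(Idx))
    return !VNI->isPHIDef();
  return false;   // getVNInfoAt returned nullptr: nothing live at Idx
}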
diff --git a/include/llvm/CodeGen/LiveIntervalAnalysis.h b/include/llvm/CodeGen/LiveIntervalAnalysis.h
index 5492593..ddd623c 100644
--- a/include/llvm/CodeGen/LiveIntervalAnalysis.h
+++ b/include/llvm/CodeGen/LiveIntervalAnalysis.h
@@ -137,7 +137,7 @@ namespace llvm {
// Interval removal.
void removeInterval(unsigned Reg) {
delete VirtRegIntervals[Reg];
- VirtRegIntervals[Reg] = 0;
+ VirtRegIntervals[Reg] = nullptr;
}
/// Given a register and an instruction, adds a live segment from that
@@ -153,7 +153,7 @@ namespace llvm {
/// Return true if the interval may have been separated into multiple
/// connected components.
bool shrinkToUses(LiveInterval *li,
- SmallVectorImpl<MachineInstr*> *dead = 0);
+ SmallVectorImpl<MachineInstr*> *dead = nullptr);
/// extendToIndices - Extend the live range of LI to reach all points in
/// Indices. The points in the Indices array must be jointly dominated by
@@ -262,7 +262,7 @@ namespace llvm {
bool runOnMachineFunction(MachineFunction&) override;
/// print - Implement the dump method.
- void print(raw_ostream &O, const Module* = 0) const override;
+ void print(raw_ostream &O, const Module* = nullptr) const override;
/// intervalIsInOneMBB - If LI is confined to a single basic block, return
/// a pointer to that block. If LI is live in to or out of any block,
diff --git a/include/llvm/CodeGen/LiveIntervalUnion.h b/include/llvm/CodeGen/LiveIntervalUnion.h
index 95933d1..2f40509 100644
--- a/include/llvm/CodeGen/LiveIntervalUnion.h
+++ b/include/llvm/CodeGen/LiveIntervalUnion.h
@@ -122,8 +122,8 @@ public:
{}
void clear() {
- LiveUnion = NULL;
- VirtReg = NULL;
+ LiveUnion = nullptr;
+ VirtReg = nullptr;
InterferingVRegs.clear();
CheckedFirstInterference = false;
SeenAllInterferences = false;
@@ -182,7 +182,7 @@ public:
unsigned Size;
LiveIntervalUnion *LIUs;
public:
- Array() : Size(0), LIUs(0) {}
+ Array() : Size(0), LIUs(nullptr) {}
~Array() { clear(); }
// Initialize the array to have Size entries.
diff --git a/include/llvm/CodeGen/LivePhysRegs.h b/include/llvm/CodeGen/LivePhysRegs.h
index c93eaf5..847092b 100644
--- a/include/llvm/CodeGen/LivePhysRegs.h
+++ b/include/llvm/CodeGen/LivePhysRegs.h
@@ -48,7 +48,7 @@ class LivePhysRegs {
LivePhysRegs &operator=(const LivePhysRegs&) LLVM_DELETED_FUNCTION;
public:
/// \brief Constructs a new empty LivePhysRegs set.
- LivePhysRegs() : TRI(0), LiveRegs() {}
+ LivePhysRegs() : TRI(nullptr), LiveRegs() {}
  /// \brief Constructs and initializes an empty LivePhysRegs set.
LivePhysRegs(const TargetRegisterInfo *TRI) : TRI(TRI) {
diff --git a/include/llvm/CodeGen/LiveRangeEdit.h b/include/llvm/CodeGen/LiveRangeEdit.h
index 4ce39e3..5767cab 100644
--- a/include/llvm/CodeGen/LiveRangeEdit.h
+++ b/include/llvm/CodeGen/LiveRangeEdit.h
@@ -116,7 +116,7 @@ public:
MachineFunction &MF,
LiveIntervals &lis,
VirtRegMap *vrm,
- Delegate *delegate = 0)
+ Delegate *delegate = nullptr)
: Parent(parent), NewRegs(newRegs),
MRI(MF.getRegInfo()), LIS(lis), VRM(vrm),
TII(*MF.getTarget().getInstrInfo()),
@@ -174,7 +174,7 @@ public:
struct Remat {
VNInfo *ParentVNI; // parent_'s value at the remat location.
MachineInstr *OrigMI; // Instruction defining ParentVNI.
- explicit Remat(VNInfo *ParentVNI) : ParentVNI(ParentVNI), OrigMI(0) {}
+ explicit Remat(VNInfo *ParentVNI) : ParentVNI(ParentVNI), OrigMI(nullptr) {}
};
/// canRematerializeAt - Determine if ParentVNI can be rematerialized at
diff --git a/include/llvm/CodeGen/LiveRegMatrix.h b/include/llvm/CodeGen/LiveRegMatrix.h
index 28b819b..878b4d9 100644
--- a/include/llvm/CodeGen/LiveRegMatrix.h
+++ b/include/llvm/CodeGen/LiveRegMatrix.h
@@ -25,7 +25,6 @@
#define LLVM_CODEGEN_LIVEREGMATRIX_H
#include "llvm/ADT/BitVector.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/CodeGen/LiveIntervalUnion.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -51,7 +50,7 @@ class LiveRegMatrix : public MachineFunctionPass {
LiveIntervalUnion::Array Matrix;
// Cached queries per register unit.
- OwningArrayPtr<LiveIntervalUnion::Query> Queries;
+ std::unique_ptr<LiveIntervalUnion::Query[]> Queries;
// Cached register mask interference info.
unsigned RegMaskTag;
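OwningArrayPtr is retired here (and in RegisterClassInfo.h further down) in favour of std::unique_ptr<T[]>; the array specialisation releases with delete[] and still provides operator[], so call sites change very little. A standalone sketch of the pattern with a hypothetical Query type:

#include <memory>

struct Query { unsigned Tag; };                // stand-in element type

int main() {
  const unsigned NumRegUnits = 64;             // illustrative size
  std::unique_ptr<Query[]> Queries;            // replaces OwningArrayPtr<Query>
  Queries.reset(new Query[NumRegUnits]());     // value-initialised array
  Queries[3].Tag = 42;                         // operator[] works as before
  return Queries[3].Tag == 42 ? 0 : 1;         // delete[] runs automatically
}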
diff --git a/include/llvm/CodeGen/LiveStackAnalysis.h b/include/llvm/CodeGen/LiveStackAnalysis.h
index ac32a9c..df68398 100644
--- a/include/llvm/CodeGen/LiveStackAnalysis.h
+++ b/include/llvm/CodeGen/LiveStackAnalysis.h
@@ -92,7 +92,7 @@ namespace llvm {
bool runOnMachineFunction(MachineFunction&) override;
/// print - Implement the dump method.
- void print(raw_ostream &O, const Module* = 0) const override;
+ void print(raw_ostream &O, const Module* = nullptr) const override;
};
}
diff --git a/include/llvm/CodeGen/MachineBasicBlock.h b/include/llvm/CodeGen/MachineBasicBlock.h
index 5e86e75..90bdeee4 100644
--- a/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/include/llvm/CodeGen/MachineBasicBlock.h
@@ -160,7 +160,7 @@ public:
template<class OtherTy, class OtherIterTy>
bundle_iterator(const bundle_iterator<OtherTy, OtherIterTy> &I)
: MII(I.getInstrIterator()) {}
- bundle_iterator() : MII(0) {}
+ bundle_iterator() : MII(nullptr) {}
Ty &operator*() const { return *MII; }
Ty *operator->() const { return &operator*(); }
@@ -219,10 +219,15 @@ public:
unsigned size() const { return (unsigned)Insts.size(); }
bool empty() const { return Insts.empty(); }
- MachineInstr& front() { return Insts.front(); }
- MachineInstr& back() { return Insts.back(); }
- const MachineInstr& front() const { return Insts.front(); }
- const MachineInstr& back() const { return Insts.back(); }
+ MachineInstr &instr_front() { return Insts.front(); }
+ MachineInstr &instr_back() { return Insts.back(); }
+ const MachineInstr &instr_front() const { return Insts.front(); }
+ const MachineInstr &instr_back() const { return Insts.back(); }
+
+ MachineInstr &front() { return Insts.front(); }
+ MachineInstr &back() { return *--end(); }
+ const MachineInstr &front() const { return Insts.front(); }
+ const MachineInstr &back() const { return *--end(); }
instr_iterator instr_begin() { return Insts.begin(); }
const_instr_iterator instr_begin() const { return Insts.begin(); }
@@ -242,6 +247,12 @@ public:
reverse_iterator rend () { return instr_rend(); }
const_reverse_iterator rend () const { return instr_rend(); }
+ inline iterator_range<iterator> terminators() {
+ return iterator_range<iterator>(getFirstTerminator(), end());
+ }
+ inline iterator_range<const_iterator> terminators() const {
+ return iterator_range<const_iterator>(getFirstTerminator(), end());
+ }
// Machine-CFG iterators
typedef std::vector<MachineBasicBlock *>::iterator pred_iterator;
@@ -256,7 +267,6 @@ public:
succ_reverse_iterator;
typedef std::vector<MachineBasicBlock *>::const_reverse_iterator
const_succ_reverse_iterator;
-
pred_iterator pred_begin() { return Predecessors.begin(); }
const_pred_iterator pred_begin() const { return Predecessors.begin(); }
pred_iterator pred_end() { return Predecessors.end(); }
@@ -290,6 +300,19 @@ public:
}
bool succ_empty() const { return Successors.empty(); }
+ inline iterator_range<pred_iterator> predecessors() {
+ return iterator_range<pred_iterator>(pred_begin(), pred_end());
+ }
+ inline iterator_range<const_pred_iterator> predecessors() const {
+ return iterator_range<const_pred_iterator>(pred_begin(), pred_end());
+ }
+ inline iterator_range<succ_iterator> successors() {
+ return iterator_range<succ_iterator>(succ_begin(), succ_end());
+ }
+ inline iterator_range<const_succ_iterator> successors() const {
+ return iterator_range<const_succ_iterator>(succ_begin(), succ_end());
+ }
+
// LiveIn management methods.
/// addLiveIn - Add the specified register as a live in. Note that it
@@ -609,7 +632,7 @@ public:
// Debugging methods.
void dump() const;
- void print(raw_ostream &OS, SlotIndexes* = 0) const;
+ void print(raw_ostream &OS, SlotIndexes* = nullptr) const;
// Printing method used by LoopInfo.
void printAsOperand(raw_ostream &OS, bool PrintType = true);
diff --git a/include/llvm/CodeGen/MachineBlockFrequencyInfo.h b/include/llvm/CodeGen/MachineBlockFrequencyInfo.h
index f3ef87c..1aef689 100644
--- a/include/llvm/CodeGen/MachineBlockFrequencyInfo.h
+++ b/include/llvm/CodeGen/MachineBlockFrequencyInfo.h
@@ -1,4 +1,4 @@
-//====-- MachineBlockFrequencyInfo.h - MBB Frequency Analysis -*- C++ -*--====//
+//===- MachineBlockFrequencyInfo.h - MBB Frequency Analysis -*- C++ -*-----===//
//
// The LLVM Compiler Infrastructure
//
@@ -22,14 +22,12 @@ namespace llvm {
class MachineBasicBlock;
class MachineBranchProbabilityInfo;
-template<class BlockT, class FunctionT, class BranchProbInfoT>
-class BlockFrequencyImpl;
+template <class BlockT> class BlockFrequencyInfoImpl;
-/// MachineBlockFrequencyInfo pass uses BlockFrequencyImpl implementation to estimate
-/// machine basic block frequencies.
+/// MachineBlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation
+/// to estimate machine basic block frequencies.
class MachineBlockFrequencyInfo : public MachineFunctionPass {
- typedef BlockFrequencyImpl<MachineBasicBlock, MachineFunction,
- MachineBranchProbabilityInfo> ImplType;
+ typedef BlockFrequencyInfoImpl<MachineBasicBlock> ImplType;
std::unique_ptr<ImplType> MBFI;
public:
diff --git a/include/llvm/CodeGen/MachineCodeEmitter.h b/include/llvm/CodeGen/MachineCodeEmitter.h
index f729ced..81b0ba1 100644
--- a/include/llvm/CodeGen/MachineCodeEmitter.h
+++ b/include/llvm/CodeGen/MachineCodeEmitter.h
@@ -262,7 +262,7 @@ public:
// Check for buffer overflow.
if (Size >= (uintptr_t)(BufferEnd-CurBufferPtr)) {
CurBufferPtr = BufferEnd;
- Result = 0;
+ Result = nullptr;
} else {
// Allocate the space.
Result = CurBufferPtr;
diff --git a/include/llvm/CodeGen/MachineCodeInfo.h b/include/llvm/CodeGen/MachineCodeInfo.h
index ba9dfab..820bc87 100644
--- a/include/llvm/CodeGen/MachineCodeInfo.h
+++ b/include/llvm/CodeGen/MachineCodeInfo.h
@@ -27,7 +27,7 @@ private:
void *Address; // The address of the function in memory
public:
- MachineCodeInfo() : Size(0), Address(0) {}
+ MachineCodeInfo() : Size(0), Address(nullptr) {}
void setSize(size_t s) {
Size = s;
diff --git a/include/llvm/CodeGen/MachineFrameInfo.h b/include/llvm/CodeGen/MachineFrameInfo.h
index 1dedd74..bd0ea11 100644
--- a/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/include/llvm/CodeGen/MachineFrameInfo.h
@@ -519,7 +519,7 @@ public:
/// a nonnegative identifier to represent it.
///
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSS,
- const AllocaInst *Alloca = 0);
+ const AllocaInst *Alloca = nullptr);
/// CreateSpillStackObject - Create a new statically sized stack object that
/// represents a spill slot, returning a nonnegative identifier to represent
diff --git a/include/llvm/CodeGen/MachineFunction.h b/include/llvm/CodeGen/MachineFunction.h
index 652d63d..f4c2542 100644
--- a/include/llvm/CodeGen/MachineFunction.h
+++ b/include/llvm/CodeGen/MachineFunction.h
@@ -259,6 +259,9 @@ public:
return MBBNumbering[N];
}
+ /// Should we be emitting segmented stack stuff for the function
+ bool shouldSplitStack();
+
/// getNumBlockIDs - Return the number of MBB ID's allocated.
///
unsigned getNumBlockIDs() const { return (unsigned)MBBNumbering.size(); }
@@ -268,12 +271,12 @@ public:
/// dense, and match the ordering of the blocks within the function. If a
/// specific MachineBasicBlock is specified, only that block and those after
/// it are renumbered.
- void RenumberBlocks(MachineBasicBlock *MBBFrom = 0);
+ void RenumberBlocks(MachineBasicBlock *MBBFrom = nullptr);
/// print - Print out the MachineFunction in a format suitable for debugging
/// to the specified stream.
///
- void print(raw_ostream &OS, SlotIndexes* = 0) const;
+ void print(raw_ostream &OS, SlotIndexes* = nullptr) const;
/// viewCFG - This function is meant for use from the debugger. You can just
/// say 'call F->viewCFG()' and a ghostview window should pop up from the
@@ -296,7 +299,7 @@ public:
/// verify - Run the current MachineFunction through the machine code
/// verifier, useful for debugger use.
- void verify(Pass *p = NULL, const char *Banner = NULL) const;
+ void verify(Pass *p = nullptr, const char *Banner = nullptr) const;
// Provide accessors for the MachineBasicBlock list...
typedef BasicBlockListType::iterator iterator;
@@ -364,7 +367,7 @@ public:
/// implementation.
void removeFromMBBNumbering(unsigned N) {
assert(N < MBBNumbering.size() && "Illegal basic block #");
- MBBNumbering[N] = 0;
+ MBBNumbering[N] = nullptr;
}
/// CreateMachineInstr - Allocate a new MachineInstr. Use this instead
@@ -389,7 +392,7 @@ public:
/// CreateMachineBasicBlock - Allocate a new MachineBasicBlock. Use this
/// instead of `new MachineBasicBlock'.
///
- MachineBasicBlock *CreateMachineBasicBlock(const BasicBlock *bb = 0);
+ MachineBasicBlock *CreateMachineBasicBlock(const BasicBlock *bb = nullptr);
/// DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
///
@@ -401,8 +404,8 @@ public:
MachineMemOperand *getMachineMemOperand(MachinePointerInfo PtrInfo,
unsigned f, uint64_t s,
unsigned base_alignment,
- const MDNode *TBAAInfo = 0,
- const MDNode *Ranges = 0);
+ const MDNode *TBAAInfo = nullptr,
+ const MDNode *Ranges = nullptr);
/// getMachineMemOperand - Allocate a new MachineMemOperand by copying
/// an existing one, adjusting by an offset and using the given size.
diff --git a/include/llvm/CodeGen/MachineInstr.h b/include/llvm/CodeGen/MachineInstr.h
index f5dc75e..b0d3e02 100644
--- a/include/llvm/CodeGen/MachineInstr.h
+++ b/include/llvm/CodeGen/MachineInstr.h
@@ -24,6 +24,7 @@
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/MC/MCInstrDesc.h"
@@ -243,6 +244,14 @@ public:
///
DebugLoc getDebugLoc() const { return debugLoc; }
+ /// getDebugVariable() - Return the debug variable referenced by
+ /// this DBG_VALUE instruction.
+ DIVariable getDebugVariable() const {
+ assert(isDebugValue() && "not a DBG_VALUE");
+ const MDNode *Var = getOperand(getNumOperands() - 1).getMetadata();
+ return DIVariable(Var);
+ }
+
/// emitError - Emit an error referring to the source location of this
/// instruction. This should only be used for inline assembly that is somehow
/// impossible to compile. Other errors should have been handled much
@@ -287,22 +296,54 @@ public:
const_mop_iterator operands_begin() const { return Operands; }
const_mop_iterator operands_end() const { return Operands + NumOperands; }
- inline iterator_range<mop_iterator> operands() {
+ iterator_range<mop_iterator> operands() {
return iterator_range<mop_iterator>(operands_begin(), operands_end());
}
- inline iterator_range<const_mop_iterator> operands() const {
+ iterator_range<const_mop_iterator> operands() const {
return iterator_range<const_mop_iterator>(operands_begin(), operands_end());
}
+ iterator_range<mop_iterator> explicit_operands() {
+ return iterator_range<mop_iterator>(
+ operands_begin(), operands_begin() + getNumExplicitOperands());
+ }
+ iterator_range<const_mop_iterator> explicit_operands() const {
+ return iterator_range<const_mop_iterator>(
+ operands_begin(), operands_begin() + getNumExplicitOperands());
+ }
+ iterator_range<mop_iterator> implicit_operands() {
+ return iterator_range<mop_iterator>(explicit_operands().end(),
+ operands_end());
+ }
+ iterator_range<const_mop_iterator> implicit_operands() const {
+ return iterator_range<const_mop_iterator>(explicit_operands().end(),
+ operands_end());
+ }
+ iterator_range<mop_iterator> defs() {
+ return iterator_range<mop_iterator>(
+ operands_begin(), operands_begin() + getDesc().getNumDefs());
+ }
+ iterator_range<const_mop_iterator> defs() const {
+ return iterator_range<const_mop_iterator>(
+ operands_begin(), operands_begin() + getDesc().getNumDefs());
+ }
+ iterator_range<mop_iterator> uses() {
+ return iterator_range<mop_iterator>(
+ operands_begin() + getDesc().getNumDefs(), operands_end());
+ }
+ iterator_range<const_mop_iterator> uses() const {
+ return iterator_range<const_mop_iterator>(
+ operands_begin() + getDesc().getNumDefs(), operands_end());
+ }
/// Access to memory operands of the instruction
mmo_iterator memoperands_begin() const { return MemRefs; }
mmo_iterator memoperands_end() const { return MemRefs + NumMemRefs; }
bool memoperands_empty() const { return NumMemRefs == 0; }
- inline iterator_range<mmo_iterator> memoperands() {
+ iterator_range<mmo_iterator> memoperands() {
return iterator_range<mmo_iterator>(memoperands_begin(), memoperands_end());
}
- inline iterator_range<mmo_iterator> memoperands() const {
+ iterator_range<mmo_iterator> memoperands() const {
return iterator_range<mmo_iterator>(memoperands_begin(), memoperands_end());
}
@@ -735,7 +776,8 @@ public:
/// is a read of a super-register.
/// This does not count partial redefines of virtual registers as reads:
/// %reg1024:6 = OP.
- bool readsRegister(unsigned Reg, const TargetRegisterInfo *TRI = NULL) const {
+ bool readsRegister(unsigned Reg,
+ const TargetRegisterInfo *TRI = nullptr) const {
return findRegisterUseOperandIdx(Reg, false, TRI) != -1;
}
@@ -751,12 +793,13 @@ public:
/// partial defines.
/// If Ops is not null, all operand indices for Reg are added.
std::pair<bool,bool> readsWritesVirtualRegister(unsigned Reg,
- SmallVectorImpl<unsigned> *Ops = 0) const;
+ SmallVectorImpl<unsigned> *Ops = nullptr) const;
/// killsRegister - Return true if the MachineInstr kills the specified
/// register. If TargetRegisterInfo is passed, then it also checks if there is
/// a kill of a super-register.
- bool killsRegister(unsigned Reg, const TargetRegisterInfo *TRI = NULL) const {
+ bool killsRegister(unsigned Reg,
+ const TargetRegisterInfo *TRI = nullptr) const {
return findRegisterUseOperandIdx(Reg, true, TRI) != -1;
}
@@ -764,7 +807,8 @@ public:
/// specified register. If TargetRegisterInfo is passed, then it also checks
/// if there is a def of a super-register.
/// NOTE: It's ignoring subreg indices on virtual registers.
- bool definesRegister(unsigned Reg, const TargetRegisterInfo *TRI=NULL) const {
+ bool definesRegister(unsigned Reg,
+ const TargetRegisterInfo *TRI = nullptr) const {
return findRegisterDefOperandIdx(Reg, false, false, TRI) != -1;
}
@@ -779,7 +823,7 @@ public:
/// instruction. If TargetRegisterInfo is passed, then it also checks
/// if there is a dead def of a super-register.
bool registerDefIsDead(unsigned Reg,
- const TargetRegisterInfo *TRI = NULL) const {
+ const TargetRegisterInfo *TRI = nullptr) const {
return findRegisterDefOperandIdx(Reg, true, false, TRI) != -1;
}
@@ -787,14 +831,14 @@ public:
/// the specific register or -1 if it is not found. It further tightens
/// the search criteria to a use that kills the register if isKill is true.
int findRegisterUseOperandIdx(unsigned Reg, bool isKill = false,
- const TargetRegisterInfo *TRI = NULL) const;
+ const TargetRegisterInfo *TRI = nullptr) const;
/// findRegisterUseOperand - Wrapper for findRegisterUseOperandIdx, it returns
/// a pointer to the MachineOperand rather than an index.
MachineOperand *findRegisterUseOperand(unsigned Reg, bool isKill = false,
- const TargetRegisterInfo *TRI = NULL) {
+ const TargetRegisterInfo *TRI = nullptr) {
int Idx = findRegisterUseOperandIdx(Reg, isKill, TRI);
- return (Idx == -1) ? NULL : &getOperand(Idx);
+ return (Idx == -1) ? nullptr : &getOperand(Idx);
}
/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
@@ -805,14 +849,14 @@ public:
/// This may also return a register mask operand when Overlap is true.
int findRegisterDefOperandIdx(unsigned Reg,
bool isDead = false, bool Overlap = false,
- const TargetRegisterInfo *TRI = NULL) const;
+ const TargetRegisterInfo *TRI = nullptr) const;
/// findRegisterDefOperand - Wrapper for findRegisterDefOperandIdx, it returns
/// a pointer to the MachineOperand rather than an index.
MachineOperand *findRegisterDefOperand(unsigned Reg, bool isDead = false,
- const TargetRegisterInfo *TRI = NULL) {
+ const TargetRegisterInfo *TRI = nullptr) {
int Idx = findRegisterDefOperandIdx(Reg, isDead, false, TRI);
- return (Idx == -1) ? NULL : &getOperand(Idx);
+ return (Idx == -1) ? nullptr : &getOperand(Idx);
}
/// findFirstPredOperandIdx() - Find the index of the first operand in the
@@ -830,7 +874,7 @@ public:
/// The flag operand is an immediate that can be decoded with methods like
/// InlineAsm::hasRegClassConstraint().
///
- int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo = 0) const;
+ int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo = nullptr) const;
/// getRegClassConstraint - Compute the static register class constraint for
/// operand OpIdx. For normal instructions, this is derived from the
@@ -892,7 +936,8 @@ public:
/// check if the register def is tied to a source operand, due to either
/// two-address elimination or inline assembly constraints. Returns the
/// first tied use operand index by reference if UseOpIdx is not null.
- bool isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx = 0) const {
+ bool isRegTiedToUseOperand(unsigned DefOpIdx,
+ unsigned *UseOpIdx = nullptr) const {
const MachineOperand &MO = getOperand(DefOpIdx);
if (!MO.isReg() || !MO.isDef() || !MO.isTied())
return false;
@@ -904,7 +949,8 @@ public:
/// isRegTiedToDefOperand - Return true if the use operand of the specified
  /// index is tied to a def operand. It also returns the def operand index by
/// reference if DefOpIdx is not null.
- bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx = 0) const {
+ bool isRegTiedToDefOperand(unsigned UseOpIdx,
+ unsigned *DefOpIdx = nullptr) const {
const MachineOperand &MO = getOperand(UseOpIdx);
if (!MO.isReg() || !MO.isUse() || !MO.isTied())
return false;
@@ -943,7 +989,8 @@ public:
/// addRegisterDefined - We have determined MI defines a register. Make sure
/// there is an operand defining Reg.
- void addRegisterDefined(unsigned Reg, const TargetRegisterInfo *RegInfo = 0);
+ void addRegisterDefined(unsigned Reg,
+ const TargetRegisterInfo *RegInfo = nullptr);
/// setPhysRegsDeadExcept - Mark every physreg used by this instruction as
/// dead except those in the UsedRegs list.
@@ -997,7 +1044,7 @@ public:
//
// Debugging support
//
- void print(raw_ostream &OS, const TargetMachine *TM = 0,
+ void print(raw_ostream &OS, const TargetMachine *TM = nullptr,
bool SkipOpers = false) const;
void dump() const;
@@ -1098,7 +1145,7 @@ private:
/// useful for CSE, etc.
struct MachineInstrExpressionTrait : DenseMapInfo<MachineInstr*> {
static inline MachineInstr *getEmptyKey() {
- return 0;
+ return nullptr;
}
static inline MachineInstr *getTombstoneKey() {
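The defs()/uses() operand ranges and getDebugVariable() added above make it easier to partition an instruction's operands and to recover the DIVariable behind a DBG_VALUE. A hedged sketch using only those new accessors; the functions themselves are illustrative:

#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/IR/DebugInfo.h"
using namespace llvm;

// Count register operands in the use portion of the operand list (everything
// after the defs, including implicit uses).
static unsigned countRegisterUses(const MachineInstr &MI) {
  unsigned NumRegUses = 0;
  for (const MachineOperand &MO : MI.uses())
    if (MO.isReg() && MO.readsReg())
      ++NumRegUses;
  return NumRegUses;
}

// Name of the variable a DBG_VALUE describes, or the empty string otherwise.
static StringRef debugVariableName(const MachineInstr &MI) {
  if (MI.isDebugValue())
    return MI.getDebugVariable().getName();
  return StringRef();
}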
diff --git a/include/llvm/CodeGen/MachineInstrBuilder.h b/include/llvm/CodeGen/MachineInstrBuilder.h
index d7eb706..21a482c 100644
--- a/include/llvm/CodeGen/MachineInstrBuilder.h
+++ b/include/llvm/CodeGen/MachineInstrBuilder.h
@@ -46,7 +46,7 @@ class MachineInstrBuilder {
MachineFunction *MF;
MachineInstr *MI;
public:
- MachineInstrBuilder() : MF(0), MI(0) {}
+ MachineInstrBuilder() : MF(nullptr), MI(nullptr) {}
/// Create a MachineInstrBuilder for manipulating an existing instruction.
/// F must be the machine function that was used to allocate I.
diff --git a/include/llvm/CodeGen/MachineInstrBundle.h b/include/llvm/CodeGen/MachineInstrBundle.h
index 9519edb..1220224 100644
--- a/include/llvm/CodeGen/MachineInstrBundle.h
+++ b/include/llvm/CodeGen/MachineInstrBundle.h
@@ -196,7 +196,7 @@ public:
/// each operand referring to Reg.
/// @returns A filled-in RegInfo struct.
VirtRegInfo analyzeVirtReg(unsigned Reg,
- SmallVectorImpl<std::pair<MachineInstr*, unsigned> > *Ops = 0);
+ SmallVectorImpl<std::pair<MachineInstr*, unsigned> > *Ops = nullptr);
/// analyzePhysReg - Analyze how the current instruction or bundle uses a
/// physical register. This function should not be called after operator++(),
diff --git a/include/llvm/CodeGen/MachineMemOperand.h b/include/llvm/CodeGen/MachineMemOperand.h
index f01b8eb..2532c16 100644
--- a/include/llvm/CodeGen/MachineMemOperand.h
+++ b/include/llvm/CodeGen/MachineMemOperand.h
@@ -16,11 +16,13 @@
#ifndef LLVM_CODEGEN_MACHINEMEMOPERAND_H
#define LLVM_CODEGEN_MACHINEMEMOPERAND_H
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/IR/Value.h" // PointerLikeTypeTraits<Value*>
#include "llvm/Support/DataTypes.h"
namespace llvm {
-class Value;
class FoldingSetNodeID;
class MDNode;
class raw_ostream;
@@ -33,17 +35,23 @@ struct MachinePointerInfo {
/// V - This is the IR pointer value for the access, or it is null if unknown.
/// If this is null, then the access is to a pointer in the default address
/// space.
- const Value *V;
+ PointerUnion<const Value *, const PseudoSourceValue *> V;
/// Offset - This is an offset from the base Value*.
int64_t Offset;
- explicit MachinePointerInfo(const Value *v = 0, int64_t offset = 0)
+ explicit MachinePointerInfo(const Value *v = nullptr, int64_t offset = 0)
+ : V(v), Offset(offset) {}
+
+ explicit MachinePointerInfo(const PseudoSourceValue *v,
+ int64_t offset = 0)
: V(v), Offset(offset) {}
MachinePointerInfo getWithOffset(int64_t O) const {
- if (V == 0) return MachinePointerInfo(0, 0);
- return MachinePointerInfo(V, Offset+O);
+ if (V.isNull()) return MachinePointerInfo();
+ if (V.is<const Value*>())
+ return MachinePointerInfo(V.get<const Value*>(), Offset+O);
+ return MachinePointerInfo(V.get<const PseudoSourceValue*>(), Offset+O);
}
/// getAddrSpace - Return the LLVM IR address space number that this pointer
@@ -109,8 +117,8 @@ public:
/// MachineMemOperand - Construct an MachineMemOperand object with the
/// specified PtrInfo, flags, size, and base alignment.
MachineMemOperand(MachinePointerInfo PtrInfo, unsigned flags, uint64_t s,
- unsigned base_alignment, const MDNode *TBAAInfo = 0,
- const MDNode *Ranges = 0);
+ unsigned base_alignment, const MDNode *TBAAInfo = nullptr,
+ const MDNode *Ranges = nullptr);
const MachinePointerInfo &getPointerInfo() const { return PtrInfo; }
@@ -121,7 +129,13 @@ public:
/// other PseudoSourceValue member functions which return objects which stand
/// for frame/stack pointer relative references and other special references
/// which are not representable in the high-level IR.
- const Value *getValue() const { return PtrInfo.V; }
+ const Value *getValue() const { return PtrInfo.V.dyn_cast<const Value*>(); }
+
+ const PseudoSourceValue *getPseudoValue() const {
+ return PtrInfo.V.dyn_cast<const PseudoSourceValue*>();
+ }
+
+ const void *getOpaqueValue() const { return PtrInfo.V.getOpaqueValue(); }
/// getFlags - Return the raw flags of the source value, \see MemOperandFlags.
unsigned int getFlags() const { return Flags & ((1 << MOMaxBits) - 1); }
@@ -177,6 +191,7 @@ public:
/// should only be used when an object is being relocated and all references
/// to it are being updated.
void setValue(const Value *NewSV) { PtrInfo.V = NewSV; }
+ void setValue(const PseudoSourceValue *NewSV) { PtrInfo.V = NewSV; }
void setOffset(int64_t NewOffset) { PtrInfo.Offset = NewOffset; }
/// Profile - Gather unique data for the object.
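MachinePointerInfo::V is now a PointerUnion over IR values and pseudo source values, so consumers that used to isa<PseudoSourceValue> a Value* now query the two sides of the union separately through getValue() and getPseudoValue(). A hedged sketch of that consumer pattern; the classification scheme itself is illustrative:

#include "llvm/CodeGen/MachineMemOperand.h"
using namespace llvm;

// 0 = unknown pointer in the default address space, 1 = IR-level Value,
// 2 = pseudo source value (spill slot, constant pool, GOT, ...).
static int classifyMemOperand(const MachineMemOperand &MMO) {
  if (MMO.getValue())
    return 1;
  if (MMO.getPseudoValue())
    return 2;
  return 0;
}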
diff --git a/include/llvm/CodeGen/MachineModuleInfo.h b/include/llvm/CodeGen/MachineModuleInfo.h
index 28f4544..6d8d056 100644
--- a/include/llvm/CodeGen/MachineModuleInfo.h
+++ b/include/llvm/CodeGen/MachineModuleInfo.h
@@ -71,7 +71,7 @@ struct LandingPadInfo {
std::vector<int> TypeIds; // List of type ids (filters negative)
explicit LandingPadInfo(MachineBasicBlock *MBB)
- : LandingPadBlock(MBB), LandingPadLabel(0), Personality(0) {}
+ : LandingPadBlock(MBB), LandingPadLabel(nullptr), Personality(nullptr) {}
};
//===----------------------------------------------------------------------===//
@@ -201,7 +201,7 @@ public:
///
template<typename Ty>
Ty &getObjFileInfo() {
- if (ObjFileMMI == 0)
+ if (ObjFileMMI == nullptr)
ObjFileMMI = new Ty(*this);
return *static_cast<Ty*>(ObjFileMMI);
}
@@ -334,7 +334,7 @@ public:
/// TidyLandingPads - Remap landing pad labels and remove any deleted landing
/// pads.
- void TidyLandingPads(DenseMap<MCSymbol*, uintptr_t> *LPMap = 0);
+ void TidyLandingPads(DenseMap<MCSymbol*, uintptr_t> *LPMap = nullptr);
/// getLandingPads - Return a reference to the landing pad info for the
/// current function.
diff --git a/include/llvm/CodeGen/MachineOperand.h b/include/llvm/CodeGen/MachineOperand.h
index 57bdb4c..22969bc8 100644
--- a/include/llvm/CodeGen/MachineOperand.h
+++ b/include/llvm/CodeGen/MachineOperand.h
@@ -42,7 +42,7 @@ class MCSymbol;
///
class MachineOperand {
public:
- enum MachineOperandType {
+ enum MachineOperandType : unsigned char {
MO_Register, ///< Register operand.
MO_Immediate, ///< Immediate operand
MO_CImmediate, ///< Immediate >64bit operand
@@ -65,7 +65,7 @@ public:
private:
/// OpKind - Specify what kind of operand this is. This discriminates the
/// union.
- unsigned char OpKind; // MachineOperandType
+ MachineOperandType OpKind;
/// Subregister number for MO_Register. A value of 0 indicates the
/// MO_Register has no subReg.
@@ -181,7 +181,7 @@ private:
} Contents;
explicit MachineOperand(MachineOperandType K)
- : OpKind(K), SubReg_TargetFlags(0), ParentMI(0) {}
+ : OpKind(K), SubReg_TargetFlags(0), ParentMI(nullptr) {}
public:
/// getType - Returns the MachineOperandType for this operand.
///
@@ -215,9 +215,9 @@ public:
///
/// Never call clearParent() on an operand in a MachineInstr.
///
- void clearParent() { ParentMI = 0; }
+ void clearParent() { ParentMI = nullptr; }
- void print(raw_ostream &os, const TargetMachine *TM = 0) const;
+ void print(raw_ostream &os, const TargetMachine *TM = nullptr) const;
//===--------------------------------------------------------------------===//
// Accessors that tell you what kind of MachineOperand you're looking at.
@@ -227,7 +227,7 @@ public:
bool isReg() const { return OpKind == MO_Register; }
/// isImm - Tests if this is a MO_Immediate operand.
bool isImm() const { return OpKind == MO_Immediate; }
- /// isCImm - Test if t his is a MO_CImmediate operand.
+ /// isCImm - Test if this is a MO_CImmediate operand.
bool isCImm() const { return OpKind == MO_CImmediate; }
/// isFPImm - Tests if this is a MO_FPImmediate operand.
bool isFPImm() const { return OpKind == MO_FPImmediate; }
@@ -593,8 +593,8 @@ public:
Op.TiedTo = 0;
Op.IsDebug = isDebug;
Op.SmallContents.RegNo = Reg;
- Op.Contents.Reg.Prev = 0;
- Op.Contents.Reg.Next = 0;
+ Op.Contents.Reg.Prev = nullptr;
+ Op.Contents.Reg.Next = nullptr;
Op.setSubReg(SubReg);
return Op;
}
@@ -711,12 +711,12 @@ private:
/// part of a machine instruction.
bool isOnRegUseList() const {
assert(isReg() && "Can only add reg operand to use lists");
- return Contents.Reg.Prev != 0;
+ return Contents.Reg.Prev != nullptr;
}
};
inline raw_ostream &operator<<(raw_ostream &OS, const MachineOperand& MO) {
- MO.print(OS, 0);
+ MO.print(OS, nullptr);
return OS;
}
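Giving MachineOperandType a fixed unsigned char underlying type presumably lets OpKind be declared with the enum type itself (friendlier to debuggers and to switch-coverage warnings) without growing MachineOperand, because the enum is guaranteed to occupy one byte. A standalone sketch of the idiom with a hypothetical Kind enum, not the LLVM one:

#include <cstdint>

enum Kind : unsigned char {   // fixed underlying type: exactly one byte
  K_Register,
  K_Immediate,
  K_Symbol
};

struct Operand {
  Kind OpKind;                // typed field instead of a raw unsigned char
  std::uint8_t SubReg;
  std::uint16_t Flags;
};

// The typed field costs nothing: the struct packs exactly as before.
static_assert(sizeof(Operand) == 4, "Operand grew unexpectedly");

int main() {
  Operand Op = { K_Register, 0, 0 };
  return Op.OpKind == K_Register ? 0 : 1;
}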
diff --git a/include/llvm/CodeGen/MachinePassRegistry.h b/include/llvm/CodeGen/MachinePassRegistry.h
index cd212ab..c962e68 100644
--- a/include/llvm/CodeGen/MachinePassRegistry.h
+++ b/include/llvm/CodeGen/MachinePassRegistry.h
@@ -59,7 +59,7 @@ private:
public:
MachinePassRegistryNode(const char *N, const char *D, MachinePassCtor C)
- : Next(NULL)
+ : Next(nullptr)
, Name(N)
, Description(D)
, Ctor(C)
@@ -123,7 +123,7 @@ class RegisterPassParser : public MachinePassRegistryListener,
public cl::parser<typename RegistryClass::FunctionPassCtor> {
public:
RegisterPassParser() {}
- ~RegisterPassParser() { RegistryClass::setListener(NULL); }
+ ~RegisterPassParser() { RegistryClass::setListener(nullptr); }
void initialize(cl::Option &O) {
cl::parser<typename RegistryClass::FunctionPassCtor>::initialize(O);
diff --git a/include/llvm/CodeGen/MachinePostDominators.h b/include/llvm/CodeGen/MachinePostDominators.h
index a6f9f3d..beb2c4f 100644
--- a/include/llvm/CodeGen/MachinePostDominators.h
+++ b/include/llvm/CodeGen/MachinePostDominators.h
@@ -79,7 +79,7 @@ public:
bool runOnMachineFunction(MachineFunction &MF) override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
- void print(llvm::raw_ostream &OS, const Module *M = 0) const override;
+ void print(llvm::raw_ostream &OS, const Module *M = nullptr) const override;
};
} //end of namespace llvm
diff --git a/include/llvm/CodeGen/MachineRegisterInfo.h b/include/llvm/CodeGen/MachineRegisterInfo.h
index 2285130..51139f7 100644
--- a/include/llvm/CodeGen/MachineRegisterInfo.h
+++ b/include/llvm/CodeGen/MachineRegisterInfo.h
@@ -135,7 +135,7 @@ public:
// notifications, we will need to change to using a list.
assert(TheDelegate == delegate &&
"Only the current delegate can perform reset!");
- TheDelegate = 0;
+ TheDelegate = nullptr;
}
void setDelegate(Delegate *delegate) {
@@ -223,7 +223,7 @@ public:
reg_iterator reg_begin(unsigned RegNo) const {
return reg_iterator(getRegUseDefListHead(RegNo));
}
- static reg_iterator reg_end() { return reg_iterator(0); }
+ static reg_iterator reg_end() { return reg_iterator(nullptr); }
inline iterator_range<reg_iterator> reg_operands(unsigned Reg) const {
return iterator_range<reg_iterator>(reg_begin(Reg), reg_end());
@@ -236,7 +236,9 @@ public:
reg_instr_iterator reg_instr_begin(unsigned RegNo) const {
return reg_instr_iterator(getRegUseDefListHead(RegNo));
}
- static reg_instr_iterator reg_instr_end() { return reg_instr_iterator(0); }
+ static reg_instr_iterator reg_instr_end() {
+ return reg_instr_iterator(nullptr);
+ }
inline iterator_range<reg_instr_iterator>
reg_instructions(unsigned Reg) const {
@@ -251,7 +253,9 @@ public:
reg_bundle_iterator reg_bundle_begin(unsigned RegNo) const {
return reg_bundle_iterator(getRegUseDefListHead(RegNo));
}
- static reg_bundle_iterator reg_bundle_end() { return reg_bundle_iterator(0); }
+ static reg_bundle_iterator reg_bundle_end() {
+ return reg_bundle_iterator(nullptr);
+ }
inline iterator_range<reg_bundle_iterator> reg_bundles(unsigned Reg) const {
return iterator_range<reg_bundle_iterator>(reg_bundle_begin(Reg),
@@ -269,7 +273,9 @@ public:
reg_nodbg_iterator reg_nodbg_begin(unsigned RegNo) const {
return reg_nodbg_iterator(getRegUseDefListHead(RegNo));
}
- static reg_nodbg_iterator reg_nodbg_end() { return reg_nodbg_iterator(0); }
+ static reg_nodbg_iterator reg_nodbg_end() {
+ return reg_nodbg_iterator(nullptr);
+ }
inline iterator_range<reg_nodbg_iterator>
reg_nodbg_operands(unsigned Reg) const {
@@ -286,7 +292,7 @@ public:
return reg_instr_nodbg_iterator(getRegUseDefListHead(RegNo));
}
static reg_instr_nodbg_iterator reg_instr_nodbg_end() {
- return reg_instr_nodbg_iterator(0);
+ return reg_instr_nodbg_iterator(nullptr);
}
inline iterator_range<reg_instr_nodbg_iterator>
@@ -304,7 +310,7 @@ public:
return reg_bundle_nodbg_iterator(getRegUseDefListHead(RegNo));
}
static reg_bundle_nodbg_iterator reg_bundle_nodbg_end() {
- return reg_bundle_nodbg_iterator(0);
+ return reg_bundle_nodbg_iterator(nullptr);
}
inline iterator_range<reg_bundle_nodbg_iterator>
@@ -325,7 +331,7 @@ public:
def_iterator def_begin(unsigned RegNo) const {
return def_iterator(getRegUseDefListHead(RegNo));
}
- static def_iterator def_end() { return def_iterator(0); }
+ static def_iterator def_end() { return def_iterator(nullptr); }
inline iterator_range<def_iterator> def_operands(unsigned Reg) const {
return iterator_range<def_iterator>(def_begin(Reg), def_end());
@@ -338,7 +344,9 @@ public:
def_instr_iterator def_instr_begin(unsigned RegNo) const {
return def_instr_iterator(getRegUseDefListHead(RegNo));
}
- static def_instr_iterator def_instr_end() { return def_instr_iterator(0); }
+ static def_instr_iterator def_instr_end() {
+ return def_instr_iterator(nullptr);
+ }
inline iterator_range<def_instr_iterator>
def_instructions(unsigned Reg) const {
@@ -353,7 +361,9 @@ public:
def_bundle_iterator def_bundle_begin(unsigned RegNo) const {
return def_bundle_iterator(getRegUseDefListHead(RegNo));
}
- static def_bundle_iterator def_bundle_end() { return def_bundle_iterator(0); }
+ static def_bundle_iterator def_bundle_end() {
+ return def_bundle_iterator(nullptr);
+ }
inline iterator_range<def_bundle_iterator> def_bundles(unsigned Reg) const {
return iterator_range<def_bundle_iterator>(def_bundle_begin(Reg),
@@ -379,7 +389,7 @@ public:
use_iterator use_begin(unsigned RegNo) const {
return use_iterator(getRegUseDefListHead(RegNo));
}
- static use_iterator use_end() { return use_iterator(0); }
+ static use_iterator use_end() { return use_iterator(nullptr); }
inline iterator_range<use_iterator> use_operands(unsigned Reg) const {
return iterator_range<use_iterator>(use_begin(Reg), use_end());
@@ -392,7 +402,9 @@ public:
use_instr_iterator use_instr_begin(unsigned RegNo) const {
return use_instr_iterator(getRegUseDefListHead(RegNo));
}
- static use_instr_iterator use_instr_end() { return use_instr_iterator(0); }
+ static use_instr_iterator use_instr_end() {
+ return use_instr_iterator(nullptr);
+ }
inline iterator_range<use_instr_iterator>
use_instructions(unsigned Reg) const {
@@ -407,7 +419,9 @@ public:
use_bundle_iterator use_bundle_begin(unsigned RegNo) const {
return use_bundle_iterator(getRegUseDefListHead(RegNo));
}
- static use_bundle_iterator use_bundle_end() { return use_bundle_iterator(0); }
+ static use_bundle_iterator use_bundle_end() {
+ return use_bundle_iterator(nullptr);
+ }
inline iterator_range<use_bundle_iterator> use_bundles(unsigned Reg) const {
return iterator_range<use_bundle_iterator>(use_bundle_begin(Reg),
@@ -434,7 +448,9 @@ public:
use_nodbg_iterator use_nodbg_begin(unsigned RegNo) const {
return use_nodbg_iterator(getRegUseDefListHead(RegNo));
}
- static use_nodbg_iterator use_nodbg_end() { return use_nodbg_iterator(0); }
+ static use_nodbg_iterator use_nodbg_end() {
+ return use_nodbg_iterator(nullptr);
+ }
inline iterator_range<use_nodbg_iterator>
use_nodbg_operands(unsigned Reg) const {
@@ -451,7 +467,7 @@ public:
return use_instr_nodbg_iterator(getRegUseDefListHead(RegNo));
}
static use_instr_nodbg_iterator use_instr_nodbg_end() {
- return use_instr_nodbg_iterator(0);
+ return use_instr_nodbg_iterator(nullptr);
}
inline iterator_range<use_instr_nodbg_iterator>
@@ -469,7 +485,7 @@ public:
return use_bundle_nodbg_iterator(getRegUseDefListHead(RegNo));
}
static use_bundle_nodbg_iterator use_bundle_nodbg_end() {
- return use_bundle_nodbg_iterator(0);
+ return use_bundle_nodbg_iterator(nullptr);
}
inline iterator_range<use_bundle_nodbg_iterator>
@@ -779,7 +795,7 @@ public:
if (!ReturnUses) {
if (Op) {
if (Op->isUse())
- Op = 0;
+ Op = nullptr;
else
assert(!Op->isDebug() && "Can't have debug defs");
}
@@ -797,7 +813,7 @@ public:
MachineInstr, ptrdiff_t>::pointer pointer;
defusechain_iterator(const defusechain_iterator &I) : Op(I.Op) {}
- defusechain_iterator() : Op(0) {}
+ defusechain_iterator() : Op(nullptr) {}
bool operator==(const defusechain_iterator &x) const {
return Op == x.Op;
@@ -807,7 +823,7 @@ public:
}
/// atEnd - return true if this iterator is equal to reg_end() on the value.
- bool atEnd() const { return Op == 0; }
+ bool atEnd() const { return Op == nullptr; }
// Iterator traversal: forward iteration only
defusechain_iterator &operator++() { // Preincrement
@@ -882,7 +898,7 @@ public:
if (!ReturnUses) {
if (Op) {
if (Op->isUse())
- Op = 0;
+ Op = nullptr;
else
assert(!Op->isDebug() && "Can't have debug defs");
}
@@ -900,7 +916,7 @@ public:
MachineInstr, ptrdiff_t>::pointer pointer;
defusechain_instr_iterator(const defusechain_instr_iterator &I) : Op(I.Op){}
- defusechain_instr_iterator() : Op(0) {}
+ defusechain_instr_iterator() : Op(nullptr) {}
bool operator==(const defusechain_instr_iterator &x) const {
return Op == x.Op;
@@ -910,7 +926,7 @@ public:
}
/// atEnd - return true if this iterator is equal to reg_end() on the value.
- bool atEnd() const { return Op == 0; }
+ bool atEnd() const { return Op == nullptr; }
// Iterator traversal: forward iteration only
defusechain_instr_iterator &operator++() { // Preincrement
@@ -957,7 +973,7 @@ class PSetIterator {
const int *PSet;
unsigned Weight;
public:
- PSetIterator(): PSet(0), Weight(0) {}
+ PSetIterator(): PSet(nullptr), Weight(0) {}
PSetIterator(unsigned RegUnit, const MachineRegisterInfo *MRI) {
const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
if (TargetRegisterInfo::isVirtualRegister(RegUnit)) {
@@ -970,7 +986,7 @@ public:
Weight = TRI->getRegUnitWeight(RegUnit);
}
if (*PSet == -1)
- PSet = 0;
+ PSet = nullptr;
}
bool isValid() const { return PSet; }
@@ -982,7 +998,7 @@ public:
assert(isValid() && "Invalid PSetIterator.");
++PSet;
if (*PSet == -1)
- PSet = 0;
+ PSet = nullptr;
}
};
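Each def/use chain now has a matching *_operands(), *_instructions() or *_bundles() range, so register walks can use range-based for directly instead of pairing a begin call with the static end() iterators. A hedged sketch against the helpers above; both functions are purely demonstrations (LLVM has its own helpers for queries like these):

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;

// Return the unique defining instruction of Reg, or nullptr if there is none
// or more than one, using the new def_instructions() range.
static const MachineInstr *getSingleDef(const MachineRegisterInfo &MRI,
                                        unsigned Reg) {
  const MachineInstr *Def = nullptr;
  for (const MachineInstr &DefMI : MRI.def_instructions(Reg)) {
    if (Def)
      return nullptr;
    Def = &DefMI;
  }
  return Def;
}

// Count uses of Reg; DBG_VALUE operands are skipped by construction of the range.
static unsigned countNonDebugUses(const MachineRegisterInfo &MRI, unsigned Reg) {
  unsigned NumUses = 0;
  for (const MachineOperand &UseMO : MRI.use_nodbg_operands(Reg)) {
    (void)UseMO;
    ++NumUses;
  }
  return NumUses;
}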
diff --git a/include/llvm/CodeGen/MachineSSAUpdater.h b/include/llvm/CodeGen/MachineSSAUpdater.h
index 8fc367e..486a26e 100644
--- a/include/llvm/CodeGen/MachineSSAUpdater.h
+++ b/include/llvm/CodeGen/MachineSSAUpdater.h
@@ -57,7 +57,7 @@ public:
/// MachineSSAUpdater constructor. If InsertedPHIs is specified, it will be
/// filled in with all PHI Nodes created by rewriting.
explicit MachineSSAUpdater(MachineFunction &MF,
- SmallVectorImpl<MachineInstr*> *InsertedPHIs = 0);
+ SmallVectorImpl<MachineInstr*> *InsertedPHIs = nullptr);
~MachineSSAUpdater();
/// Initialize - Reset this object to get ready for a new set of SSA
diff --git a/include/llvm/CodeGen/MachineScheduler.h b/include/llvm/CodeGen/MachineScheduler.h
index c54300c..acd37e1 100644
--- a/include/llvm/CodeGen/MachineScheduler.h
+++ b/include/llvm/CodeGen/MachineScheduler.h
@@ -81,6 +81,8 @@
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
+#include <memory>
+
namespace llvm {
extern cl::opt<bool> ForceTopDown;
@@ -221,14 +223,14 @@ public:
class ScheduleDAGMI : public ScheduleDAGInstrs {
protected:
AliasAnalysis *AA;
- MachineSchedStrategy *SchedImpl;
+ std::unique_ptr<MachineSchedStrategy> SchedImpl;
/// Topo - A topological ordering for SUnits which permits fast IsReachable
/// and similar queries.
ScheduleDAGTopologicalSort Topo;
/// Ordered list of DAG postprocessing steps.
- std::vector<ScheduleDAGMutation*> Mutations;
+ std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;
/// The top of the unscheduled zone.
MachineBasicBlock::iterator CurrentTop;
@@ -246,17 +248,19 @@ protected:
unsigned NumInstrsScheduled;
#endif
public:
- ScheduleDAGMI(MachineSchedContext *C, MachineSchedStrategy *S, bool IsPostRA):
- ScheduleDAGInstrs(*C->MF, *C->MLI, *C->MDT, IsPostRA,
- /*RemoveKillFlags=*/IsPostRA, C->LIS),
- AA(C->AA), SchedImpl(S), Topo(SUnits, &ExitSU), CurrentTop(),
- CurrentBottom(), NextClusterPred(NULL), NextClusterSucc(NULL) {
+ ScheduleDAGMI(MachineSchedContext *C, std::unique_ptr<MachineSchedStrategy> S,
+ bool IsPostRA)
+ : ScheduleDAGInstrs(*C->MF, *C->MLI, *C->MDT, IsPostRA,
+ /*RemoveKillFlags=*/IsPostRA, C->LIS),
+ AA(C->AA), SchedImpl(std::move(S)), Topo(SUnits, &ExitSU), CurrentTop(),
+ CurrentBottom(), NextClusterPred(nullptr), NextClusterSucc(nullptr) {
#ifndef NDEBUG
NumInstrsScheduled = 0;
#endif
}
- virtual ~ScheduleDAGMI();
+ // Provide a vtable anchor
+ ~ScheduleDAGMI() override;
/// Return true if this DAG supports VReg liveness and RegPressure.
virtual bool hasVRegLiveness() const { return false; }
@@ -266,8 +270,8 @@ public:
/// building and before MachineSchedStrategy initialization.
///
/// ScheduleDAGMI takes ownership of the Mutation object.
- void addMutation(ScheduleDAGMutation *Mutation) {
- Mutations.push_back(Mutation);
+ void addMutation(std::unique_ptr<ScheduleDAGMutation> Mutation) {
+ Mutations.push_back(std::move(Mutation));
}
/// \brief True if an edge can be added from PredSU to SuccSU without creating
@@ -375,11 +379,12 @@ protected:
RegPressureTracker BotRPTracker;
public:
- ScheduleDAGMILive(MachineSchedContext *C, MachineSchedStrategy *S):
- ScheduleDAGMI(C, S, /*IsPostRA=*/false), RegClassInfo(C->RegClassInfo),
- DFSResult(0), ShouldTrackPressure(false), RPTracker(RegPressure),
- TopRPTracker(TopPressure), BotRPTracker(BotPressure)
- {}
+ ScheduleDAGMILive(MachineSchedContext *C,
+ std::unique_ptr<MachineSchedStrategy> S)
+ : ScheduleDAGMI(C, std::move(S), /*IsPostRA=*/false),
+ RegClassInfo(C->RegClassInfo), DFSResult(nullptr),
+ ShouldTrackPressure(false), RPTracker(RegPressure),
+ TopRPTracker(TopPressure), BotRPTracker(BotPressure) {}
virtual ~ScheduleDAGMILive();
@@ -628,9 +633,9 @@ public:
/// Pending queues extend the ready queues with the same ID and the
/// PendingFlag set.
SchedBoundary(unsigned ID, const Twine &Name):
- DAG(0), SchedModel(0), Rem(0), Available(ID, Name+".A"),
+ DAG(nullptr), SchedModel(nullptr), Rem(nullptr), Available(ID, Name+".A"),
Pending(ID << LogMaxQID, Name+".P"),
- HazardRec(0) {
+ HazardRec(nullptr) {
reset();
}
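ScheduleDAGMI now owns its scheduling strategy and its DAG mutations through std::unique_ptr, so callers hand ownership over explicitly with std::move rather than passing raw pointers they must not delete. A standalone sketch of the ownership pattern the new constructor and addMutation() follow, with hypothetical Strategy/Mutation types rather than the LLVM classes:

#include <memory>
#include <utility>
#include <vector>

struct Strategy { virtual ~Strategy() {} };
struct Mutation { virtual ~Mutation() {} };

class Scheduler {
  std::unique_ptr<Strategy> Impl;                    // was a raw Strategy *
  std::vector<std::unique_ptr<Mutation>> Mutations;  // was a vector of raw pointers

public:
  explicit Scheduler(std::unique_ptr<Strategy> S) : Impl(std::move(S)) {}

  // The scheduler takes ownership; after the call the caller's pointer is null.
  void addMutation(std::unique_ptr<Mutation> M) {
    Mutations.push_back(std::move(M));
  }
};

int main() {
  Scheduler Sched(std::unique_ptr<Strategy>(new Strategy()));
  Sched.addMutation(std::unique_ptr<Mutation>(new Mutation()));
  return 0;
}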
diff --git a/include/llvm/CodeGen/MachineTraceMetrics.h b/include/llvm/CodeGen/MachineTraceMetrics.h
index dc0bc1d..323b694 100644
--- a/include/llvm/CodeGen/MachineTraceMetrics.h
+++ b/include/llvm/CodeGen/MachineTraceMetrics.h
@@ -154,7 +154,7 @@ public:
unsigned InstrHeight;
TraceBlockInfo() :
- Pred(0), Succ(0),
+ Pred(nullptr), Succ(nullptr),
InstrDepth(~0u), InstrHeight(~0u),
HasValidInstrDepths(false), HasValidInstrHeights(false) {}
diff --git a/include/llvm/CodeGen/MachineValueType.h b/include/llvm/CodeGen/MachineValueType.h
index 84053ca..ad215ec 100644
--- a/include/llvm/CodeGen/MachineValueType.h
+++ b/include/llvm/CodeGen/MachineValueType.h
@@ -16,6 +16,7 @@
#define LLVM_CODEGEN_MACHINEVALUETYPE_H
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
namespace llvm {
diff --git a/include/llvm/CodeGen/PBQP/CostAllocator.h b/include/llvm/CodeGen/PBQP/CostAllocator.h
index 1646334..ff62c09 100644
--- a/include/llvm/CodeGen/PBQP/CostAllocator.h
+++ b/include/llvm/CodeGen/PBQP/CostAllocator.h
@@ -54,7 +54,7 @@ public:
entry->incRef();
}
PoolRef& operator=(const PoolRef &r) {
- assert(entry != 0 && "entry should not be null.");
+ assert(entry != nullptr && "entry should not be null.");
PoolEntry *temp = r.entry;
temp->incRef();
entry->decRef();
diff --git a/include/llvm/CodeGen/PBQP/Graph.h b/include/llvm/CodeGen/PBQP/Graph.h
index 07c3337..a55f0ea 100644
--- a/include/llvm/CodeGen/PBQP/Graph.h
+++ b/include/llvm/CodeGen/PBQP/Graph.h
@@ -29,12 +29,12 @@ namespace PBQP {
typedef unsigned NodeId;
typedef unsigned EdgeId;
- /// \brief Returns a value representing an invalid (non-existant) node.
+ /// \brief Returns a value representing an invalid (non-existent) node.
static NodeId invalidNodeId() {
return std::numeric_limits<NodeId>::max();
}
- /// \brief Returns a value representing an invalid (non-existant) edge.
+ /// \brief Returns a value representing an invalid (non-existent) edge.
static EdgeId invalidEdgeId() {
return std::numeric_limits<EdgeId>::max();
}
@@ -336,7 +336,7 @@ namespace PBQP {
/// each node in the graph, and handleAddEdge for each edge, to give the
  /// solver an opportunity to set up any required metadata.
void setSolver(SolverT &S) {
- assert(Solver == nullptr && "Solver already set. Call unsetSolver().");
+ assert(!Solver && "Solver already set. Call unsetSolver().");
Solver = &S;
for (auto NId : nodeIds())
Solver->handleAddNode(NId);
@@ -346,7 +346,7 @@ namespace PBQP {
/// \brief Release from solver instance.
void unsetSolver() {
- assert(Solver != nullptr && "Solver not set.");
+ assert(Solver && "Solver not set.");
Solver = nullptr;
}
diff --git a/include/llvm/CodeGen/PBQP/RegAllocSolver.h b/include/llvm/CodeGen/PBQP/RegAllocSolver.h
index 79ff6b4..977c348 100644
--- a/include/llvm/CodeGen/PBQP/RegAllocSolver.h
+++ b/include/llvm/CodeGen/PBQP/RegAllocSolver.h
@@ -86,7 +86,7 @@ namespace PBQP {
ConservativelyAllocatable,
NotProvablyAllocatable } ReductionState;
- NodeMetadata() : RS(Unprocessed), DeniedOpts(0), OptUnsafeEdges(0) {}
+ NodeMetadata() : RS(Unprocessed), DeniedOpts(0), OptUnsafeEdges(nullptr){}
~NodeMetadata() { delete[] OptUnsafeEdges; }
void setup(const Vector& Costs) {
@@ -346,7 +346,7 @@ namespace PBQP {
typedef Graph<RegAllocSolverImpl> Graph;
- Solution solve(Graph& G) {
+ inline Solution solve(Graph& G) {
if (G.empty())
return Solution();
RegAllocSolverImpl RegAllocSolver(G);
diff --git a/include/llvm/CodeGen/Passes.h b/include/llvm/CodeGen/Passes.h
index 5d68f86..35210f1 100644
--- a/include/llvm/CodeGen/Passes.h
+++ b/include/llvm/CodeGen/Passes.h
@@ -59,7 +59,7 @@ class IdentifyingPassPtr {
};
bool IsInstance;
public:
- IdentifyingPassPtr() : P(0), IsInstance(false) {}
+ IdentifyingPassPtr() : P(nullptr), IsInstance(false) {}
IdentifyingPassPtr(AnalysisID IDPtr) : ID(IDPtr), IsInstance(false) {}
IdentifyingPassPtr(Pass *InstancePtr) : P(InstancePtr), IsInstance(true) {}
@@ -133,10 +133,6 @@ public:
return *static_cast<TMC*>(TM);
}
- const TargetLowering *getTargetLowering() const {
- return TM->getTargetLowering();
- }
-
//
void setInitialized() { Initialized = true; }
@@ -151,7 +147,7 @@ public:
void setStartStopPasses(AnalysisID Start, AnalysisID Stop) {
StartAfter = Start;
StopAfter = Stop;
- Started = (StartAfter == 0);
+ Started = (StartAfter == nullptr);
}
void setDisableVerify(bool Disable) { setOpt(DisableVerify, Disable); }
@@ -218,14 +214,14 @@ public:
/// Return NULL to select the default (generic) machine scheduler.
virtual ScheduleDAGInstrs *
createMachineScheduler(MachineSchedContext *C) const {
- return 0;
+ return nullptr;
}
/// Similar to createMachineScheduler but used when postRA machine scheduling
/// is enabled.
virtual ScheduleDAGInstrs *
createPostMachineScheduler(MachineSchedContext *C) const {
- return 0;
+ return nullptr;
}
protected:
@@ -349,6 +345,8 @@ protected:
/// List of target independent CodeGen pass IDs.
namespace llvm {
+ FunctionPass *createAtomicExpandLoadLinkedPass(const TargetMachine *TM);
+
/// \brief Create a basic TargetTransformInfo analysis pass.
///
/// This pass implements the target transform info analysis using the target
@@ -372,7 +370,10 @@ namespace llvm {
/// createCodeGenPreparePass - Transform the code to expose more pattern
/// matching during instruction selection.
- FunctionPass *createCodeGenPreparePass(const TargetMachine *TM = 0);
+ FunctionPass *createCodeGenPreparePass(const TargetMachine *TM = nullptr);
+
+ /// AtomicExpandLoadLinkedID -- FIXME
+ extern char &AtomicExpandLoadLinkedID;
/// MachineLoopInfo - This pass is a loop analysis pass.
extern char &MachineLoopInfoID;
@@ -547,7 +548,7 @@ namespace llvm {
  /// createMachineVerifierPass - This pass verifies generated machine code
/// instructions for correctness.
///
- FunctionPass *createMachineVerifierPass(const char *Banner = 0);
+ FunctionPass *createMachineVerifierPass(const char *Banner = nullptr);
/// createDwarfEHPass - This pass mulches exception handling code into a form
/// adapted to code generation. Required if using dwarf exception handling.
diff --git a/include/llvm/CodeGen/PseudoSourceValue.h b/include/llvm/CodeGen/PseudoSourceValue.h
index 0af8915..cc3e25a 100644
--- a/include/llvm/CodeGen/PseudoSourceValue.h
+++ b/include/llvm/CodeGen/PseudoSourceValue.h
@@ -18,21 +18,32 @@
namespace llvm {
class MachineFrameInfo;
+ class MachineMemOperand;
class raw_ostream;
+ raw_ostream &operator<<(raw_ostream &OS, const MachineMemOperand &MMO);
+
/// PseudoSourceValue - Special value supplied for machine level alias
  /// analysis. It indicates that a memory access references the function's
/// stack frame (e.g., a spill slot), below the stack frame (e.g., argument
/// space), or constant pool.
- class PseudoSourceValue : public Value {
+ class PseudoSourceValue {
private:
+ friend raw_ostream &llvm::operator<<(raw_ostream &OS,
+ const MachineMemOperand &MMO);
+
/// printCustom - Implement printing for PseudoSourceValue. This is called
/// from Value::print or Value's operator<<.
///
- void printCustom(raw_ostream &O) const override;
+ virtual void printCustom(raw_ostream &O) const;
public:
- explicit PseudoSourceValue(enum ValueTy Subclass = PseudoSourceValueVal);
+ /// isFixed - Whether this is a FixedStackPseudoSourceValue.
+ bool isFixed;
+
+ explicit PseudoSourceValue(bool isFixed = false);
+
+ virtual ~PseudoSourceValue();
/// isConstant - Test whether the memory pointed to by this
/// PseudoSourceValue has a constant value.
@@ -47,14 +58,6 @@ namespace llvm {
/// PseudoSourceValue can ever alias an LLVM IR Value.
virtual bool mayAlias(const MachineFrameInfo *) const;
- /// classof - Methods for support type inquiry through isa, cast, and
- /// dyn_cast:
- ///
- static inline bool classof(const Value *V) {
- return V->getValueID() == PseudoSourceValueVal ||
- V->getValueID() == FixedStackPseudoSourceValueVal;
- }
-
/// A pseudo source value referencing a fixed stack frame entry,
/// e.g., a spill slot.
static const PseudoSourceValue *getFixedStack(int FI);
@@ -84,13 +87,13 @@ namespace llvm {
const int FI;
public:
explicit FixedStackPseudoSourceValue(int fi) :
- PseudoSourceValue(FixedStackPseudoSourceValueVal), FI(fi) {}
+ PseudoSourceValue(true), FI(fi) {}
/// classof - Methods for support type inquiry through isa, cast, and
/// dyn_cast:
///
- static inline bool classof(const Value *V) {
- return V->getValueID() == FixedStackPseudoSourceValueVal;
+ static inline bool classof(const PseudoSourceValue *V) {
+ return V->isFixed == true;
}
bool isConstant(const MachineFrameInfo *MFI) const override;
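Since PseudoSourceValue no longer derives from Value, isa/dyn_cast queries now dispatch through the classof shown above, which keys on the public isFixed flag. A minimal sketch of a type check under the new hierarchy; the helper function and its name are illustrative only:

    #include "llvm/CodeGen/PseudoSourceValue.h"
    #include "llvm/Support/Casting.h"
    using namespace llvm;

    // Returns true if PSV refers to a fixed stack slot (e.g. a spill slot).
    // isa<> now uses classof(const PseudoSourceValue *), i.e. the isFixed flag,
    // rather than the removed Value ID check.
    static bool refersToFixedStack(const PseudoSourceValue *PSV) {
      return PSV && isa<FixedStackPseudoSourceValue>(PSV);
    }
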
diff --git a/include/llvm/CodeGen/RegAllocPBQP.h b/include/llvm/CodeGen/RegAllocPBQP.h
index efd7c61..6343bb7 100644
--- a/include/llvm/CodeGen/RegAllocPBQP.h
+++ b/include/llvm/CodeGen/RegAllocPBQP.h
@@ -159,7 +159,7 @@ namespace llvm {
FunctionPass *
createPBQPRegisterAllocator(std::unique_ptr<PBQPBuilder> &builder,
- char *customPassID = 0);
+ char *customPassID = nullptr);
}
#endif /* LLVM_CODEGEN_REGALLOCPBQP_H */
diff --git a/include/llvm/CodeGen/RegisterClassInfo.h b/include/llvm/CodeGen/RegisterClassInfo.h
index 9ec12bd..d784dfb 100644
--- a/include/llvm/CodeGen/RegisterClassInfo.h
+++ b/include/llvm/CodeGen/RegisterClassInfo.h
@@ -19,7 +19,6 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/Target/TargetRegisterInfo.h"
namespace llvm {
@@ -31,7 +30,7 @@ class RegisterClassInfo {
bool ProperSubClass;
uint8_t MinCost;
uint16_t LastCostChange;
- OwningArrayPtr<MCPhysReg> Order;
+ std::unique_ptr<MCPhysReg[]> Order;
RCInfo()
: Tag(0), NumRegs(0), ProperSubClass(false), MinCost(0),
@@ -43,7 +42,7 @@ class RegisterClassInfo {
};
// Brief cached information for each register class.
- OwningArrayPtr<RCInfo> RegClass;
+ std::unique_ptr<RCInfo[]> RegClass;
// Tag changes whenever cached information needs to be recomputed. An RCInfo
// entry is valid when its tag matches.
@@ -54,7 +53,7 @@ class RegisterClassInfo {
// Callee saved registers of last MF. Assumed to be valid until the next
// runOnFunction() call.
- const uint16_t *CalleeSaved;
+ const MCPhysReg *CalleeSaved;
// Map register number to CalleeSaved index + 1;
SmallVector<uint8_t, 4> CSRNum;
@@ -62,7 +61,7 @@ class RegisterClassInfo {
// Reserved registers in the current MF.
BitVector Reserved;
- OwningArrayPtr<unsigned> PSetLimits;
+ std::unique_ptr<unsigned[]> PSetLimits;
// Compute all information about RC.
void compute(const TargetRegisterClass *RC) const;
diff --git a/include/llvm/CodeGen/RegisterPressure.h b/include/llvm/CodeGen/RegisterPressure.h
index a801d1d..c11a6ac 100644
--- a/include/llvm/CodeGen/RegisterPressure.h
+++ b/include/llvm/CodeGen/RegisterPressure.h
@@ -158,7 +158,7 @@ class PressureDiffs {
unsigned Size;
unsigned Max;
public:
- PressureDiffs(): PDiffArray(0), Size(0), Max(0) {}
+ PressureDiffs(): PDiffArray(nullptr), Size(0), Max(0) {}
~PressureDiffs() { free(PDiffArray); }
void clear() { Size = 0; }
@@ -285,12 +285,12 @@ class RegPressureTracker {
public:
RegPressureTracker(IntervalPressure &rp) :
- MF(0), TRI(0), RCI(0), LIS(0), MBB(0), P(rp), RequireIntervals(true),
- TrackUntiedDefs(false) {}
+ MF(nullptr), TRI(nullptr), RCI(nullptr), LIS(nullptr), MBB(nullptr), P(rp),
+ RequireIntervals(true), TrackUntiedDefs(false) {}
RegPressureTracker(RegionPressure &rp) :
- MF(0), TRI(0), RCI(0), LIS(0), MBB(0), P(rp), RequireIntervals(false),
- TrackUntiedDefs(false) {}
+ MF(nullptr), TRI(nullptr), RCI(nullptr), LIS(nullptr), MBB(nullptr), P(rp),
+ RequireIntervals(false), TrackUntiedDefs(false) {}
void reset();
@@ -318,7 +318,8 @@ public:
SlotIndex getCurrSlot() const;
/// Recede across the previous instruction.
- bool recede(SmallVectorImpl<unsigned> *LiveUses = 0, PressureDiff *PDiff = 0);
+ bool recede(SmallVectorImpl<unsigned> *LiveUses = nullptr,
+ PressureDiff *PDiff = nullptr);
/// Advance across the current instruction.
bool advance();
@@ -393,7 +394,7 @@ public:
MaxPressureLimit);
assert(isBottomClosed() && "Uninitialized pressure tracker");
- return getMaxUpwardPressureDelta(MI, 0, Delta, CriticalPSets,
+ return getMaxUpwardPressureDelta(MI, nullptr, Delta, CriticalPSets,
MaxPressureLimit);
}
diff --git a/include/llvm/CodeGen/RegisterScavenging.h b/include/llvm/CodeGen/RegisterScavenging.h
index 28ebe53..335dd7f 100644
--- a/include/llvm/CodeGen/RegisterScavenging.h
+++ b/include/llvm/CodeGen/RegisterScavenging.h
@@ -42,7 +42,7 @@ class RegScavenger {
/// Information on scavenged registers (held in a spill slot).
struct ScavengedInfo {
- ScavengedInfo(int FI = -1) : FrameIndex(FI), Reg(0), Restore(NULL) {}
+ ScavengedInfo(int FI = -1) : FrameIndex(FI), Reg(0), Restore(nullptr) {}
/// A spill slot used for scavenging a register post register allocation.
int FrameIndex;
@@ -73,7 +73,7 @@ class RegScavenger {
public:
RegScavenger()
- : MBB(NULL), NumPhysRegs(0), Tracking(false) {}
+ : MBB(nullptr), NumPhysRegs(0), Tracking(false) {}
  /// enterBasicBlock - Start tracking liveness from the beginning of the specified
/// basic block.
@@ -104,7 +104,7 @@ public:
/// skipTo - Move the internal MBB iterator but do not update register states.
void skipTo(MachineBasicBlock::iterator I) {
- if (I == MachineBasicBlock::iterator(NULL))
+ if (I == MachineBasicBlock::iterator(nullptr))
Tracking = false;
MBBI = I;
}
diff --git a/include/llvm/CodeGen/ResourcePriorityQueue.h b/include/llvm/CodeGen/ResourcePriorityQueue.h
index 7ae9111..114fe7c 100644
--- a/include/llvm/CodeGen/ResourcePriorityQueue.h
+++ b/include/llvm/CodeGen/ResourcePriorityQueue.h
@@ -92,7 +92,7 @@ namespace llvm {
void updateNode(const SUnit *SU) override {}
void releaseState() override {
- SUnits = 0;
+ SUnits = nullptr;
}
unsigned getLatency(unsigned NodeNum) const {
diff --git a/include/llvm/CodeGen/ScheduleDAG.h b/include/llvm/CodeGen/ScheduleDAG.h
index 4886e5c..5a65d59 100644
--- a/include/llvm/CodeGen/ScheduleDAG.h
+++ b/include/llvm/CodeGen/ScheduleDAG.h
@@ -95,7 +95,7 @@ namespace llvm {
/// SDep - Construct a null SDep. This is only for use by container
/// classes which require default constructors. SUnits may not
/// have null SDep edges.
- SDep() : Dep(0, Data) {}
+ SDep() : Dep(nullptr, Data) {}
/// SDep - Construct an SDep with the specified values.
SDep(SUnit *S, Kind kind, unsigned Reg)
@@ -317,46 +317,49 @@ namespace llvm {
/// SUnit - Construct an SUnit for pre-regalloc scheduling to represent
/// an SDNode and any nodes flagged to it.
SUnit(SDNode *node, unsigned nodenum)
- : Node(node), Instr(0), OrigNode(0), SchedClass(0), NodeNum(nodenum),
- NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
- NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0), NumRegDefsLeft(0),
- Latency(0), isVRegCycle(false), isCall(false), isCallOp(false),
- isTwoAddress(false), isCommutable(false), hasPhysRegUses(false),
- hasPhysRegDefs(false), hasPhysRegClobbers(false), isPending(false),
- isAvailable(false), isScheduled(false), isScheduleHigh(false),
- isScheduleLow(false), isCloned(false), isUnbuffered(false),
- hasReservedResource(false), SchedulingPref(Sched::None),
- isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
- TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}
+ : Node(node), Instr(nullptr), OrigNode(nullptr), SchedClass(nullptr),
+ NodeNum(nodenum), NodeQueueId(0), NumPreds(0), NumSuccs(0),
+ NumPredsLeft(0), NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0),
+ NumRegDefsLeft(0), Latency(0), isVRegCycle(false), isCall(false),
+ isCallOp(false), isTwoAddress(false), isCommutable(false),
+ hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
+ isPending(false), isAvailable(false), isScheduled(false),
+ isScheduleHigh(false), isScheduleLow(false), isCloned(false),
+ isUnbuffered(false), hasReservedResource(false),
+ SchedulingPref(Sched::None), isDepthCurrent(false),
+ isHeightCurrent(false), Depth(0), Height(0), TopReadyCycle(0),
+ BotReadyCycle(0), CopyDstRC(nullptr), CopySrcRC(nullptr) {}
/// SUnit - Construct an SUnit for post-regalloc scheduling to represent
/// a MachineInstr.
SUnit(MachineInstr *instr, unsigned nodenum)
- : Node(0), Instr(instr), OrigNode(0), SchedClass(0), NodeNum(nodenum),
- NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
- NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0), NumRegDefsLeft(0),
- Latency(0), isVRegCycle(false), isCall(false), isCallOp(false),
- isTwoAddress(false), isCommutable(false), hasPhysRegUses(false),
- hasPhysRegDefs(false), hasPhysRegClobbers(false), isPending(false),
- isAvailable(false), isScheduled(false), isScheduleHigh(false),
- isScheduleLow(false), isCloned(false), isUnbuffered(false),
- hasReservedResource(false), SchedulingPref(Sched::None),
- isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
- TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}
+ : Node(nullptr), Instr(instr), OrigNode(nullptr), SchedClass(nullptr),
+ NodeNum(nodenum), NodeQueueId(0), NumPreds(0), NumSuccs(0),
+ NumPredsLeft(0), NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0),
+ NumRegDefsLeft(0), Latency(0), isVRegCycle(false), isCall(false),
+ isCallOp(false), isTwoAddress(false), isCommutable(false),
+ hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
+ isPending(false), isAvailable(false), isScheduled(false),
+ isScheduleHigh(false), isScheduleLow(false), isCloned(false),
+ isUnbuffered(false), hasReservedResource(false),
+ SchedulingPref(Sched::None), isDepthCurrent(false),
+ isHeightCurrent(false), Depth(0), Height(0), TopReadyCycle(0),
+ BotReadyCycle(0), CopyDstRC(nullptr), CopySrcRC(nullptr) {}
/// SUnit - Construct a placeholder SUnit.
SUnit()
- : Node(0), Instr(0), OrigNode(0), SchedClass(0), NodeNum(BoundaryID),
- NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
- NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0), NumRegDefsLeft(0),
- Latency(0), isVRegCycle(false), isCall(false), isCallOp(false),
- isTwoAddress(false), isCommutable(false), hasPhysRegUses(false),
- hasPhysRegDefs(false), hasPhysRegClobbers(false), isPending(false),
- isAvailable(false), isScheduled(false), isScheduleHigh(false),
- isScheduleLow(false), isCloned(false), isUnbuffered(false),
- hasReservedResource(false), SchedulingPref(Sched::None),
- isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
- TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}
+ : Node(nullptr), Instr(nullptr), OrigNode(nullptr), SchedClass(nullptr),
+ NodeNum(BoundaryID), NodeQueueId(0), NumPreds(0), NumSuccs(0),
+ NumPredsLeft(0), NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0),
+ NumRegDefsLeft(0), Latency(0), isVRegCycle(false), isCall(false),
+ isCallOp(false), isTwoAddress(false), isCommutable(false),
+ hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
+ isPending(false), isAvailable(false), isScheduled(false),
+ isScheduleHigh(false), isScheduleLow(false), isCloned(false),
+ isUnbuffered(false), hasReservedResource(false),
+ SchedulingPref(Sched::None), isDepthCurrent(false),
+ isHeightCurrent(false), Depth(0), Height(0), TopReadyCycle(0),
+ BotReadyCycle(0), CopyDstRC(nullptr), CopySrcRC(nullptr) {}
/// \brief Boundary nodes are placeholders for the boundary of the
/// scheduling region.
diff --git a/include/llvm/CodeGen/ScheduleDAGInstrs.h b/include/llvm/CodeGen/ScheduleDAGInstrs.h
index 72bbe8b..e6754a2 100644
--- a/include/llvm/CodeGen/ScheduleDAGInstrs.h
+++ b/include/llvm/CodeGen/ScheduleDAGInstrs.h
@@ -158,7 +158,7 @@ namespace llvm {
const MachineDominatorTree &mdt,
bool IsPostRAFlag,
bool RemoveKillFlags = false,
- LiveIntervals *LIS = 0);
+ LiveIntervals *LIS = nullptr);
virtual ~ScheduleDAGInstrs() {}
@@ -206,8 +206,9 @@ namespace llvm {
/// buildSchedGraph - Build SUnits from the MachineBasicBlock that we are
  /// given as input.
- void buildSchedGraph(AliasAnalysis *AA, RegPressureTracker *RPTracker = 0,
- PressureDiffs *PDiffs = 0);
+ void buildSchedGraph(AliasAnalysis *AA,
+ RegPressureTracker *RPTracker = nullptr,
+ PressureDiffs *PDiffs = nullptr);
/// addSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to scheduling barrier. We want to
@@ -259,10 +260,10 @@ namespace llvm {
  /// newSUnit - Creates a new SUnit and returns a pointer to it.
inline SUnit *ScheduleDAGInstrs::newSUnit(MachineInstr *MI) {
#ifndef NDEBUG
- const SUnit *Addr = SUnits.empty() ? 0 : &SUnits[0];
+ const SUnit *Addr = SUnits.empty() ? nullptr : &SUnits[0];
#endif
SUnits.push_back(SUnit(MI, (unsigned)SUnits.size()));
- assert((Addr == 0 || Addr == &SUnits[0]) &&
+ assert((Addr == nullptr || Addr == &SUnits[0]) &&
"SUnits std::vector reallocated on the fly!");
SUnits.back().OrigNode = &SUnits.back();
return &SUnits.back();
@@ -272,7 +273,7 @@ namespace llvm {
inline SUnit *ScheduleDAGInstrs::getSUnit(MachineInstr *MI) const {
DenseMap<MachineInstr*, SUnit*>::const_iterator I = MISUnitMap.find(MI);
if (I == MISUnitMap.end())
- return 0;
+ return nullptr;
return I->second;
}
} // namespace llvm
diff --git a/include/llvm/CodeGen/ScoreboardHazardRecognizer.h b/include/llvm/CodeGen/ScoreboardHazardRecognizer.h
index fbbbb0c..ab14c2d 100644
--- a/include/llvm/CodeGen/ScoreboardHazardRecognizer.h
+++ b/include/llvm/CodeGen/ScoreboardHazardRecognizer.h
@@ -47,7 +47,7 @@ class ScoreboardHazardRecognizer : public ScheduleHazardRecognizer {
// Indices into the Scoreboard that represent the current cycle.
size_t Head;
public:
- Scoreboard():Data(NULL), Depth(0), Head(0) { }
+ Scoreboard():Data(nullptr), Depth(0), Head(0) { }
~Scoreboard() {
delete[] Data;
}
@@ -62,7 +62,7 @@ class ScoreboardHazardRecognizer : public ScheduleHazardRecognizer {
}
void reset(size_t d = 1) {
- if (Data == NULL) {
+ if (!Data) {
Depth = d;
Data = new unsigned[Depth];
}
diff --git a/include/llvm/CodeGen/SelectionDAG.h b/include/llvm/CodeGen/SelectionDAG.h
index a30656a..d9c38c0 100644
--- a/include/llvm/CodeGen/SelectionDAG.h
+++ b/include/llvm/CodeGen/SelectionDAG.h
@@ -392,7 +392,7 @@ public:
SDVTList getVTList(EVT VT1, EVT VT2);
SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3);
SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4);
- SDVTList getVTList(const EVT *VTs, unsigned NumVTs);
+ SDVTList getVTList(ArrayRef<EVT> VTs);
//===--------------------------------------------------------------------===//
// Node creation methods.
@@ -496,7 +496,8 @@ public:
SDValue Glue) {
SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, getRegister(Reg, N.getValueType()), N, Glue };
- return getNode(ISD::CopyToReg, dl, VTs, Ops, Glue.getNode() ? 4 : 3);
+ return getNode(ISD::CopyToReg, dl, VTs,
+ ArrayRef<SDValue>(Ops, Glue.getNode() ? 4 : 3));
}
// Similar to last getCopyToReg() except parameter Reg is a SDValue
@@ -504,13 +505,14 @@ public:
SDValue Glue) {
SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, Reg, N, Glue };
- return getNode(ISD::CopyToReg, dl, VTs, Ops, Glue.getNode() ? 4 : 3);
+ return getNode(ISD::CopyToReg, dl, VTs,
+ ArrayRef<SDValue>(Ops, Glue.getNode() ? 4 : 3));
}
SDValue getCopyFromReg(SDValue Chain, SDLoc dl, unsigned Reg, EVT VT) {
SDVTList VTs = getVTList(VT, MVT::Other);
SDValue Ops[] = { Chain, getRegister(Reg, VT) };
- return getNode(ISD::CopyFromReg, dl, VTs, Ops, 2);
+ return getNode(ISD::CopyFromReg, dl, VTs, Ops);
}
// This version of the getCopyFromReg method takes an extra operand, which
@@ -520,7 +522,8 @@ public:
SDValue Glue) {
SDVTList VTs = getVTList(VT, MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, getRegister(Reg, VT), Glue };
- return getNode(ISD::CopyFromReg, dl, VTs, Ops, Glue.getNode() ? 3 : 2);
+ return getNode(ISD::CopyFromReg, dl, VTs,
+ ArrayRef<SDValue>(Ops, Glue.getNode() ? 3 : 2));
}
SDValue getCondCode(ISD::CondCode Cond);
@@ -554,16 +557,24 @@ public:
/// value assuming it was the smaller SrcTy value.
SDValue getZeroExtendInReg(SDValue Op, SDLoc DL, EVT SrcTy);
+ /// getBoolExtOrTrunc - Convert Op, which must be of integer type, to the
+ /// integer type VT, by using an extension appropriate for the target's
+ /// BooleanContent or truncating it.
+ SDValue getBoolExtOrTrunc(SDValue Op, SDLoc SL, EVT VT);
+
/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
SDValue getNOT(SDLoc DL, SDValue Val, EVT VT);
+ /// \brief Create a logical NOT operation as (XOR Val, BooleanOne).
+ SDValue getLogicalNOT(SDLoc DL, SDValue Val, EVT VT);
+
/// getCALLSEQ_START - Return a new CALLSEQ_START node, which always must have
/// a glue result (to ensure it's not CSE'd). CALLSEQ_START does not have a
/// useful SDLoc.
SDValue getCALLSEQ_START(SDValue Chain, SDValue Op, SDLoc DL) {
SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
SDValue Ops[] = { Chain, Op };
- return getNode(ISD::CALLSEQ_START, DL, VTs, Ops, 2);
+ return getNode(ISD::CALLSEQ_START, DL, VTs, Ops);
}
/// getCALLSEQ_END - Return a new CALLSEQ_END node, which always must have a
@@ -576,9 +587,9 @@ public:
Ops.push_back(Chain);
Ops.push_back(Op1);
Ops.push_back(Op2);
- Ops.push_back(InGlue);
- return getNode(ISD::CALLSEQ_END, DL, NodeTys, &Ops[0],
- (unsigned)Ops.size() - (InGlue.getNode() == 0 ? 1 : 0));
+ if (InGlue.getNode())
+ Ops.push_back(InGlue);
+ return getNode(ISD::CALLSEQ_END, DL, NodeTys, Ops);
}
/// getUNDEF - Return an UNDEF node. UNDEF does not have a useful SDLoc.
@@ -604,17 +615,14 @@ public:
SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT,
SDValue N1, SDValue N2, SDValue N3, SDValue N4,
SDValue N5);
+ SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT, ArrayRef<SDUse> Ops);
SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT,
- const SDUse *Ops, unsigned NumOps);
- SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT,
- const SDValue *Ops, unsigned NumOps);
+ ArrayRef<SDValue> Ops);
SDValue getNode(unsigned Opcode, SDLoc DL,
ArrayRef<EVT> ResultTys,
- const SDValue *Ops, unsigned NumOps);
- SDValue getNode(unsigned Opcode, SDLoc DL, const EVT *VTs, unsigned NumVTs,
- const SDValue *Ops, unsigned NumOps);
+ ArrayRef<SDValue> Ops);
SDValue getNode(unsigned Opcode, SDLoc DL, SDVTList VTs,
- const SDValue *Ops, unsigned NumOps);
+ ArrayRef<SDValue> Ops);
SDValue getNode(unsigned Opcode, SDLoc DL, SDVTList VTs);
SDValue getNode(unsigned Opcode, SDLoc DL, SDVTList VTs, SDValue N);
SDValue getNode(unsigned Opcode, SDLoc DL, SDVTList VTs,
@@ -705,7 +713,7 @@ public:
/// getAtomic - Gets a node for an atomic op, produces result (if relevant)
/// and chain and takes 2 operands.
SDValue getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, SDValue Chain,
- SDValue Ptr, SDValue Val, const Value* PtrVal,
+ SDValue Ptr, SDValue Val, const Value *PtrVal,
unsigned Alignment, AtomicOrdering Ordering,
SynchronizationScope SynchScope);
SDValue getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, SDValue Chain,
@@ -716,11 +724,6 @@ public:
/// getAtomic - Gets a node for an atomic op, produces result and chain and
/// takes 1 operand.
SDValue getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, EVT VT,
- SDValue Chain, SDValue Ptr, const Value* PtrVal,
- unsigned Alignment,
- AtomicOrdering Ordering,
- SynchronizationScope SynchScope);
- SDValue getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, EVT VT,
SDValue Chain, SDValue Ptr, MachineMemOperand *MMO,
AtomicOrdering Ordering,
SynchronizationScope SynchScope);
@@ -728,37 +731,30 @@ public:
/// getAtomic - Gets a node for an atomic op, produces result and chain and
/// takes N operands.
SDValue getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, SDVTList VTList,
- SDValue *Ops, unsigned NumOps, MachineMemOperand *MMO,
+ ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope);
SDValue getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, SDVTList VTList,
- SDValue *Ops, unsigned NumOps, MachineMemOperand *MMO,
+ ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
AtomicOrdering Ordering, SynchronizationScope SynchScope);
/// getMemIntrinsicNode - Creates a MemIntrinsicNode that may produce a
/// result and takes a list of operands. Opcode may be INTRINSIC_VOID,
/// INTRINSIC_W_CHAIN, or a target-specific opcode with a value not
/// less than FIRST_TARGET_MEMORY_OPCODE.
- SDValue getMemIntrinsicNode(unsigned Opcode, SDLoc dl,
- const EVT *VTs, unsigned NumVTs,
- const SDValue *Ops, unsigned NumOps,
- EVT MemVT, MachinePointerInfo PtrInfo,
- unsigned Align = 0, bool Vol = false,
- bool ReadMem = true, bool WriteMem = true);
-
SDValue getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
- const SDValue *Ops, unsigned NumOps,
+ ArrayRef<SDValue> Ops,
EVT MemVT, MachinePointerInfo PtrInfo,
unsigned Align = 0, bool Vol = false,
bool ReadMem = true, bool WriteMem = true);
SDValue getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
- const SDValue *Ops, unsigned NumOps,
+ ArrayRef<SDValue> Ops,
EVT MemVT, MachineMemOperand *MMO);
/// getMergeValues - Create a MERGE_VALUES node from the given operands.
- SDValue getMergeValues(const SDValue *Ops, unsigned NumOps, SDLoc dl);
+ SDValue getMergeValues(ArrayRef<SDValue> Ops, SDLoc dl);
/// getLoad - Loads are not normal binary operators: their result type is not
/// determined by their operands, and they produce a value AND a token chain.
@@ -766,14 +762,15 @@ public:
SDValue getLoad(EVT VT, SDLoc dl, SDValue Chain, SDValue Ptr,
MachinePointerInfo PtrInfo, bool isVolatile,
bool isNonTemporal, bool isInvariant, unsigned Alignment,
- const MDNode *TBAAInfo = 0, const MDNode *Ranges = 0);
+ const MDNode *TBAAInfo = nullptr,
+ const MDNode *Ranges = nullptr);
SDValue getLoad(EVT VT, SDLoc dl, SDValue Chain, SDValue Ptr,
MachineMemOperand *MMO);
SDValue getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo,
EVT MemVT, bool isVolatile,
bool isNonTemporal, unsigned Alignment,
- const MDNode *TBAAInfo = 0);
+ const MDNode *TBAAInfo = nullptr);
SDValue getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
SDValue Chain, SDValue Ptr, EVT MemVT,
MachineMemOperand *MMO);
@@ -784,8 +781,8 @@ public:
SDValue Chain, SDValue Ptr, SDValue Offset,
MachinePointerInfo PtrInfo, EVT MemVT,
bool isVolatile, bool isNonTemporal, bool isInvariant,
- unsigned Alignment, const MDNode *TBAAInfo = 0,
- const MDNode *Ranges = 0);
+ unsigned Alignment, const MDNode *TBAAInfo = nullptr,
+ const MDNode *Ranges = nullptr);
SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
EVT VT, SDLoc dl,
SDValue Chain, SDValue Ptr, SDValue Offset,
@@ -796,14 +793,14 @@ public:
SDValue getStore(SDValue Chain, SDLoc dl, SDValue Val, SDValue Ptr,
MachinePointerInfo PtrInfo, bool isVolatile,
bool isNonTemporal, unsigned Alignment,
- const MDNode *TBAAInfo = 0);
+ const MDNode *TBAAInfo = nullptr);
SDValue getStore(SDValue Chain, SDLoc dl, SDValue Val, SDValue Ptr,
MachineMemOperand *MMO);
SDValue getTruncStore(SDValue Chain, SDLoc dl, SDValue Val, SDValue Ptr,
MachinePointerInfo PtrInfo, EVT TVT,
bool isNonTemporal, bool isVolatile,
unsigned Alignment,
- const MDNode *TBAAInfo = 0);
+ const MDNode *TBAAInfo = nullptr);
SDValue getTruncStore(SDValue Chain, SDLoc dl, SDValue Val, SDValue Ptr,
EVT TVT, MachineMemOperand *MMO);
SDValue getIndexedStore(SDValue OrigStoe, SDLoc dl, SDValue Base,
@@ -837,8 +834,7 @@ public:
SDValue Op3, SDValue Op4);
SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3, SDValue Op4, SDValue Op5);
- SDNode *UpdateNodeOperands(SDNode *N,
- const SDValue *Ops, unsigned NumOps);
+ SDNode *UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops);
/// SelectNodeTo - These are used for target selectors to *mutate* the
/// specified node to have the specified return type, Target opcode, and
@@ -851,15 +847,14 @@ public:
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT,
SDValue Op1, SDValue Op2, SDValue Op3);
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT,
- const SDValue *Ops, unsigned NumOps);
+ ArrayRef<SDValue> Ops);
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1, EVT VT2);
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
- EVT VT2, const SDValue *Ops, unsigned NumOps);
+ EVT VT2, ArrayRef<SDValue> Ops);
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
- EVT VT2, EVT VT3, const SDValue *Ops, unsigned NumOps);
+ EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
- EVT VT2, EVT VT3, EVT VT4, const SDValue *Ops,
- unsigned NumOps);
+ EVT VT2, EVT VT3, EVT VT4, ArrayRef<SDValue> Ops);
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
EVT VT2, SDValue Op1);
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
@@ -869,12 +864,12 @@ public:
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
EVT VT2, EVT VT3, SDValue Op1, SDValue Op2, SDValue Op3);
SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, SDVTList VTs,
- const SDValue *Ops, unsigned NumOps);
+ ArrayRef<SDValue> Ops);
/// MorphNodeTo - This *mutates* the specified node to have the specified
/// return type, opcode, and operands.
SDNode *MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs,
- const SDValue *Ops, unsigned NumOps);
+ ArrayRef<SDValue> Ops);
/// getMachineNode - These are used for target selectors to create a new node
/// with specified return type(s), MachineInstr opcode, and operands.
@@ -927,17 +922,19 @@ public:
/// getNodeIfExists - Get the specified node if it's already available, or
/// else return NULL.
- SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTs,
- const SDValue *Ops, unsigned NumOps);
+ SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTs, ArrayRef<SDValue> Ops);
/// getDbgValue - Creates a SDDbgValue node.
///
- SDDbgValue *getDbgValue(MDNode *MDPtr, SDNode *N, unsigned R, uint64_t Off,
- DebugLoc DL, unsigned O);
- SDDbgValue *getDbgValue(MDNode *MDPtr, const Value *C, uint64_t Off,
- DebugLoc DL, unsigned O);
- SDDbgValue *getDbgValue(MDNode *MDPtr, unsigned FI, uint64_t Off,
+ SDDbgValue *getDbgValue(MDNode *MDPtr, SDNode *N, unsigned R,
+ bool IsIndirect, uint64_t Off,
DebugLoc DL, unsigned O);
+ /// Constant.
+ SDDbgValue *getConstantDbgValue(MDNode *MDPtr, const Value *C, uint64_t Off,
+ DebugLoc DL, unsigned O);
+ /// Frame index.
+ SDDbgValue *getFrameIndexDbgValue(MDNode *MDPtr, unsigned FI, uint64_t Off,
+ DebugLoc DL, unsigned O);
/// RemoveDeadNode - Remove the specified node from the system. If any of its
/// operands then becomes dead, remove them as well. Inform UpdateListener
@@ -1082,13 +1079,12 @@ public:
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth = 0)
const;
- /// ComputeMaskedBits - Determine which of the bits specified in Mask are
- /// known to be either zero or one and return them in the KnownZero/KnownOne
- /// bitsets. This code only analyzes bits in Mask, in order to short-circuit
- /// processing. Targets can implement the computeMaskedBitsForTargetNode
- /// method in the TargetLowering class to allow target nodes to be understood.
- void ComputeMaskedBits(SDValue Op, APInt &KnownZero, APInt &KnownOne,
- unsigned Depth = 0) const;
+ /// Determine which bits of Op are known to be either zero or one and return
+ /// them in the KnownZero/KnownOne bitsets. Targets can implement the
+ /// computeKnownBitsForTargetNode method in the TargetLowering class to allow
+ /// target nodes to be understood.
+ void computeKnownBits(SDValue Op, APInt &KnownZero, APInt &KnownOne,
+ unsigned Depth = 0) const;
/// ComputeNumSignBits - Return the number of times the sign bit of the
/// register is replicated into the other bits. We know that at least 1 bit
@@ -1160,21 +1156,27 @@ public:
return SplitVector(N->getOperand(OpNo), SDLoc(N));
}
+ /// ExtractVectorElements - Append the extracted elements from Start to Count
+ /// out of the vector Op in Args. If Count is 0, all of the elements will be
+ /// extracted.
+ void ExtractVectorElements(SDValue Op, SmallVectorImpl<SDValue> &Args,
+ unsigned Start = 0, unsigned Count = 0);
+
+ unsigned getEVTAlignment(EVT MemoryVT) const;
+
private:
bool RemoveNodeFromCSEMaps(SDNode *N);
void AddModifiedNodeToCSEMaps(SDNode *N);
SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op, void *&InsertPos);
SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op1, SDValue Op2,
void *&InsertPos);
- SDNode *FindModifiedNodeSlot(SDNode *N, const SDValue *Ops, unsigned NumOps,
+ SDNode *FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
void *&InsertPos);
SDNode *UpdadeSDLocOnMergedSDNode(SDNode *N, SDLoc loc);
void DeleteNodeNotInCSEMaps(SDNode *N);
void DeallocateNode(SDNode *N);
- unsigned getEVTAlignment(EVT MemoryVT) const;
-
void allnodes_clear();
/// VTList - List of non-single value types.
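The node-creation interface above now takes ArrayRef<SDValue> in place of the old pointer-plus-count pairs, so a stack array can be passed directly and its length is inferred. A minimal sketch of the new calling convention; the wrapper function is hypothetical, only the getNode overload itself comes from this header:

    #include "llvm/CodeGen/SelectionDAG.h"
    using namespace llvm;

    // Builds (LHS + RHS); the ArrayRef<SDValue> overload infers the operand
    // count from the array, where the old form was getNode(..., Ops, 2).
    static SDValue buildAdd(SelectionDAG &DAG, SDLoc DL, EVT VT,
                            SDValue LHS, SDValue RHS) {
      SDValue Ops[] = { LHS, RHS };
      return DAG.getNode(ISD::ADD, DL, VT, Ops);
    }
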
diff --git a/include/llvm/CodeGen/SelectionDAGISel.h b/include/llvm/CodeGen/SelectionDAGISel.h
index b92b6ec..520be40 100644
--- a/include/llvm/CodeGen/SelectionDAGISel.h
+++ b/include/llvm/CodeGen/SelectionDAGISel.h
@@ -242,13 +242,15 @@ private:
// Calls to these functions are generated by tblgen.
SDNode *Select_INLINEASM(SDNode *N);
+ SDNode *Select_READ_REGISTER(SDNode *N);
+ SDNode *Select_WRITE_REGISTER(SDNode *N);
SDNode *Select_UNDEF(SDNode *N);
void CannotYetSelect(SDNode *N);
private:
void DoInstructionSelection();
SDNode *MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTs,
- const SDValue *Ops, unsigned NumOps, unsigned EmitNodeInfo);
+ ArrayRef<SDValue> Ops, unsigned EmitNodeInfo);
void PrepareEHLandingPad();
diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h
index fd915b0..4f0ddb7 100644
--- a/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -19,6 +19,7 @@
#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
#define LLVM_CODEGEN_SELECTIONDAGNODES_H
+#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/STLExtras.h"
@@ -99,7 +100,7 @@ class SDValue {
SDNode *Node; // The node defining the value we are using.
unsigned ResNo; // Which return value of the node we are using.
public:
- SDValue() : Node(0), ResNo(0) {}
+ SDValue() : Node(nullptr), ResNo(0) {}
SDValue(SDNode *node, unsigned resno) : Node(node), ResNo(resno) {}
/// get the index which selects a specific result in the SDNode
@@ -233,7 +234,7 @@ class SDUse {
void operator=(const SDUse &U) LLVM_DELETED_FUNCTION;
public:
- SDUse() : Val(), User(NULL), Prev(NULL), Next(NULL) {}
+ SDUse() : Val(), User(nullptr), Prev(nullptr), Next(nullptr) {}
/// Normally SDUse will just implicitly convert to an SDValue that it holds.
operator const SDValue&() const { return Val; }
@@ -407,7 +408,7 @@ public:
/// use_empty - Return true if there are no uses of this node.
///
- bool use_empty() const { return UseList == NULL; }
+ bool use_empty() const { return UseList == nullptr; }
/// hasOneUse - Return true if there is exactly one use of this node.
///
@@ -457,7 +458,7 @@ public:
SDUse, ptrdiff_t>::pointer pointer;
use_iterator(const use_iterator &I) : Op(I.Op) {}
- use_iterator() : Op(0) {}
+ use_iterator() : Op(nullptr) {}
bool operator==(const use_iterator &x) const {
return Op == x.Op;
@@ -467,7 +468,7 @@ public:
}
/// atEnd - return true if this iterator is at the end of uses list.
- bool atEnd() const { return Op == 0; }
+ bool atEnd() const { return Op == nullptr; }
// Iterator traversal: forward iteration only.
use_iterator &operator++() { // Preincrement
@@ -505,8 +506,14 @@ public:
return use_iterator(UseList);
}
- static use_iterator use_end() { return use_iterator(0); }
+ static use_iterator use_end() { return use_iterator(nullptr); }
+ inline iterator_range<use_iterator> uses() {
+ return iterator_range<use_iterator>(use_begin(), use_end());
+ }
+ inline iterator_range<use_iterator> uses() const {
+ return iterator_range<use_iterator>(use_begin(), use_end());
+ }
/// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
/// indicated value. This method ignores uses of other values defined by this
@@ -579,7 +586,7 @@ public:
if (getNumOperands() != 0 &&
getOperand(getNumOperands()-1).getValueType() == MVT::Glue)
return getOperand(getNumOperands()-1).getNode();
- return 0;
+ return nullptr;
}
// If this is a pseudo op, like copyfromreg, look to see if there is a
@@ -604,7 +611,7 @@ public:
for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
if (UI.getUse().get().getValueType() == MVT::Glue)
return *UI;
- return 0;
+ return nullptr;
}
/// getNumValues - Return the number of values defined/returned by this
@@ -637,12 +644,12 @@ public:
/// getOperationName - Return the opcode of this operation for printing.
///
- std::string getOperationName(const SelectionDAG *G = 0) const;
+ std::string getOperationName(const SelectionDAG *G = nullptr) const;
static const char* getIndexedModeName(ISD::MemIndexedMode AM);
void print_types(raw_ostream &OS, const SelectionDAG *G) const;
void print_details(raw_ostream &OS, const SelectionDAG *G) const;
- void print(raw_ostream &OS, const SelectionDAG *G = 0) const;
- void printr(raw_ostream &OS, const SelectionDAG *G = 0) const;
+ void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
+ void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
/// printrFull - Print a SelectionDAG node and all children down to
/// the leaves. The given SelectionDAG allows target-specific nodes
@@ -650,7 +657,7 @@ public:
/// print the whole DAG, including children that appear multiple
/// times.
///
- void printrFull(raw_ostream &O, const SelectionDAG *G = 0) const;
+ void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const;
/// printrWithDepth - Print a SelectionDAG node and children up to
/// depth "depth." The given SelectionDAG allows target-specific
@@ -658,7 +665,7 @@ public:
/// will print children that appear multiple times wherever they are
/// used.
///
- void printrWithDepth(raw_ostream &O, const SelectionDAG *G = 0,
+ void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr,
unsigned depth = 100) const;
@@ -683,14 +690,15 @@ public:
/// Unlike dumpr, this will print the whole DAG, including children
/// that appear multiple times.
///
- void dumprFull(const SelectionDAG *G = 0) const;
+ void dumprFull(const SelectionDAG *G = nullptr) const;
/// dumprWithDepth - printrWithDepth to dbgs(). The given
/// SelectionDAG allows target-specific nodes to be printed in
/// human-readable form. Unlike dumpr, this will print children
/// that appear multiple times wherever they are used.
///
- void dumprWithDepth(const SelectionDAG *G = 0, unsigned depth = 100) const;
+ void dumprWithDepth(const SelectionDAG *G = nullptr,
+ unsigned depth = 100) const;
/// Profile - Gather unique data for the node.
///
@@ -707,14 +715,14 @@ protected:
}
SDNode(unsigned Opc, unsigned Order, const DebugLoc dl, SDVTList VTs,
- const SDValue *Ops, unsigned NumOps)
+ ArrayRef<SDValue> Ops)
: NodeType(Opc), OperandsNeedDelete(true), HasDebugValue(false),
SubclassData(0), NodeId(-1),
- OperandList(NumOps ? new SDUse[NumOps] : 0),
- ValueList(VTs.VTs), UseList(NULL),
- NumOperands(NumOps), NumValues(VTs.NumVTs),
+ OperandList(Ops.size() ? new SDUse[Ops.size()] : nullptr),
+ ValueList(VTs.VTs), UseList(nullptr),
+ NumOperands(Ops.size()), NumValues(VTs.NumVTs),
debugLoc(dl), IROrder(Order) {
- for (unsigned i = 0; i != NumOps; ++i) {
+ for (unsigned i = 0; i != Ops.size(); ++i) {
OperandList[i].setUser(this);
OperandList[i].setInitial(Ops[i]);
}
@@ -725,9 +733,9 @@ protected:
/// set later with InitOperands.
SDNode(unsigned Opc, unsigned Order, const DebugLoc dl, SDVTList VTs)
: NodeType(Opc), OperandsNeedDelete(false), HasDebugValue(false),
- SubclassData(0), NodeId(-1), OperandList(0),
- ValueList(VTs.VTs), UseList(NULL), NumOperands(0), NumValues(VTs.NumVTs),
- debugLoc(dl), IROrder(Order) {}
+ SubclassData(0), NodeId(-1), OperandList(nullptr), ValueList(VTs.VTs),
+ UseList(nullptr), NumOperands(0), NumValues(VTs.NumVTs), debugLoc(dl),
+ IROrder(Order) {}
/// InitOperands - Initialize the operands list of this with 1 operand.
void InitOperands(SDUse *Ops, const SDValue &Op0) {
@@ -812,7 +820,7 @@ private:
int IROrder;
public:
- SDLoc() : Ptr(NULL), IROrder(0) {}
+ SDLoc() : Ptr(nullptr), IROrder(0) {}
SDLoc(const SDNode *N) : Ptr(N), IROrder(-1) {
assert(N && "null SDNode");
}
@@ -823,14 +831,14 @@ public:
assert(Order >= 0 && "bad IROrder");
}
unsigned getIROrder() {
- if (IROrder >= 0 || Ptr == NULL) {
+ if (IROrder >= 0 || Ptr == nullptr) {
return (unsigned)IROrder;
}
const SDNode *N = (const SDNode*)(Ptr);
return N->getIROrder();
}
DebugLoc getDebugLoc() {
- if (Ptr == NULL) {
+ if (!Ptr) {
return DebugLoc();
}
if (IROrder >= 0) {
@@ -990,8 +998,7 @@ public:
EVT MemoryVT, MachineMemOperand *MMO);
MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
- const SDValue *Ops,
- unsigned NumOps, EVT MemoryVT, MachineMemOperand *MMO);
+ ArrayRef<SDValue> Ops, EVT MemoryVT, MachineMemOperand *MMO);
bool readMem() const { return MMO->isLoad(); }
bool writeMem() const { return MMO->isStore(); }
@@ -1024,8 +1031,7 @@ public:
return SynchronizationScope((SubclassData >> 12) & 1);
}
- /// Returns the SrcValue and offset that describes the location of the access
- const Value *getSrcValue() const { return MMO->getValue(); }
+ // Returns the offset from the location of the access.
int64_t getSrcValueOffset() const { return MMO->getOffset(); }
/// Returns the TBAAInfo that describes the dereference.
@@ -1153,7 +1159,7 @@ public:
InitOperands(Ops, Chain, Ptr);
}
AtomicSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTL, EVT MemVT,
- SDValue* AllOps, SDUse *DynOps, unsigned NumOps,
+ const SDValue* AllOps, SDUse *DynOps, unsigned NumOps,
MachineMemOperand *MMO,
AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope)
@@ -1208,9 +1214,9 @@ public:
class MemIntrinsicSDNode : public MemSDNode {
public:
MemIntrinsicSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
- const SDValue *Ops, unsigned NumOps,
- EVT MemoryVT, MachineMemOperand *MMO)
- : MemSDNode(Opc, Order, dl, VTs, Ops, NumOps, MemoryVT, MMO) {
+ ArrayRef<SDValue> Ops, EVT MemoryVT,
+ MachineMemOperand *MMO)
+ : MemSDNode(Opc, Order, dl, VTs, Ops, MemoryVT, MMO) {
}
// Methods to support isa and dyn_cast
@@ -1680,11 +1686,10 @@ class CvtRndSatSDNode : public SDNode {
ISD::CvtCode CvtCode;
friend class SelectionDAG;
explicit CvtRndSatSDNode(EVT VT, unsigned Order, DebugLoc dl,
- const SDValue *Ops, unsigned NumOps,
- ISD::CvtCode Code)
- : SDNode(ISD::CONVERT_RNDSAT, Order, dl, getSDVTList(VT), Ops, NumOps),
+ ArrayRef<SDValue> Ops, ISD::CvtCode Code)
+ : SDNode(ISD::CONVERT_RNDSAT, Order, dl, getSDVTList(VT), Ops),
CvtCode(Code) {
- assert(NumOps == 5 && "wrong number of operations");
+ assert(Ops.size() == 5 && "wrong number of operations");
}
public:
ISD::CvtCode getCvtCode() const { return CvtCode; }
@@ -1827,7 +1832,7 @@ public:
private:
friend class SelectionDAG;
MachineSDNode(unsigned Opc, unsigned Order, const DebugLoc DL, SDVTList VTs)
- : SDNode(Opc, Order, DL, VTs), MemRefs(0), MemRefsEnd(0) {}
+ : SDNode(Opc, Order, DL, VTs), MemRefs(nullptr), MemRefsEnd(nullptr) {}
/// LocalOperands - Operands for this instruction, if they fit here. If
/// they don't, this field is unused.
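The uses() accessors added above wrap use_begin()/use_end() in an iterator_range, so user iteration becomes a range-based for loop. A minimal sketch; the helper and its CopyToReg check are illustrative only:

    #include "llvm/CodeGen/SelectionDAGNodes.h"
    using namespace llvm;

    // Returns true if any user of N is a CopyToReg node. Dereferencing the use
    // iterator still yields the using SDNode*.
    static bool hasCopyToRegUser(SDNode *N) {
      for (SDNode *User : N->uses())
        if (User->getOpcode() == ISD::CopyToReg)
          return true;
      return false;
    }
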
diff --git a/include/llvm/CodeGen/SlotIndexes.h b/include/llvm/CodeGen/SlotIndexes.h
index 1cc34d5..00bb22b 100644
--- a/include/llvm/CodeGen/SlotIndexes.h
+++ b/include/llvm/CodeGen/SlotIndexes.h
@@ -147,11 +147,11 @@ namespace llvm {
};
/// Construct an invalid index.
- SlotIndex() : lie(0, 0) {}
+ SlotIndex() : lie(nullptr, 0) {}
// Construct a new slot index from the given one, and set the slot.
SlotIndex(const SlotIndex &li, Slot s) : lie(li.listEntry(), unsigned(s)) {
- assert(lie.getPointer() != 0 &&
+ assert(lie.getPointer() != nullptr &&
"Attempt to construct index with 0 pointer.");
}
@@ -421,7 +421,7 @@ namespace llvm {
/// Returns the instruction for the given index, or null if the given
/// index has no instruction associated with it.
MachineInstr* getInstructionFromIndex(SlotIndex index) const {
- return index.isValid() ? index.listEntry()->getInstr() : 0;
+ return index.isValid() ? index.listEntry()->getInstr() : nullptr;
}
/// Returns the next non-null index, if one exists.
@@ -551,14 +551,14 @@ namespace llvm {
// Check that we don't cross the boundary into this block.
if (itr->first < end)
- return 0;
+ return nullptr;
itr = std::prev(itr);
if (itr->first <= start)
return itr->second;
- return 0;
+ return nullptr;
}
/// Insert the given machine instruction into the mapping. Returns the
@@ -574,7 +574,7 @@ namespace llvm {
// affected by debug information.
assert(!mi->isDebugValue() && "Cannot number DBG_VALUE instructions.");
- assert(mi->getParent() != 0 && "Instr must be added to function.");
+ assert(mi->getParent() != nullptr && "Instr must be added to function.");
// Get the entries where mi should be inserted.
IndexList::iterator prevItr, nextItr;
@@ -615,7 +615,7 @@ namespace llvm {
IndexListEntry *miEntry(mi2iItr->second.listEntry());
assert(miEntry->getInstr() == mi && "Instruction indexes broken.");
// FIXME: Eventually we want to actually delete these indexes.
- miEntry->setInstr(0);
+ miEntry->setInstr(nullptr);
mi2iMap.erase(mi2iItr);
}
}
@@ -640,15 +640,15 @@ namespace llvm {
MachineFunction::iterator nextMBB =
std::next(MachineFunction::iterator(mbb));
- IndexListEntry *startEntry = 0;
- IndexListEntry *endEntry = 0;
+ IndexListEntry *startEntry = nullptr;
+ IndexListEntry *endEntry = nullptr;
IndexList::iterator newItr;
if (nextMBB == mbb->getParent()->end()) {
startEntry = &indexList.back();
- endEntry = createEntry(0, 0);
+ endEntry = createEntry(nullptr, 0);
newItr = indexList.insertAfter(startEntry, endEntry);
} else {
- startEntry = createEntry(0, 0);
+ startEntry = createEntry(nullptr, 0);
endEntry = getMBBStartIdx(nextMBB).listEntry();
newItr = indexList.insert(endEntry, startEntry);
}
diff --git a/include/llvm/CodeGen/StackMaps.h b/include/llvm/CodeGen/StackMaps.h
index a62ab6e..5eddbb6 100644
--- a/include/llvm/CodeGen/StackMaps.h
+++ b/include/llvm/CodeGen/StackMaps.h
@@ -21,6 +21,7 @@ namespace llvm {
class AsmPrinter;
class MCExpr;
+class MCStreamer;
/// \brief MI-level patchpoint operands.
///
@@ -115,7 +116,7 @@ public:
// OpParser.
typedef enum { DirectMemRefOp, IndirectMemRefOp, ConstantOp } OpType;
- StackMaps(AsmPrinter &AP) : AP(AP) {}
+ StackMaps(AsmPrinter &AP);
/// \brief Generate a stackmap record for a stackmap instruction.
///
@@ -131,8 +132,11 @@ public:
void serializeToStackMapSection();
private:
+ static const char *WSMP;
+
typedef SmallVector<Location, 8> LocationVec;
typedef SmallVector<LiveOutReg, 8> LiveOutVec;
+ typedef MapVector<int64_t, int64_t> ConstantPool;
typedef MapVector<const MCSymbol *, uint64_t> FnStackSizeMap;
struct CallsiteInfo {
@@ -140,7 +144,7 @@ private:
uint64_t ID;
LocationVec Locations;
LiveOutVec LiveOuts;
- CallsiteInfo() : CSOffsetExpr(0), ID(0) {}
+ CallsiteInfo() : CSOffsetExpr(nullptr), ID(0) {}
CallsiteInfo(const MCExpr *CSOffsetExpr, uint64_t ID,
LocationVec &Locations, LiveOutVec &LiveOuts)
: CSOffsetExpr(CSOffsetExpr), ID(ID), Locations(Locations),
@@ -149,26 +153,6 @@ private:
typedef std::vector<CallsiteInfo> CallsiteInfoList;
- struct ConstantPool {
- private:
- typedef std::map<int64_t, size_t> ConstantsMap;
- std::vector<int64_t> ConstantsList;
- ConstantsMap ConstantIndexes;
-
- public:
- size_t getNumConstants() const { return ConstantsList.size(); }
- int64_t getConstant(size_t Idx) const { return ConstantsList[Idx]; }
- size_t getConstantIndex(int64_t ConstVal) {
- size_t NextIdx = ConstantsList.size();
- ConstantsMap::const_iterator I =
- ConstantIndexes.insert(ConstantIndexes.end(),
- std::make_pair(ConstVal, NextIdx));
- if (I->second == NextIdx)
- ConstantsList.push_back(ConstVal);
- return I->second;
- }
- };
-
AsmPrinter &AP;
CallsiteInfoList CSInfos;
ConstantPool ConstPool;
@@ -196,6 +180,18 @@ private:
MachineInstr::const_mop_iterator MOI,
MachineInstr::const_mop_iterator MOE,
bool recordResult = false);
+
+ /// \brief Emit the stackmap header.
+ void emitStackmapHeader(MCStreamer &OS);
+
+ /// \brief Emit the function frame record for each function.
+ void emitFunctionFrameRecords(MCStreamer &OS);
+
+ /// \brief Emit the constant pool.
+ void emitConstantPoolEntries(MCStreamer &OS);
+
+ /// \brief Emit the callsite info for each stackmap/patchpoint intrinsic call.
+ void emitCallsiteEntries(MCStreamer &OS, const TargetRegisterInfo *TRI);
};
}
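The hand-rolled ConstantPool above is replaced by a MapVector<int64_t, int64_t>, which already provides unique keys in insertion order. A standalone sketch of that container's behaviour, independent of the StackMaps class itself:

    #include "llvm/ADT/MapVector.h"
    #include <cassert>
    #include <utility>
    using namespace llvm;

    // MapVector deduplicates keys like a map but iterates in insertion order
    // like a vector, which is what the old ConstantPool implemented by hand.
    static void constantPoolDemo() {
      MapVector<int64_t, int64_t> Pool;
      Pool.insert(std::make_pair(42, 42));
      Pool.insert(std::make_pair(7, 7));
      Pool.insert(std::make_pair(42, 42)); // duplicate key, no new entry
      assert(Pool.size() == 2);
      assert(Pool.begin()->first == 42);   // first-inserted key comes first
    }
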
diff --git a/include/llvm/CodeGen/StackProtector.h b/include/llvm/CodeGen/StackProtector.h
index 0b8b8c0..8cef85c 100644
--- a/include/llvm/CodeGen/StackProtector.h
+++ b/include/llvm/CodeGen/StackProtector.h
@@ -105,11 +105,12 @@ private:
public:
static char ID; // Pass identification, replacement for typeid.
- StackProtector() : FunctionPass(ID), TM(0), TLI(0), SSPBufferSize(0) {
+ StackProtector()
+ : FunctionPass(ID), TM(nullptr), TLI(nullptr), SSPBufferSize(0) {
initializeStackProtectorPass(*PassRegistry::getPassRegistry());
}
StackProtector(const TargetMachine *TM)
- : FunctionPass(ID), TM(TM), TLI(0), Trip(TM->getTargetTriple()),
+ : FunctionPass(ID), TM(TM), TLI(nullptr), Trip(TM->getTargetTriple()),
SSPBufferSize(8) {
initializeStackProtectorPass(*PassRegistry::getPassRegistry());
}
diff --git a/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h b/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
index 16fed32..9f1cbaa 100644
--- a/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
+++ b/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
@@ -67,10 +67,12 @@ public:
MachineModuleInfo *MMI) const override;
void InitializeELF(bool UseInitArray_);
- const MCSection *
- getStaticCtorSection(unsigned Priority = 65535) const override;
- const MCSection *
- getStaticDtorSection(unsigned Priority = 65535) const override;
+ const MCSection *getStaticCtorSection(unsigned Priority,
+ const MCSymbol *KeySym,
+ const MCSection *KeySec) const override;
+ const MCSection *getStaticDtorSection(unsigned Priority,
+ const MCSymbol *KeySym,
+ const MCSection *KeySec) const override;
};
@@ -140,6 +142,13 @@ public:
void emitModuleFlags(MCStreamer &Streamer,
ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
Mangler &Mang, const TargetMachine &TM) const override;
+
+ const MCSection *getStaticCtorSection(unsigned Priority,
+ const MCSymbol *KeySym,
+ const MCSection *KeySec) const override;
+ const MCSection *getStaticDtorSection(unsigned Priority,
+ const MCSymbol *KeySym,
+ const MCSection *KeySec) const override;
};
} // end namespace llvm
diff --git a/include/llvm/CodeGen/TargetSchedule.h b/include/llvm/CodeGen/TargetSchedule.h
index 4e178d0..690b70f 100644
--- a/include/llvm/CodeGen/TargetSchedule.h
+++ b/include/llvm/CodeGen/TargetSchedule.h
@@ -41,7 +41,7 @@ class TargetSchedModel {
unsigned MicroOpFactor; // Multiply to normalize microops to resource units.
unsigned ResourceLCM; // Resource units per cycle. Latency normalization factor.
public:
- TargetSchedModel(): STI(0), TII(0) {}
+ TargetSchedModel(): STI(nullptr), TII(nullptr) {}
/// \brief Initialize the machine model for instruction scheduling.
///
@@ -75,7 +75,7 @@ public:
const InstrItineraryData *getInstrItineraries() const {
if (hasInstrItineraries())
return &InstrItins;
- return 0;
+ return nullptr;
}
/// \brief Identify the processor corresponding to the current subtarget.
@@ -86,7 +86,7 @@ public:
/// \brief Return the number of issue slots required for this MI.
unsigned getNumMicroOps(const MachineInstr *MI,
- const MCSchedClassDesc *SC = 0) const;
+ const MCSchedClassDesc *SC = nullptr) const;
/// \brief Get the number of kinds of resources for this target.
unsigned getNumProcResourceKinds() const {
diff --git a/include/llvm/CodeGen/ValueTypes.h b/include/llvm/CodeGen/ValueTypes.h
index 8cf26fa..4e93940 100644
--- a/include/llvm/CodeGen/ValueTypes.h
+++ b/include/llvm/CodeGen/ValueTypes.h
@@ -35,9 +35,9 @@ namespace llvm {
public:
EVT() : V((MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE)),
- LLVMTy(0) {}
- EVT(MVT::SimpleValueType SVT) : V(SVT), LLVMTy(0) { }
- EVT(MVT S) : V(S), LLVMTy(0) {}
+ LLVMTy(nullptr) {}
+ EVT(MVT::SimpleValueType SVT) : V(SVT), LLVMTy(nullptr) { }
+ EVT(MVT S) : V(S), LLVMTy(nullptr) {}
bool operator==(EVT VT) const {
return !(*this != VT);
diff --git a/include/llvm/CodeGen/VirtRegMap.h b/include/llvm/CodeGen/VirtRegMap.h
index 89b5a9f..eceb875 100644
--- a/include/llvm/CodeGen/VirtRegMap.h
+++ b/include/llvm/CodeGen/VirtRegMap.h
@@ -177,7 +177,7 @@ namespace llvm {
/// the specified stack slot
void assignVirt2StackSlot(unsigned virtReg, int frameIndex);
- void print(raw_ostream &OS, const Module* M = 0) const override;
+ void print(raw_ostream &OS, const Module* M = nullptr) const override;
void dump() const;
};
diff --git a/include/llvm/DebugInfo/DIContext.h b/include/llvm/DebugInfo/DIContext.h
index 69a4f8d..c1aba01 100644
--- a/include/llvm/DebugInfo/DIContext.h
+++ b/include/llvm/DebugInfo/DIContext.h
@@ -16,42 +16,31 @@
#define LLVM_DEBUGINFO_DICONTEXT_H
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringRef.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Object/RelocVisitor.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/DataTypes.h"
+#include <string>
+
namespace llvm {
class raw_ostream;
/// DILineInfo - a format-neutral container for source line information.
-class DILineInfo {
- SmallString<16> FileName;
- SmallString<16> FunctionName;
+struct DILineInfo {
+ std::string FileName;
+ std::string FunctionName;
uint32_t Line;
uint32_t Column;
-public:
+
DILineInfo()
- : FileName("<invalid>"), FunctionName("<invalid>"),
- Line(0), Column(0) {}
- DILineInfo(StringRef fileName, StringRef functionName, uint32_t line,
- uint32_t column)
- : FileName(fileName), FunctionName(functionName), Line(line),
- Column(column) {}
-
- const char *getFileName() { return FileName.c_str(); }
- const char *getFunctionName() { return FunctionName.c_str(); }
- uint32_t getLine() const { return Line; }
- uint32_t getColumn() const { return Column; }
+ : FileName("<invalid>"), FunctionName("<invalid>"), Line(0), Column(0) {}
bool operator==(const DILineInfo &RHS) const {
return Line == RHS.Line && Column == RHS.Column &&
- FileName.equals(RHS.FileName) &&
- FunctionName.equals(RHS.FunctionName);
+ FileName == RHS.FileName && FunctionName == RHS.FunctionName;
}
bool operator!=(const DILineInfo &RHS) const {
return !(*this == RHS);
@@ -79,19 +68,16 @@ class DIInliningInfo {
/// DILineInfoSpecifier - controls which fields of DILineInfo container
/// should be filled with data.
-class DILineInfoSpecifier {
- const uint32_t Flags; // Or'ed flags that set the info we want to fetch.
-public:
- enum Specification {
- FileLineInfo = 1 << 0,
- AbsoluteFilePath = 1 << 1,
- FunctionName = 1 << 2
- };
- // Use file/line info by default.
- DILineInfoSpecifier(uint32_t flags = FileLineInfo) : Flags(flags) {}
- bool needs(Specification spec) const {
- return (Flags & spec) > 0;
- }
+struct DILineInfoSpecifier {
+ enum class FileLineInfoKind { None, Default, AbsoluteFilePath };
+ enum class FunctionNameKind { None, ShortName, LinkageName };
+
+ FileLineInfoKind FLIKind;
+ FunctionNameKind FNKind;
+
+ DILineInfoSpecifier(FileLineInfoKind FLIKind = FileLineInfoKind::Default,
+ FunctionNameKind FNKind = FunctionNameKind::None)
+ : FLIKind(FLIKind), FNKind(FNKind) {}
};
/// Selects which debug sections get dumped.
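DILineInfoSpecifier is now a plain struct driven by two scoped enums instead of an or'ed flag word. A minimal sketch of building a specifier and handing it to a DIContext query; the getLineInfoForAddress call is the usual DIContext entry point and is assumed here rather than shown in this diff:

    #include "llvm/DebugInfo/DIContext.h"
    using namespace llvm;

    // Requests absolute file paths and short function names for Address.
    static DILineInfo lookupLine(DIContext &Ctx, uint64_t Address) {
      DILineInfoSpecifier Spec(
          DILineInfoSpecifier::FileLineInfoKind::AbsoluteFilePath,
          DILineInfoSpecifier::FunctionNameKind::ShortName);
      return Ctx.getLineInfoForAddress(Address, Spec);
    }
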
diff --git a/include/llvm/DebugInfo/DWARFFormValue.h b/include/llvm/DebugInfo/DWARFFormValue.h
index 533d259..d517a72 100644
--- a/include/llvm/DebugInfo/DWARFFormValue.h
+++ b/include/llvm/DebugInfo/DWARFFormValue.h
@@ -36,7 +36,7 @@ public:
private:
struct ValueType {
- ValueType() : data(NULL) {
+ ValueType() : data(nullptr) {
uval = 0;
}
@@ -60,7 +60,7 @@ public:
bool extractValue(DataExtractor data, uint32_t *offset_ptr,
const DWARFUnit *u);
bool isInlinedCStr() const {
- return Value.data != NULL && Value.data == (const uint8_t*)Value.cstr;
+ return Value.data != nullptr && Value.data == (const uint8_t*)Value.cstr;
}
/// getAsFoo functions below return the extracted value as Foo if only
diff --git a/include/llvm/ExecutionEngine/ExecutionEngine.h b/include/llvm/ExecutionEngine/ExecutionEngine.h
index 4dca870..7518c1e 100644
--- a/include/llvm/ExecutionEngine/ExecutionEngine.h
+++ b/include/llvm/ExecutionEngine/ExecutionEngine.h
@@ -123,6 +123,9 @@ class ExecutionEngine {
/// using dlsym).
bool SymbolSearchingDisabled;
+ /// Whether the JIT should verify IR modules during compilation.
+ bool VerifyModules;
+
friend class EngineBuilder; // To allow access to JITCtor and InterpCtor.
protected:
@@ -181,7 +184,7 @@ public:
/// freeMachineCodeForFunction works.
static ExecutionEngine *create(Module *M,
bool ForceInterpreter = false,
- std::string *ErrorStr = 0,
+ std::string *ErrorStr = nullptr,
CodeGenOpt::Level OptLevel =
CodeGenOpt::Default,
bool GVsWithCode = true);
@@ -193,8 +196,8 @@ public:
/// Clients should make sure to initialize targets prior to calling this
/// function.
static ExecutionEngine *createJIT(Module *M,
- std::string *ErrorStr = 0,
- JITMemoryManager *JMM = 0,
+ std::string *ErrorStr = nullptr,
+ JITMemoryManager *JMM = nullptr,
CodeGenOpt::Level OptLevel =
CodeGenOpt::Default,
bool GVsWithCode = true,
@@ -219,10 +222,7 @@ public:
/// needed by another object.
///
/// MCJIT will take ownership of the ObjectFile.
- virtual void addObjectFile(object::ObjectFile *O) {
- llvm_unreachable(
- "ExecutionEngine subclass doesn't implement addObjectFile.");
- }
+ virtual void addObjectFile(std::unique_ptr<object::ObjectFile> O);
/// addArchive - Add an Archive to the execution engine.
///
@@ -411,7 +411,7 @@ public:
}
// The JIT overrides a version that actually does this.
- virtual void runJITOnFunction(Function *, MachineCodeInfo * = 0) { }
+ virtual void runJITOnFunction(Function *, MachineCodeInfo * = nullptr) { }
/// getGlobalValueAtAddress - Return the LLVM global value object that starts
/// at the specified address.
@@ -478,7 +478,7 @@ public:
}
/// Return the target machine (if available).
- virtual TargetMachine *getTargetMachine() { return NULL; }
+ virtual TargetMachine *getTargetMachine() { return nullptr; }
/// DisableLazyCompilation - When lazy compilation is off (the default), the
/// JIT will eagerly compile every function reachable from the argument to
@@ -525,6 +525,17 @@ public:
return SymbolSearchingDisabled;
}
+ /// Enable/Disable IR module verification.
+ ///
+ /// Note: Module verification is enabled by default in Debug builds, and
+ /// disabled by default in Release. Use this method to override the default.
+ void setVerifyModules(bool Verify) {
+ VerifyModules = Verify;
+ }
+ bool getVerifyModules() const {
+ return VerifyModules;
+ }
+
/// InstallLazyFunctionCreator - If an unknown function is needed, the
/// specified function pointer is invoked to create it. If it returns null,
/// the JIT will abort.
@@ -572,19 +583,28 @@ private:
std::string MCPU;
SmallVector<std::string, 4> MAttrs;
bool UseMCJIT;
+ bool VerifyModules;
/// InitEngine - Does the common initialization of default options.
void InitEngine() {
WhichEngine = EngineKind::Either;
- ErrorStr = NULL;
+ ErrorStr = nullptr;
OptLevel = CodeGenOpt::Default;
- MCJMM = NULL;
- JMM = NULL;
+ MCJMM = nullptr;
+ JMM = nullptr;
Options = TargetOptions();
AllocateGVsWithCode = false;
RelocModel = Reloc::Default;
CMModel = CodeModel::JITDefault;
UseMCJIT = false;
+
+ // IR module verification is enabled by default in debug builds, and disabled
+ // by default in release builds.
+#ifndef NDEBUG
+ VerifyModules = true;
+#else
+ VerifyModules = false;
+#endif
}
public:
@@ -610,7 +630,7 @@ public:
/// the setJITMemoryManager() option.
EngineBuilder &setMCJITMemoryManager(RTDyldMemoryManager *mcjmm) {
MCJMM = mcjmm;
- JMM = NULL;
+ JMM = nullptr;
return *this;
}
@@ -622,7 +642,7 @@ public:
/// memory manager. This option defaults to NULL. This option overrides
/// setMCJITMemoryManager() as well.
EngineBuilder &setJITMemoryManager(JITMemoryManager *jmm) {
- MCJMM = NULL;
+ MCJMM = nullptr;
JMM = jmm;
return *this;
}
@@ -694,6 +714,13 @@ public:
return *this;
}
+ /// setVerifyModules - Set whether the JIT implementation should verify
+ /// IR modules during compilation.
+ EngineBuilder &setVerifyModules(bool Verify) {
+ VerifyModules = Verify;
+ return *this;
+ }
+
/// setMAttrs - Set cpu-specific attributes.
template<typename StringSequence>
EngineBuilder &setMAttrs(const StringSequence &mattrs) {
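
The EngineBuilder hunks above add setVerifyModules() alongside the NDEBUG-driven default set up in InitEngine(). A minimal sketch of a client that forces verification on even in a Release build, assuming the LLVM 3.5-era MCJIT setup; the helper name and flow are illustrative, not part of the patch:

    #include "llvm/ExecutionEngine/ExecutionEngine.h"
    #include "llvm/ExecutionEngine/MCJIT.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/TargetSelect.h"
    #include <string>

    // Illustrative helper: build an MCJIT engine that always verifies modules,
    // overriding the NDEBUG-based default chosen in InitEngine().
    static llvm::ExecutionEngine *createVerifyingJIT(llvm::Module *M) {
      llvm::InitializeNativeTarget();
      llvm::InitializeNativeTargetAsmPrinter();

      std::string Err;
      llvm::ExecutionEngine *EE = llvm::EngineBuilder(M)
                                      .setEngineKind(llvm::EngineKind::JIT)
                                      .setUseMCJIT(true)
                                      .setVerifyModules(true) // new in this rebase
                                      .setErrorStr(&Err)
                                      .create();
      return EE; // null on failure; Err then describes why
    }
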
diff --git a/include/llvm/ExecutionEngine/JITEventListener.h b/include/llvm/ExecutionEngine/JITEventListener.h
index 8daf2bd..99fe36c 100644
--- a/include/llvm/ExecutionEngine/JITEventListener.h
+++ b/include/llvm/ExecutionEngine/JITEventListener.h
@@ -98,11 +98,11 @@ public:
static JITEventListener *createIntelJITEventListener(
IntelJITEventsWrapper* AlternativeImpl);
#else
- static JITEventListener *createIntelJITEventListener() { return 0; }
+ static JITEventListener *createIntelJITEventListener() { return nullptr; }
static JITEventListener *createIntelJITEventListener(
IntelJITEventsWrapper* AlternativeImpl) {
- return 0;
+ return nullptr;
}
#endif // USE_INTEL_JITEVENTS
@@ -115,11 +115,11 @@ public:
OProfileWrapper* AlternativeImpl);
#else
- static JITEventListener *createOProfileJITEventListener() { return 0; }
+ static JITEventListener *createOProfileJITEventListener() { return nullptr; }
static JITEventListener *createOProfileJITEventListener(
OProfileWrapper* AlternativeImpl) {
- return 0;
+ return nullptr;
}
#endif // USE_OPROFILE
diff --git a/include/llvm/ExecutionEngine/ObjectImage.h b/include/llvm/ExecutionEngine/ObjectImage.h
index 1a13647..1fcedd8 100644
--- a/include/llvm/ExecutionEngine/ObjectImage.h
+++ b/include/llvm/ExecutionEngine/ObjectImage.h
@@ -36,9 +36,17 @@ public:
virtual object::symbol_iterator begin_symbols() const = 0;
virtual object::symbol_iterator end_symbols() const = 0;
+ iterator_range<object::symbol_iterator> symbols() const {
+ return iterator_range<object::symbol_iterator>(begin_symbols(),
+ end_symbols());
+ }
virtual object::section_iterator begin_sections() const = 0;
virtual object::section_iterator end_sections() const = 0;
+ iterator_range<object::section_iterator> sections() const {
+ return iterator_range<object::section_iterator>(begin_sections(),
+ end_sections());
+ }
virtual /* Triple::ArchType */ unsigned getArch() const = 0;
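
symbols() and sections() are thin iterator_range wrappers over the existing virtual begin/end pairs, enabling range-based for loops. A small sketch, assuming an already-loaded ObjectImage; the counting helper is illustrative:

    #include "llvm/ExecutionEngine/ObjectImage.h"
    #include "llvm/Object/ObjectFile.h"
    #include "llvm/Support/raw_ostream.h"

    // Count symbols and sections via the new ranges instead of the
    // begin_symbols()/end_symbols() and begin_sections()/end_sections() pairs.
    static void countContents(const llvm::ObjectImage &Image) {
      unsigned NumSyms = 0, NumSecs = 0;
      for (const llvm::object::SymbolRef &Sym : Image.symbols()) {
        (void)Sym; // only counting here
        ++NumSyms;
      }
      for (const llvm::object::SectionRef &Sec : Image.sections()) {
        (void)Sec;
        ++NumSecs;
      }
      llvm::outs() << NumSyms << " symbols, " << NumSecs << " sections\n";
    }
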
diff --git a/include/llvm/ExecutionEngine/RTDyldMemoryManager.h b/include/llvm/ExecutionEngine/RTDyldMemoryManager.h
index 70dd1cb..b1d6810 100644
--- a/include/llvm/ExecutionEngine/RTDyldMemoryManager.h
+++ b/include/llvm/ExecutionEngine/RTDyldMemoryManager.h
@@ -114,7 +114,7 @@ public:
/// operations needed to reliably use the memory are also performed.
///
/// Returns true if an error occurred, false otherwise.
- virtual bool finalizeMemory(std::string *ErrMsg = 0) = 0;
+ virtual bool finalizeMemory(std::string *ErrMsg = nullptr) = 0;
};
// Create wrappers for C Binding types (see CBindingWrapping.h).
diff --git a/include/llvm/ExecutionEngine/RuntimeDyld.h b/include/llvm/ExecutionEngine/RuntimeDyld.h
index 8d7b81b..30c0d49 100644
--- a/include/llvm/ExecutionEngine/RuntimeDyld.h
+++ b/include/llvm/ExecutionEngine/RuntimeDyld.h
@@ -55,7 +55,7 @@ public:
/// Ownership of the input object is transferred to the ObjectImage
/// instance returned from this function if successful. In the case of load
/// failure, the input object will be deleted.
- ObjectImage *loadObject(object::ObjectFile *InputObject);
+ ObjectImage *loadObject(std::unique_ptr<object::ObjectFile> InputObject);
/// Get the address of our local copy of the symbol. This may or may not
/// be the address used for relocation (clients can copy the data around
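
With loadObject() taking the ObjectFile by unique_ptr, the ownership transfer described in the comment is now visible in the signature. A sketch of a caller under that contract; the helper name is illustrative and error handling is minimal:

    #include "llvm/ExecutionEngine/ObjectImage.h"
    #include "llvm/ExecutionEngine/RuntimeDyld.h"
    #include "llvm/Object/ObjectFile.h"
    #include <memory>

    // Ownership of Obj moves into loadObject(); on success the returned
    // ObjectImage owns it, on failure it has already been deleted.
    static llvm::ObjectImage *
    loadAndRelocate(llvm::RuntimeDyld &Dyld,
                    std::unique_ptr<llvm::object::ObjectFile> Obj) {
      llvm::ObjectImage *Image = Dyld.loadObject(std::move(Obj));
      if (Image)
        Dyld.resolveRelocations();
      return Image; // caller now owns the image (may be null)
    }
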
diff --git a/include/llvm/ExecutionEngine/SectionMemoryManager.h b/include/llvm/ExecutionEngine/SectionMemoryManager.h
index f68028b..f24bb4d 100644
--- a/include/llvm/ExecutionEngine/SectionMemoryManager.h
+++ b/include/llvm/ExecutionEngine/SectionMemoryManager.h
@@ -72,7 +72,7 @@ public:
/// operations needed to reliably use the memory are also performed.
///
/// \returns true if an error occurred, false otherwise.
- bool finalizeMemory(std::string *ErrMsg = 0) override;
+ bool finalizeMemory(std::string *ErrMsg = nullptr) override;
/// \brief Invalidate instruction cache for code sections.
///
diff --git a/include/llvm/IR/Argument.h b/include/llvm/IR/Argument.h
index 7c1ebf6..3a63e1a 100644
--- a/include/llvm/IR/Argument.h
+++ b/include/llvm/IR/Argument.h
@@ -44,7 +44,7 @@ public:
///
/// If \p F is specified, the argument is inserted at the end of the argument
/// list for \p F.
- explicit Argument(Type *Ty, const Twine &Name = "", Function *F = 0);
+ explicit Argument(Type *Ty, const Twine &Name = "", Function *F = nullptr);
inline const Function *getParent() const { return Parent; }
inline Function *getParent() { return Parent; }
@@ -55,6 +55,10 @@ public:
/// For example in "void foo(int a, float b)" a is 0 and b is 1.
unsigned getArgNo() const;
+ /// \brief Return true if this argument has the nonnull attribute on it in
+ /// its containing function.
+ bool hasNonNullAttr() const;
+
/// \brief Return true if this argument has the byval attribute on it in its
/// containing function.
bool hasByValAttr() const;
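
hasNonNullAttr() is the query side of the NonNull attribute added to Attributes.h below. A sketch of how an analysis might consume it; the helper is illustrative:

    #include "llvm/IR/Argument.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/Type.h"

    // True if every pointer argument of F is marked nonnull in its
    // containing function's attribute list.
    static bool allPointerArgsNonNull(const llvm::Function &F) {
      for (llvm::Function::const_arg_iterator I = F.arg_begin(),
                                              E = F.arg_end();
           I != E; ++I)
        if (I->getType()->isPointerTy() && !I->hasNonNullAttr())
          return false;
      return true;
    }
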
diff --git a/include/llvm/IR/Attributes.h b/include/llvm/IR/Attributes.h
index 9eccf40..86f9cc8 100644
--- a/include/llvm/IR/Attributes.h
+++ b/include/llvm/IR/Attributes.h
@@ -86,6 +86,7 @@ public:
NoInline, ///< inline=never
NonLazyBind, ///< Function is called early and/or
///< often, so lazy binding isn't worthwhile
+ NonNull, ///< Pointer is known to be not null
NoRedZone, ///< Disable redzone
NoReturn, ///< Mark the function as not returning
NoUnwind, ///< Function doesn't unwind stack
@@ -116,7 +117,7 @@ private:
AttributeImpl *pImpl;
Attribute(AttributeImpl *A) : pImpl(A) {}
public:
- Attribute() : pImpl(0) {}
+ Attribute() : pImpl(nullptr) {}
//===--------------------------------------------------------------------===//
// Attribute Construction
@@ -232,7 +233,7 @@ private:
explicit AttributeSet(AttributeSetImpl *LI) : pImpl(LI) {}
public:
- AttributeSet() : pImpl(0) {}
+ AttributeSet() : pImpl(nullptr) {}
//===--------------------------------------------------------------------===//
// AttributeSet Construction and Mutation
@@ -242,7 +243,7 @@ public:
static AttributeSet get(LLVMContext &C, ArrayRef<AttributeSet> Attrs);
static AttributeSet get(LLVMContext &C, unsigned Index,
ArrayRef<Attribute::AttrKind> Kind);
- static AttributeSet get(LLVMContext &C, unsigned Index, AttrBuilder &B);
+ static AttributeSet get(LLVMContext &C, unsigned Index, const AttrBuilder &B);
/// \brief Add an attribute to the attribute set at the given index. Since
/// attribute sets are immutable, this returns a new set.
@@ -469,6 +470,8 @@ public:
typedef std::pair<std::string, std::string> td_type;
typedef std::map<std::string, std::string>::iterator td_iterator;
typedef std::map<std::string, std::string>::const_iterator td_const_iterator;
+ typedef llvm::iterator_range<td_iterator> td_range;
+ typedef llvm::iterator_range<td_const_iterator> td_const_range;
td_iterator td_begin() { return TargetDepAttrs.begin(); }
td_iterator td_end() { return TargetDepAttrs.end(); }
@@ -476,6 +479,11 @@ public:
td_const_iterator td_begin() const { return TargetDepAttrs.begin(); }
td_const_iterator td_end() const { return TargetDepAttrs.end(); }
+ td_range td_attrs() { return td_range(td_begin(), td_end()); }
+ td_const_range td_attrs() const {
+ return td_const_range(td_begin(), td_end());
+ }
+
bool td_empty() const { return TargetDepAttrs.empty(); }
bool operator==(const AttrBuilder &B);
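
AttrBuilder's td_attrs() mirrors the other iterator_range additions in this patch, covering the target-dependent (string) attributes. A sketch:

    #include "llvm/IR/Attributes.h"
    #include "llvm/Support/raw_ostream.h"

    // Dump the string attributes held by an AttrBuilder as key=value pairs,
    // using the new td_attrs() range rather than td_begin()/td_end().
    static void dumpStringAttrs(const llvm::AttrBuilder &B) {
      for (const auto &KV : B.td_attrs())
        llvm::outs() << KV.first << "=" << KV.second << "\n";
    }
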
diff --git a/include/llvm/IR/BasicBlock.h b/include/llvm/IR/BasicBlock.h
index 1adc254..a19489a 100644
--- a/include/llvm/IR/BasicBlock.h
+++ b/include/llvm/IR/BasicBlock.h
@@ -90,7 +90,8 @@ private:
/// inserted at either the end of the function (if InsertBefore is null), or
/// before the specified basic block.
explicit BasicBlock(LLVMContext &C, const Twine &Name = "",
- Function *Parent = 0, BasicBlock *InsertBefore = 0);
+ Function *Parent = nullptr,
+ BasicBlock *InsertBefore = nullptr);
public:
/// \brief Get the context in which this basic block lives.
LLVMContext &getContext() const;
@@ -107,7 +108,8 @@ public:
/// inserted at either the end of the function (if InsertBefore is 0), or
/// before the specified basic block.
static BasicBlock *Create(LLVMContext &Context, const Twine &Name = "",
- Function *Parent = 0,BasicBlock *InsertBefore = 0) {
+ Function *Parent = nullptr,
+ BasicBlock *InsertBefore = nullptr) {
return new BasicBlock(Context, Name, Parent, InsertBefore);
}
~BasicBlock();
@@ -172,14 +174,15 @@ public:
void moveAfter(BasicBlock *MovePos);
- /// \brief Return this block if it has a single predecessor block. Otherwise
- /// return a null pointer.
+ /// \brief Return the predecessor of this block if it has a single predecessor
+ /// block. Otherwise return a null pointer.
BasicBlock *getSinglePredecessor();
const BasicBlock *getSinglePredecessor() const {
return const_cast<BasicBlock*>(this)->getSinglePredecessor();
}
- /// \brief Return this block if it has a unique predecessor block. Otherwise return a null pointer.
+ /// \brief Return the predecessor of this block if it has a unique predecessor
+ /// block. Otherwise return a null pointer.
///
/// Note that unique predecessor doesn't mean single edge, there can be
/// multiple edges from the unique predecessor to this block (for example a
diff --git a/include/llvm/IR/CallSite.h b/include/llvm/IR/CallSite.h
index ec46103..deea415 100644
--- a/include/llvm/IR/CallSite.h
+++ b/include/llvm/IR/CallSite.h
@@ -47,7 +47,7 @@ class CallSiteBase {
protected:
PointerIntPair<InstrTy*, 1, bool> I;
public:
- CallSiteBase() : I(0, false) {}
+ CallSiteBase() : I(nullptr, false) {}
CallSiteBase(CallTy *CI) : I(CI, true) { assert(CI); }
CallSiteBase(InvokeTy *II) : I(II, false) { assert(II); }
CallSiteBase(ValTy *II) { *this = get(II); }
@@ -160,6 +160,17 @@ public:
///
FunTy *getCaller() const { return (*this)->getParent()->getParent(); }
+ /// \brief Tests if this call site must be tail call optimized. Only a
+ /// CallInst can be tail call optimized.
+ bool isMustTailCall() const {
+ return isCall() && cast<CallInst>(getInstruction())->isMustTailCall();
+ }
+
+ /// \brief Tests if this call site is marked as a tail call.
+ bool isTailCall() const {
+ return isCall() && cast<CallInst>(getInstruction())->isTailCall();
+ }
+
#define CALLSITE_DELEGATE_GETTER(METHOD) \
InstrTy *II = getInstruction(); \
return isCall() \
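
The new CallSite queries save callers a cast to CallInst when all they need is the tail-call kind; invoke-based call sites simply report false for both. A sketch:

    #include "llvm/IR/CallSite.h"

    // Classify a call site using the new tail-call predicates.
    static const char *tailKind(llvm::CallSite CS) {
      if (CS.isMustTailCall())
        return "musttail";
      if (CS.isTailCall())
        return "tail";
      return "none";
    }
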
diff --git a/include/llvm/IR/CallingConv.h b/include/llvm/IR/CallingConv.h
index af44e8a..1eaf4f7 100644
--- a/include/llvm/IR/CallingConv.h
+++ b/include/llvm/IR/CallingConv.h
@@ -137,13 +137,7 @@ namespace CallingConv {
/// convention differs from the more common \c X86_64_SysV convention
/// in a number of ways, most notably in that XMM registers used to pass
/// arguments are shadowed by GPRs, and vice versa.
- X86_64_Win64 = 79,
-
- /// \brief The calling convention used for __cdecl methods on win32.
- /// Differs from the C calling convention only in that the order of the
- /// first parameter and the sret parameter are swapped.
- X86_CDeclMethod = 80
-
+ X86_64_Win64 = 79
};
} // End CallingConv namespace
diff --git a/include/llvm/IR/ConstantRange.h b/include/llvm/IR/ConstantRange.h
index 86988de..342422c 100644
--- a/include/llvm/IR/ConstantRange.h
+++ b/include/llvm/IR/ConstantRange.h
@@ -114,12 +114,12 @@ public:
const APInt *getSingleElement() const {
if (Upper == Lower + 1)
return &Lower;
- return 0;
+ return nullptr;
}
/// isSingleElement - Return true if this set contains exactly one member.
///
- bool isSingleElement() const { return getSingleElement() != 0; }
+ bool isSingleElement() const { return getSingleElement() != nullptr; }
/// getSetSize - Return the number of elements in this set.
///
diff --git a/include/llvm/IR/Constants.h b/include/llvm/IR/Constants.h
index ed7a70f..0e72f04 100644
--- a/include/llvm/IR/Constants.h
+++ b/include/llvm/IR/Constants.h
@@ -299,7 +299,7 @@ class ConstantAggregateZero : public Constant {
ConstantAggregateZero(const ConstantAggregateZero &) LLVM_DELETED_FUNCTION;
protected:
explicit ConstantAggregateZero(Type *ty)
- : Constant(ty, ConstantAggregateZeroVal, 0, 0) {}
+ : Constant(ty, ConstantAggregateZeroVal, nullptr, 0) {}
protected:
// allocate space for exactly zero operands
void *operator new(size_t s) {
@@ -486,7 +486,7 @@ class ConstantPointerNull : public Constant {
protected:
explicit ConstantPointerNull(PointerType *T)
: Constant(T,
- Value::ConstantPointerNullVal, 0, 0) {}
+ Value::ConstantPointerNullVal, nullptr, 0) {}
protected:
// allocate space for exactly zero operands
@@ -536,7 +536,7 @@ class ConstantDataSequential : public Constant {
ConstantDataSequential(const ConstantDataSequential &) LLVM_DELETED_FUNCTION;
protected:
explicit ConstantDataSequential(Type *ty, ValueTy VT, const char *Data)
- : Constant(ty, VT, 0, 0), DataElements(Data), Next(0) {}
+ : Constant(ty, VT, nullptr, 0), DataElements(Data), Next(nullptr) {}
~ConstantDataSequential() { delete Next; }
static Constant *getImpl(StringRef Bytes, Type *Ty);
@@ -1136,7 +1136,7 @@ class UndefValue : public Constant {
void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
UndefValue(const UndefValue &) LLVM_DELETED_FUNCTION;
protected:
- explicit UndefValue(Type *T) : Constant(T, UndefValueVal, 0, 0) {}
+ explicit UndefValue(Type *T) : Constant(T, UndefValueVal, nullptr, 0) {}
protected:
// allocate space for exactly zero operands
void *operator new(size_t s) {
diff --git a/include/llvm/IR/DIBuilder.h b/include/llvm/IR/DIBuilder.h
index 7d87a69..8b05bbb 100644
--- a/include/llvm/IR/DIBuilder.h
+++ b/include/llvm/IR/DIBuilder.h
@@ -78,7 +78,7 @@ namespace llvm {
DITemplateValueParameter
createTemplateValueParameter(unsigned Tag, DIDescriptor Scope,
StringRef Name, DIType Ty, Value *Val,
- MDNode *File = 0, unsigned LineNo = 0,
+ MDNode *File = nullptr, unsigned LineNo = 0,
unsigned ColumnNo = 0);
DIBuilder(const DIBuilder &) LLVM_DELETED_FUNCTION;
@@ -293,7 +293,7 @@ namespace llvm {
uint64_t OffsetInBits, unsigned Flags,
DIType DerivedFrom, DIArray Elements,
DIType VTableHolder = DIType(),
- MDNode *TemplateParms = 0,
+ MDNode *TemplateParms = nullptr,
StringRef UniqueIdentifier = StringRef());
/// createStructType - Create debugging information entry for a struct.
@@ -342,7 +342,7 @@ namespace llvm {
/// @param ColumnNo Column Number.
DITemplateTypeParameter
createTemplateTypeParameter(DIDescriptor Scope, StringRef Name, DIType Ty,
- MDNode *File = 0, unsigned LineNo = 0,
+ MDNode *File = nullptr, unsigned LineNo = 0,
unsigned ColumnNo = 0);
/// createTemplateValueParameter - Create debugging information for template
@@ -356,7 +356,7 @@ namespace llvm {
/// @param ColumnNo Column Number.
DITemplateValueParameter
createTemplateValueParameter(DIDescriptor Scope, StringRef Name,
- DIType Ty, Value *Val, MDNode *File = 0,
+ DIType Ty, Value *Val, MDNode *File = nullptr,
unsigned LineNo = 0, unsigned ColumnNo = 0);
/// \brief Create debugging information for a template template parameter.
@@ -369,8 +369,9 @@ namespace llvm {
/// @param ColumnNo Column Number.
DITemplateValueParameter
createTemplateTemplateParameter(DIDescriptor Scope, StringRef Name,
- DIType Ty, StringRef Val, MDNode *File = 0,
- unsigned LineNo = 0, unsigned ColumnNo = 0);
+ DIType Ty, StringRef Val,
+ MDNode *File = nullptr, unsigned LineNo = 0,
+ unsigned ColumnNo = 0);
/// \brief Create debugging information for a template parameter pack.
/// @param Scope Scope in which this type is defined.
@@ -382,7 +383,7 @@ namespace llvm {
/// @param ColumnNo Column Number.
DITemplateValueParameter
createTemplateParameterPack(DIDescriptor Scope, StringRef Name,
- DIType Ty, DIArray Val, MDNode *File = 0,
+ DIType Ty, DIArray Val, MDNode *File = nullptr,
unsigned LineNo = 0, unsigned ColumnNo = 0);
/// createArrayType - Create debugging information entry for an array.
@@ -433,7 +434,7 @@ namespace llvm {
/// flag set.
DIType createObjectPointerType(DIType Ty);
- /// createForwardDecl - Create a temporary forward-declared type.
+ /// \brief Create a permanent forward-declared type.
DICompositeType createForwardDecl(unsigned Tag, StringRef Name,
DIDescriptor Scope, DIFile F,
unsigned Line, unsigned RuntimeLang = 0,
@@ -441,6 +442,12 @@ namespace llvm {
uint64_t AlignInBits = 0,
StringRef UniqueIdentifier = StringRef());
+ /// \brief Create a temporary forward-declared type.
+ DICompositeType createReplaceableForwardDecl(
+ unsigned Tag, StringRef Name, DIDescriptor Scope, DIFile F,
+ unsigned Line, unsigned RuntimeLang = 0, uint64_t SizeInBits = 0,
+ uint64_t AlignInBits = 0, StringRef UniqueIdentifier = StringRef());
+
/// retainType - Retain DIType in a module even if it is not referenced
/// through debug info anchors.
void retainType(DIType T);
@@ -498,7 +505,7 @@ namespace llvm {
createStaticVariable(DIDescriptor Context, StringRef Name,
StringRef LinkageName, DIFile File, unsigned LineNo,
DITypeRef Ty, bool isLocalToUnit, llvm::Value *Val,
- MDNode *Decl = NULL);
+ MDNode *Decl = nullptr);
/// createLocalVariable - Create a new descriptor for the specified
@@ -564,9 +571,9 @@ namespace llvm {
unsigned ScopeLine,
unsigned Flags = 0,
bool isOptimized = false,
- Function *Fn = 0,
- MDNode *TParam = 0,
- MDNode *Decl = 0);
+ Function *Fn = nullptr,
+ MDNode *TParam = nullptr,
+ MDNode *Decl = nullptr);
/// FIXME: this is added for dragonegg. Once we update dragonegg
/// to call resolve function, this will be removed.
@@ -578,9 +585,9 @@ namespace llvm {
unsigned ScopeLine,
unsigned Flags = 0,
bool isOptimized = false,
- Function *Fn = 0,
- MDNode *TParam = 0,
- MDNode *Decl = 0);
+ Function *Fn = nullptr,
+ MDNode *TParam = nullptr,
+ MDNode *Decl = nullptr);
/// createMethod - Create a new descriptor for the specified C++ method.
/// See comments in DISubprogram for descriptions of these fields.
@@ -610,8 +617,8 @@ namespace llvm {
DIType VTableHolder = DIType(),
unsigned Flags = 0,
bool isOptimized = false,
- Function *Fn = 0,
- MDNode *TParam = 0);
+ Function *Fn = nullptr,
+ MDNode *TParam = nullptr);
/// createNameSpace - This creates new descriptor for a namespace
/// with the specified parent scope.
@@ -647,24 +654,27 @@ namespace llvm {
/// @param NS The namespace being imported here
/// @param Line Line number
DIImportedEntity createImportedModule(DIScope Context, DINameSpace NS,
- unsigned Line,
- StringRef Name = StringRef());
+ unsigned Line);
/// \brief Create a descriptor for an imported module.
/// @param Context The scope this module is imported into
/// @param NS An aliased namespace
/// @param Line Line number
DIImportedEntity createImportedModule(DIScope Context, DIImportedEntity NS,
- unsigned Line, StringRef Name);
+ unsigned Line);
/// \brief Create a descriptor for an imported function.
/// @param Context The scope this module is imported into
/// @param Decl The declaration (or definition) of a function, type, or
/// variable
/// @param Line Line number
+ DIImportedEntity createImportedDeclaration(DIScope Context, DIScope Decl,
+ unsigned Line,
+ StringRef Name = StringRef());
DIImportedEntity createImportedDeclaration(DIScope Context,
- DIScope Decl,
- unsigned Line);
+ DIImportedEntity NS,
+ unsigned Line,
+ StringRef Name = StringRef());
/// insertDeclare - Insert a new llvm.dbg.declare intrinsic call.
/// @param Storage llvm::Value of the variable
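
createForwardDecl() now documents itself as producing a permanent forward declaration, while the temporary, replaceable variant moves to createReplaceableForwardDecl(). A hedged sketch of the new entry point, assuming the caller already has a scope and file descriptor; "Foo" is a made-up type name:

    #include "llvm/IR/DIBuilder.h"
    #include "llvm/IR/DebugInfo.h"
    #include "llvm/Support/Dwarf.h"

    // Emit a temporary forward declaration for "struct Foo" that the caller
    // intends to replace once the full definition has been built.
    static llvm::DICompositeType
    declareOpaqueStruct(llvm::DIBuilder &DIB, llvm::DIDescriptor Scope,
                        llvm::DIFile File, unsigned Line) {
      return DIB.createReplaceableForwardDecl(
          llvm::dwarf::DW_TAG_structure_type, "Foo", Scope, File, Line);
    }
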
diff --git a/include/llvm/IR/DataLayout.h b/include/llvm/IR/DataLayout.h
index 59dca63..3079dec 100644
--- a/include/llvm/IR/DataLayout.h
+++ b/include/llvm/IR/DataLayout.h
@@ -27,6 +27,9 @@
#include "llvm/Pass.h"
#include "llvm/Support/DataTypes.h"
+// this needs to be outside of the namespace, to avoid conflict with llvm-c decl
+typedef struct LLVMOpaqueTargetData *LLVMTargetDataRef;
+
namespace llvm {
class Value;
@@ -174,14 +177,14 @@ private:
public:
/// Constructs a DataLayout from a specification string. See reset().
- explicit DataLayout(StringRef LayoutDescription) : LayoutMap(0) {
+ explicit DataLayout(StringRef LayoutDescription) : LayoutMap(nullptr) {
reset(LayoutDescription);
}
/// Initialize target data from properties stored in the module.
explicit DataLayout(const Module *M);
- DataLayout(const DataLayout &DL) : LayoutMap(0) { *this = DL; }
+ DataLayout(const DataLayout &DL) : LayoutMap(nullptr) { *this = DL; }
DataLayout &operator=(const DataLayout &DL) {
clear();
@@ -408,7 +411,7 @@ public:
/// none are set.
Type *getLargestLegalIntType(LLVMContext &C) const {
unsigned LargestSize = getLargestLegalIntTypeSize();
- return (LargestSize == 0) ? 0 : Type::getIntNTy(C, LargestSize);
+ return (LargestSize == 0) ? nullptr : Type::getIntNTy(C, LargestSize);
}
/// getLargestLegalIntType - Return the size of largest legal integer type
@@ -445,6 +448,14 @@ public:
}
};
+inline DataLayout *unwrap(LLVMTargetDataRef P) {
+ return reinterpret_cast<DataLayout*>(P);
+}
+
+inline LLVMTargetDataRef wrap(const DataLayout *P) {
+ return reinterpret_cast<LLVMTargetDataRef>(const_cast<DataLayout*>(P));
+}
+
class DataLayoutPass : public ImmutablePass {
DataLayout DL;
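
The LLVMTargetDataRef typedef plus wrap()/unwrap() let C++ code hand a DataLayout across the C API boundary and back. A sketch:

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Module.h"

    // Hand a DataLayout to C-API code as an opaque LLVMTargetDataRef and read
    // it back on the other side.
    static unsigned pointerSizeViaCAPI(const llvm::Module *M) {
      llvm::DataLayout DL(M);
      LLVMTargetDataRef TD = llvm::wrap(&DL);     // opaque handle for C clients
      return llvm::unwrap(TD)->getPointerSize();  // back to the C++ object
    }
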
diff --git a/include/llvm/IR/DebugInfo.h b/include/llvm/IR/DebugInfo.h
index f7244b8..65e0a06 100644
--- a/include/llvm/IR/DebugInfo.h
+++ b/include/llvm/IR/DebugInfo.h
@@ -104,7 +104,7 @@ protected:
void replaceFunctionField(unsigned Elt, Function *F);
public:
- explicit DIDescriptor(const MDNode *N = 0) : DbgNode(N) {}
+ explicit DIDescriptor(const MDNode *N = nullptr) : DbgNode(N) {}
bool Verify() const;
@@ -116,7 +116,7 @@ public:
// FIXME: This operator bool isn't actually protecting anything at the
// moment due to the conversion operator above making DIDescriptor nodes
// implicitly convertable to bool.
- LLVM_EXPLICIT operator bool() const { return DbgNode != 0; }
+ LLVM_EXPLICIT operator bool() const { return DbgNode != nullptr; }
bool operator==(DIDescriptor Other) const { return DbgNode == Other.DbgNode; }
bool operator!=(DIDescriptor Other) const { return !operator==(Other); }
@@ -159,7 +159,7 @@ class DISubrange : public DIDescriptor {
void printInternal(raw_ostream &OS) const;
public:
- explicit DISubrange(const MDNode *N = 0) : DIDescriptor(N) {}
+ explicit DISubrange(const MDNode *N = nullptr) : DIDescriptor(N) {}
int64_t getLo() const { return getInt64Field(1); }
int64_t getCount() const { return getInt64Field(2); }
@@ -169,7 +169,7 @@ public:
/// DIArray - This descriptor holds an array of descriptors.
class DIArray : public DIDescriptor {
public:
- explicit DIArray(const MDNode *N = 0) : DIDescriptor(N) {}
+ explicit DIArray(const MDNode *N = nullptr) : DIDescriptor(N) {}
unsigned getNumElements() const;
DIDescriptor getElement(unsigned Idx) const {
@@ -185,7 +185,7 @@ class DIEnumerator : public DIDescriptor {
void printInternal(raw_ostream &OS) const;
public:
- explicit DIEnumerator(const MDNode *N = 0) : DIDescriptor(N) {}
+ explicit DIEnumerator(const MDNode *N = nullptr) : DIDescriptor(N) {}
StringRef getName() const { return getStringField(1); }
int64_t getEnumValue() const { return getInt64Field(2); }
@@ -210,7 +210,7 @@ protected:
void printInternal(raw_ostream &OS) const;
public:
- explicit DIScope(const MDNode *N = 0) : DIDescriptor(N) {}
+ explicit DIScope(const MDNode *N = nullptr) : DIDescriptor(N) {}
/// Gets the parent scope for this scope node or returns a
/// default constructed scope.
@@ -292,7 +292,7 @@ protected:
void printInternal(raw_ostream &OS) const;
public:
- explicit DIType(const MDNode *N = 0) : DIScope(N) {}
+ explicit DIType(const MDNode *N = nullptr) : DIScope(N) {}
operator DITypeRef () const {
assert(isType() &&
"constructing DITypeRef from an MDNode that is not a type");
@@ -339,14 +339,14 @@ public:
/// replaceAllUsesWith - Replace all uses of debug info referenced by
/// this descriptor.
- void replaceAllUsesWith(DIDescriptor &D);
+ void replaceAllUsesWith(LLVMContext &VMContext, DIDescriptor D);
void replaceAllUsesWith(MDNode *D);
};
/// DIBasicType - A basic type, like 'int' or 'float'.
class DIBasicType : public DIType {
public:
- explicit DIBasicType(const MDNode *N = 0) : DIType(N) {}
+ explicit DIBasicType(const MDNode *N = nullptr) : DIType(N) {}
unsigned getEncoding() const { return getUnsignedField(9); }
@@ -362,7 +362,7 @@ class DIDerivedType : public DIType {
void printInternal(raw_ostream &OS) const;
public:
- explicit DIDerivedType(const MDNode *N = 0) : DIType(N) {}
+ explicit DIDerivedType(const MDNode *N = nullptr) : DIType(N) {}
DITypeRef getTypeDerivedFrom() const { return getFieldAs<DITypeRef>(9); }
@@ -395,7 +395,7 @@ class DICompositeType : public DIDerivedType {
void printInternal(raw_ostream &OS) const;
public:
- explicit DICompositeType(const MDNode *N = 0) : DIDerivedType(N) {}
+ explicit DICompositeType(const MDNode *N = nullptr) : DIDerivedType(N) {}
DIArray getTypeArray() const { return getFieldAs<DIArray>(10); }
void setTypeArray(DIArray Elements, DIArray TParams = DIArray());
@@ -414,7 +414,7 @@ class DIFile : public DIScope {
friend class DIDescriptor;
public:
- explicit DIFile(const MDNode *N = 0) : DIScope(N) {}
+ explicit DIFile(const MDNode *N = nullptr) : DIScope(N) {}
MDNode *getFileNode() const;
bool Verify() const;
};
@@ -425,9 +425,11 @@ class DICompileUnit : public DIScope {
void printInternal(raw_ostream &OS) const;
public:
- explicit DICompileUnit(const MDNode *N = 0) : DIScope(N) {}
+ explicit DICompileUnit(const MDNode *N = nullptr) : DIScope(N) {}
- unsigned getLanguage() const { return getUnsignedField(2); }
+ dwarf::SourceLanguage getLanguage() const {
+ return static_cast<dwarf::SourceLanguage>(getUnsignedField(2));
+ }
StringRef getProducer() const { return getStringField(3); }
bool isOptimized() const { return getUnsignedField(4) != 0; }
@@ -453,7 +455,7 @@ class DISubprogram : public DIScope {
void printInternal(raw_ostream &OS) const;
public:
- explicit DISubprogram(const MDNode *N = 0) : DIScope(N) {}
+ explicit DISubprogram(const MDNode *N = nullptr) : DIScope(N) {}
DIScopeRef getContext() const { return getFieldAs<DIScopeRef>(2); }
StringRef getName() const { return getStringField(3); }
@@ -532,7 +534,7 @@ public:
/// DILexicalBlock - This is a wrapper for a lexical block.
class DILexicalBlock : public DIScope {
public:
- explicit DILexicalBlock(const MDNode *N = 0) : DIScope(N) {}
+ explicit DILexicalBlock(const MDNode *N = nullptr) : DIScope(N) {}
DIScope getContext() const { return getFieldAs<DIScope>(2); }
unsigned getLineNumber() const { return getUnsignedField(3); }
unsigned getColumnNumber() const { return getUnsignedField(4); }
@@ -544,7 +546,7 @@ public:
/// a filename change.
class DILexicalBlockFile : public DIScope {
public:
- explicit DILexicalBlockFile(const MDNode *N = 0) : DIScope(N) {}
+ explicit DILexicalBlockFile(const MDNode *N = nullptr) : DIScope(N) {}
DIScope getContext() const {
if (getScope().isSubprogram())
return getScope();
@@ -562,7 +564,7 @@ class DINameSpace : public DIScope {
void printInternal(raw_ostream &OS) const;
public:
- explicit DINameSpace(const MDNode *N = 0) : DIScope(N) {}
+ explicit DINameSpace(const MDNode *N = nullptr) : DIScope(N) {}
DIScope getContext() const { return getFieldAs<DIScope>(2); }
StringRef getName() const { return getStringField(3); }
unsigned getLineNumber() const { return getUnsignedField(4); }
@@ -572,14 +574,16 @@ public:
/// DIUnspecifiedParameter - This is a wrapper for unspecified parameters.
class DIUnspecifiedParameter : public DIDescriptor {
public:
- explicit DIUnspecifiedParameter(const MDNode *N = 0) : DIDescriptor(N) {}
+ explicit DIUnspecifiedParameter(const MDNode *N = nullptr)
+ : DIDescriptor(N) {}
bool Verify() const;
};
/// DITemplateTypeParameter - This is a wrapper for template type parameter.
class DITemplateTypeParameter : public DIDescriptor {
public:
- explicit DITemplateTypeParameter(const MDNode *N = 0) : DIDescriptor(N) {}
+ explicit DITemplateTypeParameter(const MDNode *N = nullptr)
+ : DIDescriptor(N) {}
DIScopeRef getContext() const { return getFieldAs<DIScopeRef>(1); }
StringRef getName() const { return getStringField(2); }
@@ -596,7 +600,8 @@ public:
/// DITemplateValueParameter - This is a wrapper for template value parameter.
class DITemplateValueParameter : public DIDescriptor {
public:
- explicit DITemplateValueParameter(const MDNode *N = 0) : DIDescriptor(N) {}
+ explicit DITemplateValueParameter(const MDNode *N = nullptr)
+ : DIDescriptor(N) {}
DIScopeRef getContext() const { return getFieldAs<DIScopeRef>(1); }
StringRef getName() const { return getStringField(2); }
@@ -617,7 +622,7 @@ class DIGlobalVariable : public DIDescriptor {
void printInternal(raw_ostream &OS) const;
public:
- explicit DIGlobalVariable(const MDNode *N = 0) : DIDescriptor(N) {}
+ explicit DIGlobalVariable(const MDNode *N = nullptr) : DIDescriptor(N) {}
DIScope getContext() const { return getFieldAs<DIScope>(2); }
StringRef getName() const { return getStringField(3); }
@@ -650,7 +655,7 @@ class DIVariable : public DIDescriptor {
void printInternal(raw_ostream &OS) const;
public:
- explicit DIVariable(const MDNode *N = 0) : DIDescriptor(N) {}
+ explicit DIVariable(const MDNode *N = nullptr) : DIDescriptor(N) {}
DIScope getContext() const { return getFieldAs<DIScope>(1); }
StringRef getName() const { return getStringField(2); }
diff --git a/include/llvm/IR/DebugLoc.h b/include/llvm/IR/DebugLoc.h
index 50b5d54..6d769d4 100644
--- a/include/llvm/IR/DebugLoc.h
+++ b/include/llvm/IR/DebugLoc.h
@@ -21,6 +21,7 @@ namespace llvm {
template <typename T> struct DenseMapInfo;
class MDNode;
class LLVMContext;
+ class raw_ostream;
/// DebugLoc - Debug location id. This is carried by Instruction, SDNode,
/// and MachineInstr to compactly encode file/line/scope information for an
@@ -58,7 +59,7 @@ namespace llvm {
/// get - Get a new DebugLoc that corresponds to the specified line/col
/// scope/inline location.
static DebugLoc get(unsigned Line, unsigned Col,
- MDNode *Scope, MDNode *InlinedAt = 0);
+ MDNode *Scope, MDNode *InlinedAt = nullptr);
/// getFromDILocation - Translate the DILocation quad into a DebugLoc.
static DebugLoc getFromDILocation(MDNode *N);
@@ -106,6 +107,8 @@ namespace llvm {
bool operator!=(const DebugLoc &DL) const { return !(*this == DL); }
void dump(const LLVMContext &Ctx) const;
+ /// \brief prints source location /path/to/file.exe:line:col @[inlined at]
+ void print(const LLVMContext &Ctx, raw_ostream &OS) const;
};
template <>
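
DebugLoc::print() complements dump() by taking an arbitrary raw_ostream. A sketch that reports where an instruction originated, assuming the 3.5-era isUnknown() query:

    #include "llvm/IR/DebugLoc.h"
    #include "llvm/IR/Instruction.h"
    #include "llvm/Support/raw_ostream.h"

    // Print "file:line:col" (plus any inlined-at chain) for an instruction
    // that carries debug info; do nothing otherwise.
    static void printInstLocation(const llvm::Instruction &I) {
      const llvm::DebugLoc &DL = I.getDebugLoc();
      if (DL.isUnknown())
        return;
      DL.print(I.getContext(), llvm::errs());
      llvm::errs() << "\n";
    }
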
diff --git a/include/llvm/IR/DerivedTypes.h b/include/llvm/IR/DerivedTypes.h
index 71d9973..ff15087 100644
--- a/include/llvm/IR/DerivedTypes.h
+++ b/include/llvm/IR/DerivedTypes.h
@@ -188,7 +188,7 @@ class StructType : public CompositeType {
StructType(const StructType &) LLVM_DELETED_FUNCTION;
const StructType &operator=(const StructType &) LLVM_DELETED_FUNCTION;
StructType(LLVMContext &C)
- : CompositeType(C, StructTyID), SymbolTableEntry(0) {}
+ : CompositeType(C, StructTyID), SymbolTableEntry(nullptr) {}
enum {
/// This is the contents of the SubClassData field.
SCDB_HasBody = 1,
@@ -249,10 +249,10 @@ public:
bool isOpaque() const { return (getSubclassData() & SCDB_HasBody) == 0; }
/// isSized - Return true if this is a sized type.
- bool isSized(SmallPtrSet<const Type*, 4> *Visited = 0) const;
+ bool isSized(SmallPtrSet<const Type*, 4> *Visited = nullptr) const;
/// hasName - Return true if this is a named struct that has a non-empty name.
- bool hasName() const { return SymbolTableEntry != 0; }
+ bool hasName() const { return SymbolTableEntry != nullptr; }
/// getName - Return the name for this struct type if it has an identity.
/// This may return an empty string for an unnamed struct type. Do not call
diff --git a/include/llvm/IR/DiagnosticInfo.h b/include/llvm/IR/DiagnosticInfo.h
index 49eb1b0..e78a42b 100644
--- a/include/llvm/IR/DiagnosticInfo.h
+++ b/include/llvm/IR/DiagnosticInfo.h
@@ -15,7 +15,9 @@
#ifndef LLVM_SUPPORT_DIAGNOSTICINFO_H
#define LLVM_SUPPORT_DIAGNOSTICINFO_H
+#include "llvm-c/Core.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/IR/DebugLoc.h"
#include "llvm/Support/Casting.h"
namespace llvm {
@@ -24,8 +26,10 @@ namespace llvm {
class DiagnosticPrinter;
class Function;
class Instruction;
+class LLVMContextImpl;
class Twine;
class Value;
+class DebugLoc;
/// \brief Defines the different supported severity of a diagnostic.
enum DiagnosticSeverity {
@@ -44,6 +48,9 @@ enum DiagnosticKind {
DK_StackSize,
DK_DebugMetadataVersion,
DK_SampleProfile,
+ DK_OptimizationRemark,
+ DK_OptimizationRemarkMissed,
+ DK_OptimizationRemarkAnalysis,
DK_FirstPluginKind
};
@@ -105,7 +112,7 @@ public:
DiagnosticInfoInlineAsm(const Twine &MsgStr,
DiagnosticSeverity Severity = DS_Error)
: DiagnosticInfo(DK_InlineAsm, Severity), LocCookie(0), MsgStr(MsgStr),
- Instr(NULL) {}
+ Instr(nullptr) {}
/// \p LocCookie if non-zero gives the line number for this report.
/// \p MsgStr gives the message.
@@ -114,7 +121,7 @@ public:
DiagnosticInfoInlineAsm(unsigned LocCookie, const Twine &MsgStr,
DiagnosticSeverity Severity = DS_Error)
: DiagnosticInfo(DK_InlineAsm, Severity), LocCookie(LocCookie),
- MsgStr(MsgStr), Instr(NULL) {}
+ MsgStr(MsgStr), Instr(nullptr) {}
/// \p Instr gives the original instruction that triggered the diagnostic.
/// \p MsgStr gives the message.
@@ -208,7 +215,7 @@ public:
LineNum(0), Msg(Msg) {}
DiagnosticInfoSampleProfile(const Twine &Msg,
DiagnosticSeverity Severity = DS_Error)
- : DiagnosticInfo(DK_SampleProfile, Severity), FileName(NULL),
+ : DiagnosticInfo(DK_SampleProfile, Severity), FileName(nullptr),
LineNum(0), Msg(Msg) {}
/// \see DiagnosticInfo::print.
@@ -227,7 +234,7 @@ private:
/// Name of the input file associated with this diagnostic.
const char *FileName;
- /// Line number where the diagnostic occured. If 0, no line number will
+ /// Line number where the diagnostic occurred. If 0, no line number will
/// be emitted in the message.
unsigned LineNum;
@@ -235,6 +242,183 @@ private:
const Twine &Msg;
};
+/// Common features for diagnostics dealing with optimization remarks.
+class DiagnosticInfoOptimizationRemarkBase : public DiagnosticInfo {
+public:
+ /// \p PassName is the name of the pass emitting this diagnostic.
+ /// \p Fn is the function where the diagnostic is being emitted. \p DLoc is
+ /// the location information to use in the diagnostic. If line table
+ /// information is available, the diagnostic will include the source code
+ /// location. \p Msg is the message to show. Note that this class does not
+ /// copy this message, so this reference must be valid for the whole life time
+ /// of the diagnostic.
+ DiagnosticInfoOptimizationRemarkBase(enum DiagnosticKind Kind,
+ const char *PassName, const Function &Fn,
+ const DebugLoc &DLoc, const Twine &Msg)
+ : DiagnosticInfo(Kind, DS_Remark), PassName(PassName), Fn(Fn), DLoc(DLoc),
+ Msg(Msg) {}
+
+ /// \see DiagnosticInfo::print.
+ void print(DiagnosticPrinter &DP) const override;
+
+ /// Hand rolled RTTI.
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == DK_OptimizationRemark;
+ }
+
+ /// Return true if this optimization remark is enabled by one of
+ /// the LLVM command line flags (-pass-remarks, -pass-remarks-missed,
+ /// or -pass-remarks-analysis). Note that this only handles the LLVM
+ /// flags. We cannot access Clang flags from here (they are handled
+ /// in BackendConsumer::OptimizationRemarkHandler).
+ virtual bool isEnabled() const = 0;
+
+ /// Return true if location information is available for this diagnostic.
+ bool isLocationAvailable() const;
+
+ /// Return a string with the location information for this diagnostic
+ /// in the format "file:line:col". If location information is not available,
+ /// it returns "<unknown>:0:0".
+ const std::string getLocationStr() const;
+
+ /// Return location information for this diagnostic in three parts:
+ /// the source file name, line number and column.
+ void getLocation(StringRef *Filename, unsigned *Line, unsigned *Column) const;
+
+ const char *getPassName() const { return PassName; }
+ const Function &getFunction() const { return Fn; }
+ const DebugLoc &getDebugLoc() const { return DLoc; }
+ const Twine &getMsg() const { return Msg; }
+
+private:
+ /// Name of the pass that triggers this report. If this matches the
+ /// regular expression given in -Rpass=regexp, then the remark will
+ /// be emitted.
+ const char *PassName;
+
+ /// Function where this diagnostic is triggered.
+ const Function &Fn;
+
+ /// Debug location where this diagnostic is triggered.
+ DebugLoc DLoc;
+
+ /// Message to report.
+ const Twine &Msg;
+};
+
+/// Diagnostic information for applied optimization remarks.
+class DiagnosticInfoOptimizationRemark
+ : public DiagnosticInfoOptimizationRemarkBase {
+public:
+ /// \p PassName is the name of the pass emitting this diagnostic. If
+ /// this name matches the regular expression given in -Rpass=, then the
+ /// diagnostic will be emitted. \p Fn is the function where the diagnostic
+ /// is being emitted. \p DLoc is the location information to use in the
+ /// diagnostic. If line table information is available, the diagnostic
+ /// will include the source code location. \p Msg is the message to show.
+ /// Note that this class does not copy this message, so this reference
+ /// must be valid for the whole life time of the diagnostic.
+ DiagnosticInfoOptimizationRemark(const char *PassName, const Function &Fn,
+ const DebugLoc &DLoc, const Twine &Msg)
+ : DiagnosticInfoOptimizationRemarkBase(DK_OptimizationRemark, PassName,
+ Fn, DLoc, Msg) {}
+
+ /// Hand rolled RTTI
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == DK_OptimizationRemark;
+ }
+
+ /// \see DiagnosticInfoOptimizationRemarkBase::isEnabled.
+ virtual bool isEnabled() const override;
+};
+
+/// Diagnostic information for missed-optimization remarks.
+class DiagnosticInfoOptimizationRemarkMissed
+ : public DiagnosticInfoOptimizationRemarkBase {
+public:
+ /// \p PassName is the name of the pass emitting this diagnostic. If
+ /// this name matches the regular expression given in -Rpass-missed=, then the
+ /// diagnostic will be emitted. \p Fn is the function where the diagnostic
+ /// is being emitted. \p DLoc is the location information to use in the
+ /// diagnostic. If line table information is available, the diagnostic
+ /// will include the source code location. \p Msg is the message to show.
+ /// Note that this class does not copy this message, so this reference
+ /// must be valid for the whole life time of the diagnostic.
+ DiagnosticInfoOptimizationRemarkMissed(const char *PassName,
+ const Function &Fn,
+ const DebugLoc &DLoc, const Twine &Msg)
+ : DiagnosticInfoOptimizationRemarkBase(DK_OptimizationRemarkMissed,
+ PassName, Fn, DLoc, Msg) {}
+
+ /// Hand rolled RTTI
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == DK_OptimizationRemarkMissed;
+ }
+
+ /// \see DiagnosticInfoOptimizationRemarkBase::isEnabled.
+ virtual bool isEnabled() const override;
+};
+
+/// Diagnostic information for optimization analysis remarks.
+class DiagnosticInfoOptimizationRemarkAnalysis
+ : public DiagnosticInfoOptimizationRemarkBase {
+public:
+ /// \p PassName is the name of the pass emitting this diagnostic. If
+ /// this name matches the regular expression given in -Rpass-analysis=, then
+ /// the diagnostic will be emitted. \p Fn is the function where the diagnostic
+ /// is being emitted. \p DLoc is the location information to use in the
+ /// diagnostic. If line table information is available, the diagnostic will
+ /// include the source code location. \p Msg is the message to show. Note that
+ /// this class does not copy this message, so this reference must be valid for
+ /// the whole life time of the diagnostic.
+ DiagnosticInfoOptimizationRemarkAnalysis(const char *PassName,
+ const Function &Fn,
+ const DebugLoc &DLoc,
+ const Twine &Msg)
+ : DiagnosticInfoOptimizationRemarkBase(DK_OptimizationRemarkAnalysis,
+ PassName, Fn, DLoc, Msg) {}
+
+ /// Hand rolled RTTI
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == DK_OptimizationRemarkAnalysis;
+ }
+
+ /// \see DiagnosticInfoOptimizationRemarkBase::isEnabled.
+ virtual bool isEnabled() const override;
+};
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(DiagnosticInfo, LLVMDiagnosticInfoRef)
+
+/// Emit an optimization-applied message. \p PassName is the name of the pass
+/// emitting the message. If -Rpass= is given and \p PassName matches the
+/// regular expression in -Rpass, then the remark will be emitted. \p Fn is
+/// the function triggering the remark, \p DLoc is the debug location where
+/// the diagnostic is generated. \p Msg is the message string to use.
+void emitOptimizationRemark(LLVMContext &Ctx, const char *PassName,
+ const Function &Fn, const DebugLoc &DLoc,
+ const Twine &Msg);
+
+/// Emit an optimization-missed message. \p PassName is the name of the
+/// pass emitting the message. If -Rpass-missed= is given and \p PassName
+/// matches the regular expression in -Rpass, then the remark will be
+/// emitted. \p Fn is the function triggering the remark, \p DLoc is the
+/// debug location where the diagnostic is generated. \p Msg is the
+/// message string to use.
+void emitOptimizationRemarkMissed(LLVMContext &Ctx, const char *PassName,
+ const Function &Fn, const DebugLoc &DLoc,
+ const Twine &Msg);
+
+/// Emit an optimization analysis remark message. \p PassName is the name of
+/// the pass emitting the message. If -Rpass-analysis= is given and \p
+/// PassName matches the regular expression in -Rpass, then the remark will be
+/// emitted. \p Fn is the function triggering the remark, \p DLoc is the debug
+/// location where the diagnostic is generated. \p Msg is the message string
+/// to use.
+void emitOptimizationRemarkAnalysis(LLVMContext &Ctx, const char *PassName,
+ const Function &Fn, const DebugLoc &DLoc,
+ const Twine &Msg);
+
} // End namespace llvm
#endif
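
The emitOptimizationRemark* helpers are the intended entry points for passes; the DiagnosticInfoOptimizationRemark* classes above carry the payload to the context's diagnostic handler. A sketch of a pass reporting a successful transformation; "my-pass" and the message are illustrative:

    #include "llvm/ADT/Twine.h"
    #include "llvm/IR/DebugLoc.h"
    #include "llvm/IR/DiagnosticInfo.h"
    #include "llvm/IR/Function.h"

    // Emits a DS_Remark diagnostic that is shown when the user passes
    // -pass-remarks=my-pass (or a matching regular expression).
    static void noteUnrolled(llvm::Function &F, const llvm::DebugLoc &Loc,
                             unsigned Count) {
      llvm::emitOptimizationRemark(F.getContext(), "my-pass", F, Loc,
                                   "unrolled loop by a factor of " +
                                       llvm::Twine(Count));
    }
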
diff --git a/include/llvm/IR/Dominators.h b/include/llvm/IR/Dominators.h
index 86bbe39..3648202 100644
--- a/include/llvm/IR/Dominators.h
+++ b/include/llvm/IR/Dominators.h
@@ -182,7 +182,7 @@ public:
void releaseMemory() override { DT.releaseMemory(); }
- void print(raw_ostream &OS, const Module *M = 0) const override;
+ void print(raw_ostream &OS, const Module *M = nullptr) const override;
};
} // End llvm namespace
diff --git a/include/llvm/IR/Function.h b/include/llvm/IR/Function.h
index cb43bba..22444bd 100644
--- a/include/llvm/IR/Function.h
+++ b/include/llvm/IR/Function.h
@@ -23,7 +23,7 @@
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
-#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalObject.h"
#include "llvm/Support/Compiler.h"
namespace llvm {
@@ -68,8 +68,7 @@ private:
mutable ilist_half_node<Argument> Sentinel;
};
-class Function : public GlobalValue,
- public ilist_node<Function> {
+class Function : public GlobalObject, public ilist_node<Function> {
public:
typedef iplist<Argument> ArgumentListType;
typedef iplist<BasicBlock> BasicBlockListType;
@@ -123,11 +122,11 @@ private:
/// the module.
///
Function(FunctionType *Ty, LinkageTypes Linkage,
- const Twine &N = "", Module *M = 0);
+ const Twine &N = "", Module *M = nullptr);
public:
static Function *Create(FunctionType *Ty, LinkageTypes Linkage,
- const Twine &N = "", Module *M = 0) {
+ const Twine &N = "", Module *M = nullptr) {
return new(0) Function(Ty, Linkage, N, M);
}
@@ -298,7 +297,8 @@ public:
/// @brief Determine if the function returns a structure through first
/// pointer argument.
bool hasStructRetAttr() const {
- return AttributeSets.hasAttribute(1, Attribute::StructRet);
+ return AttributeSets.hasAttribute(1, Attribute::StructRet) ||
+ AttributeSets.hasAttribute(2, Attribute::StructRet);
}
/// @brief Determine if the parameter does not alias other parameters.
@@ -483,7 +483,7 @@ public:
/// other than direct calls or invokes to it, or blockaddress expressions.
/// Optionally passes back an offending user for diagnostic purposes.
///
- bool hasAddressTaken(const User** = 0) const;
+ bool hasAddressTaken(const User** = nullptr) const;
/// isDefTriviallyDead - Return true if it is trivially safe to remove
/// this function definition from the module (because it isn't externally
@@ -505,12 +505,12 @@ private:
inline ValueSymbolTable *
ilist_traits<BasicBlock>::getSymTab(Function *F) {
- return F ? &F->getValueSymbolTable() : 0;
+ return F ? &F->getValueSymbolTable() : nullptr;
}
inline ValueSymbolTable *
ilist_traits<Argument>::getSymTab(Function *F) {
- return F ? &F->getValueSymbolTable() : 0;
+ return F ? &F->getValueSymbolTable() : nullptr;
}
} // End llvm namespace
diff --git a/include/llvm/IR/GVMaterializer.h b/include/llvm/IR/GVMaterializer.h
index 6717bc8..dbe52bc 100644
--- a/include/llvm/IR/GVMaterializer.h
+++ b/include/llvm/IR/GVMaterializer.h
@@ -33,26 +33,26 @@ protected:
public:
virtual ~GVMaterializer();
- /// isMaterializable - True if GV can be materialized from whatever backing
- /// store this GVMaterializer uses and has not been materialized yet.
+ /// True if GV can be materialized from whatever backing store this
+ /// GVMaterializer uses and has not been materialized yet.
virtual bool isMaterializable(const GlobalValue *GV) const = 0;
- /// isDematerializable - True if GV has been materialized and can be
- /// dematerialized back to whatever backing store this GVMaterializer uses.
+ /// True if GV has been materialized and can be dematerialized back to
+ /// whatever backing store this GVMaterializer uses.
virtual bool isDematerializable(const GlobalValue *GV) const = 0;
- /// Materialize - make sure the given GlobalValue is fully read.
+ /// Make sure the given GlobalValue is fully read.
///
virtual error_code Materialize(GlobalValue *GV) = 0;
- /// Dematerialize - If the given GlobalValue is read in, and if the
- /// GVMaterializer supports it, release the memory for the GV, and set it up
- /// to be materialized lazily. If the Materializer doesn't support this
- /// capability, this method is a noop.
+ /// If the given GlobalValue is read in, and if the GVMaterializer supports
+ /// it, release the memory for the GV, and set it up to be materialized
+ /// lazily. If the Materializer doesn't support this capability, this method
+ /// is a noop.
///
virtual void Dematerialize(GlobalValue *) {}
- /// MaterializeModule - make sure the entire Module has been completely read.
+ /// Make sure the entire Module has been completely read.
///
virtual error_code MaterializeModule(Module *M) = 0;
};
diff --git a/include/llvm/IR/GetElementPtrTypeIterator.h b/include/llvm/IR/GetElementPtrTypeIterator.h
index f2722d6..dcf8e64 100644
--- a/include/llvm/IR/GetElementPtrTypeIterator.h
+++ b/include/llvm/IR/GetElementPtrTypeIterator.h
@@ -38,7 +38,7 @@ namespace llvm {
}
static generic_gep_type_iterator end(ItTy It) {
generic_gep_type_iterator I;
- I.CurTy = 0;
+ I.CurTy = nullptr;
I.OpIt = It;
return I;
}
@@ -69,7 +69,7 @@ namespace llvm {
if (CompositeType *CT = dyn_cast<CompositeType>(CurTy)) {
CurTy = CT->getTypeAtIndex(getOperand());
} else {
- CurTy = 0;
+ CurTy = nullptr;
}
++OpIt;
return *this;
diff --git a/include/llvm/IR/GlobalAlias.h b/include/llvm/IR/GlobalAlias.h
index 2ca481a..d9f0b4a 100644
--- a/include/llvm/IR/GlobalAlias.h
+++ b/include/llvm/IR/GlobalAlias.h
@@ -33,15 +33,37 @@ class GlobalAlias : public GlobalValue, public ilist_node<GlobalAlias> {
void setParent(Module *parent);
+ GlobalAlias(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage,
+ const Twine &Name, GlobalObject *Aliasee, Module *Parent);
+
public:
// allocate space for exactly one operand
void *operator new(size_t s) {
return User::operator new(s, 1);
}
- /// GlobalAlias ctor - If a parent module is specified, the alias is
- /// automatically inserted into the end of the specified module's alias list.
- GlobalAlias(Type *Ty, LinkageTypes Linkage, const Twine &Name = "",
- Constant* Aliasee = 0, Module *Parent = 0);
+
+ /// If a parent module is specified, the alias is automatically inserted into
+ /// the end of the specified module's alias list.
+ static GlobalAlias *create(Type *Ty, unsigned AddressSpace,
+ LinkageTypes Linkage, const Twine &Name,
+ GlobalObject *Aliasee, Module *Parent);
+
+ // Without the Aliasee.
+ static GlobalAlias *create(Type *Ty, unsigned AddressSpace,
+ LinkageTypes Linkage, const Twine &Name,
+ Module *Parent);
+
+ // The module is taken from the Aliasee.
+ static GlobalAlias *create(Type *Ty, unsigned AddressSpace,
+ LinkageTypes Linkage, const Twine &Name,
+ GlobalObject *Aliasee);
+
+ // Type, Parent and AddressSpace taken from the Aliasee.
+ static GlobalAlias *create(LinkageTypes Linkage, const Twine &Name,
+ GlobalObject *Aliasee);
+
+ // Linkage, Type, Parent and AddressSpace taken from the Aliasee.
+ static GlobalAlias *create(const Twine &Name, GlobalObject *Aliasee);
/// Provide fast operand accessors
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);
@@ -57,20 +79,13 @@ public:
void eraseFromParent() override;
/// set/getAliasee - These methods retrieve and set alias target.
- void setAliasee(Constant *GV);
- const Constant *getAliasee() const {
- return getOperand(0);
- }
- Constant *getAliasee() {
- return getOperand(0);
+ void setAliasee(GlobalObject *GO);
+ const GlobalObject *getAliasee() const {
+ return const_cast<GlobalAlias *>(this)->getAliasee();
}
- /// This method tries to ultimately resolve the alias by going through the
- /// aliasing chain and trying to find the very last global. Returns NULL if a
- /// cycle was found.
- GlobalValue *getAliasedGlobal();
- const GlobalValue *getAliasedGlobal() const {
- return const_cast<GlobalAlias *>(this)->getAliasedGlobal();
+ GlobalObject *getAliasee() {
+ return cast_or_null<GlobalObject>(getOperand(0));
}
static bool isValidLinkage(LinkageTypes L) {
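
GlobalAlias construction moves behind static create() overloads, and the aliasee is now a GlobalObject rather than an arbitrary Constant. A sketch using the smallest overload, which takes linkage, type, address space and parent module from the aliasee:

    #include "llvm/ADT/Twine.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/GlobalAlias.h"

    // Give an existing function an additional symbol name. The alias ends up
    // in F's module with F's linkage and type.
    static llvm::GlobalAlias *addAliasFor(llvm::Function *F,
                                          const llvm::Twine &AliasName) {
      return llvm::GlobalAlias::create(AliasName, F);
    }
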
diff --git a/include/llvm/IR/GlobalObject.h b/include/llvm/IR/GlobalObject.h
new file mode 100644
index 0000000..3bc8b85
--- /dev/null
+++ b/include/llvm/IR/GlobalObject.h
@@ -0,0 +1,58 @@
+//===-- llvm/GlobalObject.h - Class to represent a global object *- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This represents an independent object. That is, a function or a global
+// variable, but not an alias.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_GLOBALOBJECT_H
+#define LLVM_IR_GLOBALOBJECT_H
+
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GlobalValue.h"
+
+namespace llvm {
+
+class Module;
+
+class GlobalObject : public GlobalValue {
+ GlobalObject(const GlobalObject &) LLVM_DELETED_FUNCTION;
+
+protected:
+ GlobalObject(Type *Ty, ValueTy VTy, Use *Ops, unsigned NumOps,
+ LinkageTypes Linkage, const Twine &Name)
+ : GlobalValue(Ty, VTy, Ops, NumOps, Linkage, Name) {
+ setGlobalValueSubClassData(0);
+ }
+
+ std::string Section; // Section to emit this into, empty means default
+public:
+ unsigned getAlignment() const {
+ return (1u << getGlobalValueSubClassData()) >> 1;
+ }
+ void setAlignment(unsigned Align);
+
+ bool hasSection() const { return !getSection().empty(); }
+ const std::string &getSection() const { return Section; }
+ void setSection(StringRef S);
+
+ void copyAttributesFrom(const GlobalValue *Src) override;
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Value *V) {
+ return V->getValueID() == Value::FunctionVal ||
+ V->getValueID() == Value::GlobalVariableVal;
+ }
+};
+
+} // End llvm namespace
+
+#endif
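
With section and alignment hoisted into GlobalObject, code that only applies to functions and global variables (not aliases) can key off a single dyn_cast. A sketch:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/IR/GlobalObject.h"
    #include "llvm/IR/GlobalValue.h"
    #include "llvm/Support/Casting.h"

    // Returns the explicit section of a function or global variable, or an
    // empty string for anything that has no section of its own.
    static llvm::StringRef explicitSection(const llvm::GlobalValue *GV) {
      if (const llvm::GlobalObject *GO = llvm::dyn_cast<llvm::GlobalObject>(GV))
        if (GO->hasSection())
          return GO->getSection();
      return llvm::StringRef();
    }
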
diff --git a/include/llvm/IR/GlobalValue.h b/include/llvm/IR/GlobalValue.h
index 59c320d..10df372 100644
--- a/include/llvm/IR/GlobalValue.h
+++ b/include/llvm/IR/GlobalValue.h
@@ -59,11 +59,11 @@ public:
};
protected:
- GlobalValue(Type *ty, ValueTy vty, Use *Ops, unsigned NumOps,
- LinkageTypes linkage, const Twine &Name)
- : Constant(ty, vty, Ops, NumOps), Linkage(linkage),
- Visibility(DefaultVisibility), Alignment(0), UnnamedAddr(0),
- DllStorageClass(DefaultStorageClass), Parent(0) {
+ GlobalValue(Type *Ty, ValueTy VTy, Use *Ops, unsigned NumOps,
+ LinkageTypes Linkage, const Twine &Name)
+ : Constant(Ty, VTy, Ops, NumOps), Linkage(Linkage),
+ Visibility(DefaultVisibility), UnnamedAddr(0),
+ DllStorageClass(DefaultStorageClass), Parent(nullptr) {
setName(Name);
}
@@ -71,20 +71,29 @@ protected:
// Linkage and Visibility from turning into negative values.
LinkageTypes Linkage : 5; // The linkage of this global
unsigned Visibility : 2; // The visibility style of this global
- unsigned Alignment : 16; // Alignment of this symbol, must be power of two
unsigned UnnamedAddr : 1; // This value's address is not significant
unsigned DllStorageClass : 2; // DLL storage class
+
+private:
+ // Give subclasses access to what otherwise would be wasted padding.
+ // (22 + 2 + 1 + 2 + 5) == 32.
+ unsigned SubClassData : 22;
+protected:
+ unsigned getGlobalValueSubClassData() const {
+ return SubClassData;
+ }
+ void setGlobalValueSubClassData(unsigned V) {
+ assert(V < (1 << 22) && "It will not fit");
+ SubClassData = V;
+ }
+
Module *Parent; // The containing module.
- std::string Section; // Section to emit this into, empty mean default
public:
~GlobalValue() {
removeDeadConstantUsers(); // remove any dead constants using this.
}
- unsigned getAlignment() const {
- return (1u << Alignment) >> 1;
- }
- void setAlignment(unsigned Align);
+ unsigned getAlignment() const;
bool hasUnnamedAddr() const { return UnnamedAddr; }
void setUnnamedAddr(bool Val) { UnnamedAddr = Val; }
@@ -95,7 +104,11 @@ public:
bool hasProtectedVisibility() const {
return Visibility == ProtectedVisibility;
}
- void setVisibility(VisibilityTypes V) { Visibility = V; }
+ void setVisibility(VisibilityTypes V) {
+ assert((!hasLocalLinkage() || V == DefaultVisibility) &&
+ "local linkage requires default visibility");
+ Visibility = V;
+ }
DLLStorageClassTypes getDLLStorageClass() const {
return DLLStorageClassTypes(DllStorageClass);
@@ -108,22 +121,10 @@ public:
}
void setDLLStorageClass(DLLStorageClassTypes C) { DllStorageClass = C; }
- bool hasSection() const { return !Section.empty(); }
- const std::string &getSection() const { return Section; }
- void setSection(StringRef S) {
- assert((getValueID() != Value::GlobalAliasVal || S.empty()) &&
- "GlobalAlias should not have a section!");
- Section = S;
- }
-
- /// If the usage is empty (except transitively dead constants), then this
- /// global value can be safely deleted since the destructor will
- /// delete the dead constants as well.
- /// @brief Determine if the usage of this global value is empty except
- /// for transitively dead constants.
- bool use_empty_except_constants();
+ bool hasSection() const { return !getSection().empty(); }
+ const std::string &getSection() const;
- /// getType - Global values are always pointers.
+ /// Global values are always pointers.
inline PointerType *getType() const {
return cast<PointerType>(User::getType());
}
@@ -144,8 +145,14 @@ public:
static bool isLinkOnceLinkage(LinkageTypes Linkage) {
return Linkage == LinkOnceAnyLinkage || Linkage == LinkOnceODRLinkage;
}
+ static bool isWeakAnyLinkage(LinkageTypes Linkage) {
+ return Linkage == WeakAnyLinkage;
+ }
+ static bool isWeakODRLinkage(LinkageTypes Linkage) {
+ return Linkage == WeakODRLinkage;
+ }
static bool isWeakLinkage(LinkageTypes Linkage) {
- return Linkage == WeakAnyLinkage || Linkage == WeakODRLinkage;
+ return isWeakAnyLinkage(Linkage) || isWeakODRLinkage(Linkage);
}
static bool isAppendingLinkage(LinkageTypes Linkage) {
return Linkage == AppendingLinkage;
@@ -166,24 +173,24 @@ public:
return Linkage == CommonLinkage;
}
- /// isDiscardableIfUnused - Whether the definition of this global may be
- /// discarded if it is not used in its compilation unit.
+ /// Whether the definition of this global may be discarded if it is not used
+ /// in its compilation unit.
static bool isDiscardableIfUnused(LinkageTypes Linkage) {
return isLinkOnceLinkage(Linkage) || isLocalLinkage(Linkage);
}
- /// mayBeOverridden - Whether the definition of this global may be replaced
- /// by something non-equivalent at link time. For example, if a function has
- /// weak linkage then the code defining it may be replaced by different code.
+ /// Whether the definition of this global may be replaced by something
+ /// non-equivalent at link time. For example, if a function has weak linkage
+ /// then the code defining it may be replaced by different code.
static bool mayBeOverridden(LinkageTypes Linkage) {
return Linkage == WeakAnyLinkage || Linkage == LinkOnceAnyLinkage ||
Linkage == CommonLinkage || Linkage == ExternalWeakLinkage;
}
- /// isWeakForLinker - Whether the definition of this global may be replaced at
- /// link time. NB: Using this method outside of the code generators is almost
- /// always a mistake: when working at the IR level use mayBeOverridden instead
- /// as it knows about ODR semantics.
+ /// Whether the definition of this global may be replaced at link time. NB:
+ /// Using this method outside of the code generators is almost always a
+ /// mistake: when working at the IR level use mayBeOverridden instead as it
+ /// knows about ODR semantics.
static bool isWeakForLinker(LinkageTypes Linkage) {
return Linkage == AvailableExternallyLinkage || Linkage == WeakAnyLinkage ||
Linkage == WeakODRLinkage || Linkage == LinkOnceAnyLinkage ||
@@ -201,6 +208,12 @@ public:
bool hasWeakLinkage() const {
return isWeakLinkage(Linkage);
}
+ bool hasWeakAnyLinkage() const {
+ return isWeakAnyLinkage(Linkage);
+ }
+ bool hasWeakODRLinkage() const {
+ return isWeakODRLinkage(Linkage);
+ }
bool hasAppendingLinkage() const { return isAppendingLinkage(Linkage); }
bool hasInternalLinkage() const { return isInternalLinkage(Linkage); }
bool hasPrivateLinkage() const { return isPrivateLinkage(Linkage); }
@@ -208,7 +221,11 @@ public:
bool hasExternalWeakLinkage() const { return isExternalWeakLinkage(Linkage); }
bool hasCommonLinkage() const { return isCommonLinkage(Linkage); }
- void setLinkage(LinkageTypes LT) { Linkage = LT; }
+ void setLinkage(LinkageTypes LT) {
+ if (isLocalLinkage(LT))
+ Visibility = DefaultVisibility;
+ Linkage = LT;
+ }
LinkageTypes getLinkage() const { return Linkage; }
bool isDiscardableIfUnused() const {
@@ -219,13 +236,13 @@ public:
bool isWeakForLinker() const { return isWeakForLinker(Linkage); }
- /// copyAttributesFrom - copy all additional attributes (those not needed to
- /// create a GlobalValue) from the GlobalValue Src to this one.
+ /// Copy all additional attributes (those not needed to create a GlobalValue)
+ /// from the GlobalValue Src to this one.
virtual void copyAttributesFrom(const GlobalValue *Src);
- /// getRealLinkageName - If special LLVM prefix that is used to inform the asm
- /// printer to not emit usual symbol prefix before the symbol name is used
- /// then return linkage name after skipping this special LLVM prefix.
+ /// If special LLVM prefix that is used to inform the asm printer to not emit
+ /// usual symbol prefix before the symbol name is used then return linkage
+ /// name after skipping this special LLVM prefix.
static StringRef getRealLinkageName(StringRef Name) {
if (!Name.empty() && Name[0] == '\1')
return Name.substr(1);
@@ -238,24 +255,24 @@ public:
/// BitcodeReader to load the Module.
/// @{
- /// isMaterializable - If this function's Module is being lazily streamed in
- /// functions from disk or some other source, this method can be used to check
- /// to see if the function has been read in yet or not.
+ /// If this function's Module is being lazily streamed in functions from disk
+ /// or some other source, this method can be used to check to see if the
+ /// function has been read in yet or not.
bool isMaterializable() const;
- /// isDematerializable - Returns true if this function was loaded from a
- /// GVMaterializer that's still attached to its Module and that knows how to
- /// dematerialize the function.
+ /// Returns true if this function was loaded from a GVMaterializer that's
+ /// still attached to its Module and that knows how to dematerialize the
+ /// function.
bool isDematerializable() const;
- /// Materialize - make sure this GlobalValue is fully read. If the module is
- /// corrupt, this returns true and fills in the optional string with
- /// information about the problem. If successful, this returns false.
- bool Materialize(std::string *ErrInfo = 0);
+ /// Make sure this GlobalValue is fully read. If the module is corrupt, this
+ /// returns true and fills in the optional string with information about the
+ /// problem. If successful, this returns false.
+ bool Materialize(std::string *ErrInfo = nullptr);
- /// Dematerialize - If this GlobalValue is read in, and if the GVMaterializer
- /// supports it, release the memory for the function, and set it up to be
- /// materialized lazily. If !isDematerializable(), this method is a noop.
+ /// If this GlobalValue is read in, and if the GVMaterializer supports it,
+ /// release the memory for the function, and set it up to be materialized
+ /// lazily. If !isDematerializable(), this method is a noop.
void Dematerialize();
/// @}
@@ -263,20 +280,18 @@ public:
/// Override from Constant class.
void destroyConstant() override;
- /// isDeclaration - Return true if the primary definition of this global
- /// value is outside of the current translation unit.
+ /// Return true if the primary definition of this global value is outside of
+ /// the current translation unit.
bool isDeclaration() const;
- /// removeFromParent - This method unlinks 'this' from the containing module,
- /// but does not delete it.
+ /// This method unlinks 'this' from the containing module, but does not delete
+ /// it.
virtual void removeFromParent() = 0;
- /// eraseFromParent - This method unlinks 'this' from the containing module
- /// and deletes it.
+ /// This method unlinks 'this' from the containing module and deletes it.
virtual void eraseFromParent() = 0;
- /// getParent - Get the module that this global value is contained inside
- /// of...
+ /// Get the module that this global value is contained inside of...
inline Module *getParent() { return Parent; }
inline const Module *getParent() const { return Parent; }
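The two setters above now maintain a single invariant: a global with local (internal or private) linkage always has default visibility, and setLinkage enforces it by resetting the visibility. A minimal sketch of how a caller observes this, assuming an existing module M with a function named "callee" (both are illustrative) and the 3.5-era C++ API shown in this diff:

#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Module.h"
#include <cassert>

// Sketch only: demote a function to internal linkage and observe the
// visibility reset performed by the new setLinkage().
void demoteToInternal(llvm::Module &M) {
  if (llvm::Function *F = M.getFunction("callee")) {
    F->setVisibility(llvm::GlobalValue::HiddenVisibility); // fine while linkage is external
    F->setLinkage(llvm::GlobalValue::InternalLinkage);     // local linkage: visibility reset
    assert(F->getVisibility() == llvm::GlobalValue::DefaultVisibility);
    // F->setVisibility(llvm::GlobalValue::HiddenVisibility); // would trip the new assert
  }
}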
diff --git a/include/llvm/IR/GlobalVariable.h b/include/llvm/IR/GlobalVariable.h
index a82740f..8cd4332 100644
--- a/include/llvm/IR/GlobalVariable.h
+++ b/include/llvm/IR/GlobalVariable.h
@@ -22,7 +22,7 @@
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/ilist_node.h"
-#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalObject.h"
#include "llvm/IR/OperandTraits.h"
namespace llvm {
@@ -32,7 +32,7 @@ class Constant;
template<typename ValueSubClass, typename ItemParentClass>
class SymbolTableListTraits;
-class GlobalVariable : public GlobalValue, public ilist_node<GlobalVariable> {
+class GlobalVariable : public GlobalObject, public ilist_node<GlobalVariable> {
friend class SymbolTableListTraits<GlobalVariable, Module>;
void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
void operator=(const GlobalVariable &) LLVM_DELETED_FUNCTION;
@@ -66,14 +66,14 @@ public:
/// GlobalVariable ctor - If a parent module is specified, the global is
/// automatically inserted into the end of the specified modules global list.
GlobalVariable(Type *Ty, bool isConstant, LinkageTypes Linkage,
- Constant *Initializer = 0, const Twine &Name = "",
+ Constant *Initializer = nullptr, const Twine &Name = "",
ThreadLocalMode = NotThreadLocal, unsigned AddressSpace = 0,
bool isExternallyInitialized = false);
/// GlobalVariable ctor - This creates a global and inserts it before the
/// specified other global.
GlobalVariable(Module &M, Type *Ty, bool isConstant,
LinkageTypes Linkage, Constant *Initializer,
- const Twine &Name = "", GlobalVariable *InsertBefore = 0,
+ const Twine &Name = "", GlobalVariable *InsertBefore = nullptr,
ThreadLocalMode = NotThreadLocal, unsigned AddressSpace = 0,
bool isExternallyInitialized = false);
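With GlobalVariable reparented onto GlobalObject (which now carries the section and alignment state) and the constructor defaults switched to nullptr, the two usual construction modes look as follows. A hedged sketch with illustrative names, assuming a module M: a null initializer yields a declaration, a non-null one a definition.

#include "llvm/IR/Constants.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"

void addGlobals(llvm::Module &M) {
  llvm::Type *I32 = llvm::Type::getInt32Ty(M.getContext());
  // Initializer == nullptr: an external declaration. The module takes ownership.
  new llvm::GlobalVariable(M, I32, /*isConstant=*/false,
                           llvm::GlobalValue::ExternalLinkage,
                           /*Initializer=*/nullptr, "ext_counter");
  // With an initializer: a definition owned by this module.
  new llvm::GlobalVariable(M, I32, /*isConstant=*/true,
                           llvm::GlobalValue::InternalLinkage,
                           llvm::ConstantInt::get(I32, 42), "answer");
}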
diff --git a/include/llvm/IR/IRBuilder.h b/include/llvm/IR/IRBuilder.h
index 79ee7b7..580d333 100644
--- a/include/llvm/IR/IRBuilder.h
+++ b/include/llvm/IR/IRBuilder.h
@@ -58,7 +58,7 @@ protected:
FastMathFlags FMF;
public:
- IRBuilderBase(LLVMContext &context, MDNode *FPMathTag = 0)
+ IRBuilderBase(LLVMContext &context, MDNode *FPMathTag = nullptr)
: Context(context), DefaultFPMathTag(FPMathTag), FMF() {
ClearInsertionPoint();
}
@@ -70,8 +70,8 @@ public:
/// \brief Clear the insertion point: created instructions will not be
/// inserted into a block.
void ClearInsertionPoint() {
- BB = 0;
- InsertPt = 0;
+ BB = nullptr;
+ InsertPt = nullptr;
}
BasicBlock *GetInsertBlock() const { return BB; }
@@ -140,14 +140,14 @@ public:
public:
/// \brief Creates a new insertion point which doesn't point to anything.
- InsertPoint() : Block(0) {}
+ InsertPoint() : Block(nullptr) {}
/// \brief Creates a new insertion point at the given location.
InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint)
: Block(InsertBlock), Point(InsertPoint) {}
/// \brief Returns true if this insert point is set.
- bool isSet() const { return (Block != 0); }
+ bool isSet() const { return (Block != nullptr); }
llvm::BasicBlock *getBlock() const { return Block; }
llvm::BasicBlock::iterator getPoint() const { return Point; }
@@ -362,27 +362,27 @@ public:
/// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
/// specified, it will be added to the instruction.
CallInst *CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, unsigned Align,
- bool isVolatile = false, MDNode *TBAATag = 0) {
+ bool isVolatile = false, MDNode *TBAATag = nullptr) {
return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile, TBAATag);
}
CallInst *CreateMemSet(Value *Ptr, Value *Val, Value *Size, unsigned Align,
- bool isVolatile = false, MDNode *TBAATag = 0);
+ bool isVolatile = false, MDNode *TBAATag = nullptr);
/// \brief Create and insert a memcpy between the specified pointers.
///
/// If the pointers aren't i8*, they will be converted. If a TBAA tag is
/// specified, it will be added to the instruction.
CallInst *CreateMemCpy(Value *Dst, Value *Src, uint64_t Size, unsigned Align,
- bool isVolatile = false, MDNode *TBAATag = 0,
- MDNode *TBAAStructTag = 0) {
+ bool isVolatile = false, MDNode *TBAATag = nullptr,
+ MDNode *TBAAStructTag = nullptr) {
return CreateMemCpy(Dst, Src, getInt64(Size), Align, isVolatile, TBAATag,
TBAAStructTag);
}
CallInst *CreateMemCpy(Value *Dst, Value *Src, Value *Size, unsigned Align,
- bool isVolatile = false, MDNode *TBAATag = 0,
- MDNode *TBAAStructTag = 0);
+ bool isVolatile = false, MDNode *TBAATag = nullptr,
+ MDNode *TBAAStructTag = nullptr);
/// \brief Create and insert a memmove between the specified
/// pointers.
@@ -390,22 +390,22 @@ public:
/// If the pointers aren't i8*, they will be converted. If a TBAA tag is
/// specified, it will be added to the instruction.
CallInst *CreateMemMove(Value *Dst, Value *Src, uint64_t Size, unsigned Align,
- bool isVolatile = false, MDNode *TBAATag = 0) {
+ bool isVolatile = false, MDNode *TBAATag = nullptr) {
return CreateMemMove(Dst, Src, getInt64(Size), Align, isVolatile, TBAATag);
}
CallInst *CreateMemMove(Value *Dst, Value *Src, Value *Size, unsigned Align,
- bool isVolatile = false, MDNode *TBAATag = 0);
+ bool isVolatile = false, MDNode *TBAATag = nullptr);
/// \brief Create a lifetime.start intrinsic.
///
/// If the pointer isn't i8* it will be converted.
- CallInst *CreateLifetimeStart(Value *Ptr, ConstantInt *Size = 0);
+ CallInst *CreateLifetimeStart(Value *Ptr, ConstantInt *Size = nullptr);
/// \brief Create a lifetime.end intrinsic.
///
/// If the pointer isn't i8* it will be converted.
- CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = 0);
+ CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = nullptr);
private:
Value *getCastedInt8PtrValue(Value *Ptr);
@@ -433,43 +433,44 @@ class IRBuilder : public IRBuilderBase, public Inserter {
T Folder;
public:
IRBuilder(LLVMContext &C, const T &F, const Inserter &I = Inserter(),
- MDNode *FPMathTag = 0)
+ MDNode *FPMathTag = nullptr)
: IRBuilderBase(C, FPMathTag), Inserter(I), Folder(F) {
}
- explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = 0)
+ explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr)
: IRBuilderBase(C, FPMathTag), Folder() {
}
- explicit IRBuilder(BasicBlock *TheBB, const T &F, MDNode *FPMathTag = 0)
+ explicit IRBuilder(BasicBlock *TheBB, const T &F, MDNode *FPMathTag = nullptr)
: IRBuilderBase(TheBB->getContext(), FPMathTag), Folder(F) {
SetInsertPoint(TheBB);
}
- explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = 0)
+ explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr)
: IRBuilderBase(TheBB->getContext(), FPMathTag), Folder() {
SetInsertPoint(TheBB);
}
- explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = 0)
+ explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr)
: IRBuilderBase(IP->getContext(), FPMathTag), Folder() {
SetInsertPoint(IP);
SetCurrentDebugLocation(IP->getDebugLoc());
}
- explicit IRBuilder(Use &U, MDNode *FPMathTag = 0)
+ explicit IRBuilder(Use &U, MDNode *FPMathTag = nullptr)
: IRBuilderBase(U->getContext(), FPMathTag), Folder() {
SetInsertPoint(U);
SetCurrentDebugLocation(cast<Instruction>(U.getUser())->getDebugLoc());
}
IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, const T& F,
- MDNode *FPMathTag = 0)
+ MDNode *FPMathTag = nullptr)
: IRBuilderBase(TheBB->getContext(), FPMathTag), Folder(F) {
SetInsertPoint(TheBB, IP);
}
- IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, MDNode *FPMathTag = 0)
+ IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
+ MDNode *FPMathTag = nullptr)
: IRBuilderBase(TheBB->getContext(), FPMathTag), Folder() {
SetInsertPoint(TheBB, IP);
}
@@ -541,7 +542,7 @@ public:
/// \brief Create a conditional 'br Cond, TrueDest, FalseDest'
/// instruction.
BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
- MDNode *BranchWeights = 0) {
+ MDNode *BranchWeights = nullptr) {
return Insert(addBranchWeights(BranchInst::Create(True, False, Cond),
BranchWeights));
}
@@ -550,7 +551,7 @@ public:
/// and with a hint for the number of cases that will be added (for efficient
/// allocation).
SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10,
- MDNode *BranchWeights = 0) {
+ MDNode *BranchWeights = nullptr) {
return Insert(addBranchWeights(SwitchInst::Create(V, Dest, NumCases),
BranchWeights));
}
@@ -638,7 +639,7 @@ public:
return CreateAdd(LHS, RHS, Name, true, false);
}
Value *CreateFAdd(Value *LHS, Value *RHS, const Twine &Name = "",
- MDNode *FPMathTag = 0) {
+ MDNode *FPMathTag = nullptr) {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
return Insert(Folder.CreateFAdd(LC, RC), Name);
@@ -660,7 +661,7 @@ public:
return CreateSub(LHS, RHS, Name, true, false);
}
Value *CreateFSub(Value *LHS, Value *RHS, const Twine &Name = "",
- MDNode *FPMathTag = 0) {
+ MDNode *FPMathTag = nullptr) {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
return Insert(Folder.CreateFSub(LC, RC), Name);
@@ -682,7 +683,7 @@ public:
return CreateMul(LHS, RHS, Name, true, false);
}
Value *CreateFMul(Value *LHS, Value *RHS, const Twine &Name = "",
- MDNode *FPMathTag = 0) {
+ MDNode *FPMathTag = nullptr) {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
return Insert(Folder.CreateFMul(LC, RC), Name);
@@ -714,7 +715,7 @@ public:
return CreateSDiv(LHS, RHS, Name, true);
}
Value *CreateFDiv(Value *LHS, Value *RHS, const Twine &Name = "",
- MDNode *FPMathTag = 0) {
+ MDNode *FPMathTag = nullptr) {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
return Insert(Folder.CreateFDiv(LC, RC), Name);
@@ -734,7 +735,7 @@ public:
return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name);
}
Value *CreateFRem(Value *LHS, Value *RHS, const Twine &Name = "",
- MDNode *FPMathTag = 0) {
+ MDNode *FPMathTag = nullptr) {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
return Insert(Folder.CreateFRem(LC, RC), Name);
@@ -844,7 +845,7 @@ public:
Value *CreateBinOp(Instruction::BinaryOps Opc,
Value *LHS, Value *RHS, const Twine &Name = "",
- MDNode *FPMathTag = 0) {
+ MDNode *FPMathTag = nullptr) {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
return Insert(Folder.CreateBinOp(Opc, LC, RC), Name);
@@ -869,7 +870,8 @@ public:
Value *CreateNUWNeg(Value *V, const Twine &Name = "") {
return CreateNeg(V, Name, true, false);
}
- Value *CreateFNeg(Value *V, const Twine &Name = "", MDNode *FPMathTag = 0) {
+ Value *CreateFNeg(Value *V, const Twine &Name = "",
+ MDNode *FPMathTag = nullptr) {
if (Constant *VC = dyn_cast<Constant>(V))
return Insert(Folder.CreateFNeg(VC), Name);
return Insert(AddFPMathAttributes(BinaryOperator::CreateFNeg(V),
@@ -885,7 +887,7 @@ public:
// Instruction creation methods: Memory Instructions
//===--------------------------------------------------------------------===//
- AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = 0,
+ AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr,
const Twine &Name = "") {
return Insert(new AllocaInst(Ty, ArraySize), Name);
}
@@ -898,7 +900,7 @@ public:
return Insert(new LoadInst(Ptr), Name);
}
LoadInst *CreateLoad(Value *Ptr, bool isVolatile, const Twine &Name = "") {
- return Insert(new LoadInst(Ptr, 0, isVolatile), Name);
+ return Insert(new LoadInst(Ptr, nullptr, isVolatile), Name);
}
StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
return Insert(new StoreInst(Val, Ptr, isVolatile));
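The IRBuilder changes are mechanical, but they touch the defaults used in everyday codegen: FP math tags, TBAA tags, array sizes, and insertion points all default to nullptr now. A small usage sketch under the 3.5-era API, emitting an alloca, a store, and a load into a basic block BB that is assumed to exist and to be otherwise empty:

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/IRBuilder.h"

llvm::Value *emitScratchSlot(llvm::BasicBlock *BB) {
  llvm::IRBuilder<> B(BB);                          // FPMathTag defaults to nullptr
  llvm::Type *I32 = B.getInt32Ty();
  llvm::Value *Slot = B.CreateAlloca(I32, /*ArraySize=*/nullptr, "slot");
  B.CreateStore(B.getInt32(7), Slot);
  return B.CreateLoad(Slot, "reload");              // pointee type is implied in this era
}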
diff --git a/include/llvm/IR/InstrTypes.h b/include/llvm/IR/InstrTypes.h
index e1a5130..a27859e 100644
--- a/include/llvm/IR/InstrTypes.h
+++ b/include/llvm/IR/InstrTypes.h
@@ -36,7 +36,7 @@ class TerminatorInst : public Instruction {
protected:
TerminatorInst(Type *Ty, Instruction::TermOps iType,
Use *Ops, unsigned NumOps,
- Instruction *InsertBefore = 0)
+ Instruction *InsertBefore = nullptr)
: Instruction(Ty, iType, Ops, NumOps, InsertBefore) {}
TerminatorInst(Type *Ty, Instruction::TermOps iType,
@@ -91,7 +91,7 @@ class UnaryInstruction : public Instruction {
protected:
UnaryInstruction(Type *Ty, unsigned iType, Value *V,
- Instruction *IB = 0)
+ Instruction *IB = nullptr)
: Instruction(Ty, iType, &Op<0>(), 1, IB) {
Op<0>() = V;
}
@@ -160,7 +160,7 @@ public:
///
static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2,
const Twine &Name = Twine(),
- Instruction *InsertBefore = 0);
+ Instruction *InsertBefore = nullptr);
/// Create() - Construct a binary instruction, given the opcode and the two
/// operands. Also automatically insert this instruction to the end of the
@@ -285,23 +285,23 @@ public:
/// instructions out of SUB and XOR instructions.
///
static BinaryOperator *CreateNeg(Value *Op, const Twine &Name = "",
- Instruction *InsertBefore = 0);
+ Instruction *InsertBefore = nullptr);
static BinaryOperator *CreateNeg(Value *Op, const Twine &Name,
BasicBlock *InsertAtEnd);
static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name = "",
- Instruction *InsertBefore = 0);
+ Instruction *InsertBefore = nullptr);
static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name,
BasicBlock *InsertAtEnd);
static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name = "",
- Instruction *InsertBefore = 0);
+ Instruction *InsertBefore = nullptr);
static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name,
BasicBlock *InsertAtEnd);
static BinaryOperator *CreateFNeg(Value *Op, const Twine &Name = "",
- Instruction *InsertBefore = 0);
+ Instruction *InsertBefore = nullptr);
static BinaryOperator *CreateFNeg(Value *Op, const Twine &Name,
BasicBlock *InsertAtEnd);
static BinaryOperator *CreateNot(Value *Op, const Twine &Name = "",
- Instruction *InsertBefore = 0);
+ Instruction *InsertBefore = nullptr);
static BinaryOperator *CreateNot(Value *Op, const Twine &Name,
BasicBlock *InsertAtEnd);
@@ -389,7 +389,7 @@ class CastInst : public UnaryInstruction {
protected:
/// @brief Constructor with insert-before-instruction semantics for subclasses
CastInst(Type *Ty, unsigned iType, Value *S,
- const Twine &NameStr = "", Instruction *InsertBefore = 0)
+ const Twine &NameStr = "", Instruction *InsertBefore = nullptr)
: UnaryInstruction(Ty, iType, S, InsertBefore) {
setName(NameStr);
}
@@ -411,7 +411,7 @@ public:
Value *S, ///< The value to be casted (operand 0)
Type *Ty, ///< The type to which cast should be made
const Twine &Name = "", ///< Name for the instruction
- Instruction *InsertBefore = 0 ///< Place to insert the instruction
+ Instruction *InsertBefore = nullptr ///< Place to insert the instruction
);
/// Provides a way to construct any of the CastInst subclasses using an
/// opcode instead of the subclass's constructor. The opcode must be in the
@@ -432,7 +432,7 @@ public:
Value *S, ///< The value to be casted (operand 0)
Type *Ty, ///< The type to which cast should be made
const Twine &Name = "", ///< Name for the instruction
- Instruction *InsertBefore = 0 ///< Place to insert the instruction
+ Instruction *InsertBefore = nullptr ///< Place to insert the instruction
);
/// @brief Create a ZExt or BitCast cast instruction
@@ -448,7 +448,7 @@ public:
Value *S, ///< The value to be casted (operand 0)
Type *Ty, ///< The type to which cast should be made
const Twine &Name = "", ///< Name for the instruction
- Instruction *InsertBefore = 0 ///< Place to insert the instruction
+ Instruction *InsertBefore = nullptr ///< Place to insert the instruction
);
/// @brief Create a SExt or BitCast cast instruction
@@ -472,7 +472,7 @@ public:
Value *S, ///< The pointer value to be casted (operand 0)
Type *Ty, ///< The type to which cast should be made
const Twine &Name = "", ///< Name for the instruction
- Instruction *InsertBefore = 0 ///< Place to insert the instruction
+ Instruction *InsertBefore = nullptr ///< Place to insert the instruction
);
/// @brief Create a ZExt, BitCast, or Trunc for int -> int casts.
@@ -481,7 +481,7 @@ public:
Type *Ty, ///< The type to which cast should be made
bool isSigned, ///< Whether to regard S as signed or not
const Twine &Name = "", ///< Name for the instruction
- Instruction *InsertBefore = 0 ///< Place to insert the instruction
+ Instruction *InsertBefore = nullptr ///< Place to insert the instruction
);
/// @brief Create a ZExt, BitCast, or Trunc for int -> int casts.
@@ -498,7 +498,7 @@ public:
Value *S, ///< The floating point value to be casted
Type *Ty, ///< The floating point type to cast to
const Twine &Name = "", ///< Name for the instruction
- Instruction *InsertBefore = 0 ///< Place to insert the instruction
+ Instruction *InsertBefore = nullptr ///< Place to insert the instruction
);
/// @brief Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
@@ -514,7 +514,7 @@ public:
Value *S, ///< The value to be casted (operand 0)
Type *Ty, ///< The type to which cast should be made
const Twine &Name = "", ///< Name for the instruction
- Instruction *InsertBefore = 0 ///< Place to insert the instruction
+ Instruction *InsertBefore = nullptr ///< Place to insert the instruction
);
/// @brief Create a Trunc or BitCast cast instruction
@@ -641,7 +641,7 @@ class CmpInst : public Instruction {
protected:
CmpInst(Type *ty, Instruction::OtherOps op, unsigned short pred,
Value *LHS, Value *RHS, const Twine &Name = "",
- Instruction *InsertBefore = 0);
+ Instruction *InsertBefore = nullptr);
CmpInst(Type *ty, Instruction::OtherOps op, unsigned short pred,
Value *LHS, Value *RHS, const Twine &Name,
@@ -701,7 +701,7 @@ public:
static CmpInst *Create(OtherOps Op,
unsigned short predicate, Value *S1,
Value *S2, const Twine &Name = "",
- Instruction *InsertBefore = 0);
+ Instruction *InsertBefore = nullptr);
/// Construct a compare instruction, given the opcode, the predicate and the
/// two operands. Also automatically insert this instruction to the end of
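All of these factories follow the same convention: the trailing Instruction* names an insertion point and now defaults to nullptr, meaning "create the instruction detached"; the BasicBlock* overloads append at the end of a block instead. A hedged sketch using two of the factories shown above, where Before is assumed to be an existing instruction and V8 an i8 value:

#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"

void widenAndNegate(llvm::Instruction *Before, llvm::Value *V8) {
  llvm::Type *I32 = llvm::Type::getInt32Ty(Before->getContext());
  // Created and inserted immediately before 'Before'.
  llvm::Value *Wide =
      llvm::CastInst::CreateZExtOrBitCast(V8, I32, "wide", Before);
  // Created detached (InsertBefore defaults to nullptr), placed explicitly later.
  llvm::BinaryOperator *Neg = llvm::BinaryOperator::CreateNeg(Wide, "neg");
  Neg->insertBefore(Before);
}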
diff --git a/include/llvm/IR/Instruction.h b/include/llvm/IR/Instruction.h
index 928dc07..bac6a95 100644
--- a/include/llvm/IR/Instruction.h
+++ b/include/llvm/IR/Instruction.h
@@ -141,14 +141,14 @@ public:
/// getMetadata - Get the metadata of given kind attached to this Instruction.
/// If the metadata is not found then return null.
MDNode *getMetadata(unsigned KindID) const {
- if (!hasMetadata()) return 0;
+ if (!hasMetadata()) return nullptr;
return getMetadataImpl(KindID);
}
/// getMetadata - Get the metadata of given kind attached to this Instruction.
/// If the metadata is not found then return null.
MDNode *getMetadata(StringRef Kind) const {
- if (!hasMetadata()) return 0;
+ if (!hasMetadata()) return nullptr;
return getMetadataImpl(Kind);
}
@@ -461,7 +461,7 @@ protected:
}
Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
- Instruction *InsertBefore = 0);
+ Instruction *InsertBefore = nullptr);
Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
BasicBlock *InsertAtEnd);
virtual Instruction *clone_impl() const = 0;
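The metadata accessors now return nullptr rather than 0 when the requested kind is absent, which is also the natural way to test the result. A minimal lookup sketch; the "prof" kind name is only an example of a by-name query:

#include "llvm/IR/Instruction.h"
#include "llvm/IR/Metadata.h"

bool hasProfileMetadata(const llvm::Instruction *I) {
  // A null return means no node of this kind is attached.
  if (llvm::MDNode *Prof = I->getMetadata("prof"))
    return Prof->getNumOperands() != 0;
  return false;
}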
diff --git a/include/llvm/IR/Instructions.h b/include/llvm/IR/Instructions.h
index 06d7287..7d338a6 100644
--- a/include/llvm/IR/Instructions.h
+++ b/include/llvm/IR/Instructions.h
@@ -60,16 +60,17 @@ class AllocaInst : public UnaryInstruction {
protected:
AllocaInst *clone_impl() const override;
public:
- explicit AllocaInst(Type *Ty, Value *ArraySize = 0,
- const Twine &Name = "", Instruction *InsertBefore = 0);
+ explicit AllocaInst(Type *Ty, Value *ArraySize = nullptr,
+ const Twine &Name = "",
+ Instruction *InsertBefore = nullptr);
AllocaInst(Type *Ty, Value *ArraySize,
const Twine &Name, BasicBlock *InsertAtEnd);
- AllocaInst(Type *Ty, const Twine &Name, Instruction *InsertBefore = 0);
+ AllocaInst(Type *Ty, const Twine &Name, Instruction *InsertBefore = nullptr);
AllocaInst(Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd);
AllocaInst(Type *Ty, Value *ArraySize, unsigned Align,
- const Twine &Name = "", Instruction *InsertBefore = 0);
+ const Twine &Name = "", Instruction *InsertBefore = nullptr);
AllocaInst(Type *Ty, Value *ArraySize, unsigned Align,
const Twine &Name, BasicBlock *InsertAtEnd);
@@ -156,17 +157,17 @@ public:
LoadInst(Value *Ptr, const Twine &NameStr, Instruction *InsertBefore);
LoadInst(Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile = false,
- Instruction *InsertBefore = 0);
+ Instruction *InsertBefore = nullptr);
LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
BasicBlock *InsertAtEnd);
LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
- unsigned Align, Instruction *InsertBefore = 0);
+ unsigned Align, Instruction *InsertBefore = nullptr);
LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
unsigned Align, BasicBlock *InsertAtEnd);
LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
SynchronizationScope SynchScope = CrossThread,
- Instruction *InsertBefore = 0);
+ Instruction *InsertBefore = nullptr);
LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
SynchronizationScope SynchScope,
@@ -174,8 +175,9 @@ public:
LoadInst(Value *Ptr, const char *NameStr, Instruction *InsertBefore);
LoadInst(Value *Ptr, const char *NameStr, BasicBlock *InsertAtEnd);
- explicit LoadInst(Value *Ptr, const char *NameStr = 0,
- bool isVolatile = false, Instruction *InsertBefore = 0);
+ explicit LoadInst(Value *Ptr, const char *NameStr = nullptr,
+ bool isVolatile = false,
+ Instruction *InsertBefore = nullptr);
LoadInst(Value *Ptr, const char *NameStr, bool isVolatile,
BasicBlock *InsertAtEnd);
@@ -280,16 +282,16 @@ public:
StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
StoreInst(Value *Val, Value *Ptr, bool isVolatile = false,
- Instruction *InsertBefore = 0);
+ Instruction *InsertBefore = nullptr);
StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
StoreInst(Value *Val, Value *Ptr, bool isVolatile,
- unsigned Align, Instruction *InsertBefore = 0);
+ unsigned Align, Instruction *InsertBefore = nullptr);
StoreInst(Value *Val, Value *Ptr, bool isVolatile,
unsigned Align, BasicBlock *InsertAtEnd);
StoreInst(Value *Val, Value *Ptr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
SynchronizationScope SynchScope = CrossThread,
- Instruction *InsertBefore = 0);
+ Instruction *InsertBefore = nullptr);
StoreInst(Value *Val, Value *Ptr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
SynchronizationScope SynchScope,
@@ -409,7 +411,7 @@ public:
// SequentiallyConsistent.
FenceInst(LLVMContext &C, AtomicOrdering Ordering,
SynchronizationScope SynchScope = CrossThread,
- Instruction *InsertBefore = 0);
+ Instruction *InsertBefore = nullptr);
FenceInst(LLVMContext &C, AtomicOrdering Ordering,
SynchronizationScope SynchScope,
BasicBlock *InsertAtEnd);
@@ -477,7 +479,7 @@ public:
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope,
- Instruction *InsertBefore = 0);
+ Instruction *InsertBefore = nullptr);
AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
@@ -651,7 +653,7 @@ public:
}
AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
AtomicOrdering Ordering, SynchronizationScope SynchScope,
- Instruction *InsertBefore = 0);
+ Instruction *InsertBefore = nullptr);
AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
AtomicOrdering Ordering, SynchronizationScope SynchScope,
BasicBlock *InsertAtEnd);
@@ -779,7 +781,7 @@ protected:
public:
static GetElementPtrInst *Create(Value *Ptr, ArrayRef<Value *> IdxList,
const Twine &NameStr = "",
- Instruction *InsertBefore = 0) {
+ Instruction *InsertBefore = nullptr) {
unsigned Values = 1 + unsigned(IdxList.size());
return new(Values)
GetElementPtrInst(Ptr, IdxList, Values, NameStr, InsertBefore);
@@ -797,7 +799,7 @@ public:
static GetElementPtrInst *CreateInBounds(Value *Ptr,
ArrayRef<Value *> IdxList,
const Twine &NameStr = "",
- Instruction *InsertBefore = 0) {
+ Instruction *InsertBefore = nullptr){
GetElementPtrInst *GEP = Create(Ptr, IdxList, NameStr, InsertBefore);
GEP->setIsInBounds(true);
return GEP;
@@ -1237,7 +1239,7 @@ public:
static CallInst *Create(Value *Func,
ArrayRef<Value *> Args,
const Twine &NameStr = "",
- Instruction *InsertBefore = 0) {
+ Instruction *InsertBefore = nullptr) {
return new(unsigned(Args.size() + 1))
CallInst(Func, Args, NameStr, InsertBefore);
}
@@ -1248,7 +1250,7 @@ public:
CallInst(Func, Args, NameStr, InsertAtEnd);
}
static CallInst *Create(Value *F, const Twine &NameStr = "",
- Instruction *InsertBefore = 0) {
+ Instruction *InsertBefore = nullptr) {
return new(1) CallInst(F, NameStr, InsertBefore);
}
static CallInst *Create(Value *F, const Twine &NameStr,
@@ -1263,13 +1265,13 @@ public:
/// 3. Bitcast the result of the malloc call to the specified type.
static Instruction *CreateMalloc(Instruction *InsertBefore,
Type *IntPtrTy, Type *AllocTy,
- Value *AllocSize, Value *ArraySize = 0,
- Function* MallocF = 0,
+ Value *AllocSize, Value *ArraySize = nullptr,
+ Function* MallocF = nullptr,
const Twine &Name = "");
static Instruction *CreateMalloc(BasicBlock *InsertAtEnd,
Type *IntPtrTy, Type *AllocTy,
- Value *AllocSize, Value *ArraySize = 0,
- Function* MallocF = 0,
+ Value *AllocSize, Value *ArraySize = nullptr,
+ Function* MallocF = nullptr,
const Twine &Name = "");
/// CreateFree - Generate the IR for a call to the builtin free function.
static Instruction* CreateFree(Value* Source, Instruction *InsertBefore);
@@ -1277,10 +1279,24 @@ public:
~CallInst();
- bool isTailCall() const { return getSubclassDataFromInstruction() & 1; }
+ // Note that 'musttail' implies 'tail'.
+ enum TailCallKind { TCK_None = 0, TCK_Tail = 1, TCK_MustTail = 2 };
+ TailCallKind getTailCallKind() const {
+ return TailCallKind(getSubclassDataFromInstruction() & 3);
+ }
+ bool isTailCall() const {
+ return (getSubclassDataFromInstruction() & 3) != TCK_None;
+ }
+ bool isMustTailCall() const {
+ return (getSubclassDataFromInstruction() & 3) == TCK_MustTail;
+ }
void setTailCall(bool isTC = true) {
- setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
- unsigned(isTC));
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
+ unsigned(isTC ? TCK_Tail : TCK_None));
+ }
+ void setTailCallKind(TailCallKind TCK) {
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
+ unsigned(TCK));
}
/// Provide fast operand accessors
@@ -1314,11 +1330,11 @@ public:
/// getCallingConv/setCallingConv - Get or set the calling convention of this
/// function call.
CallingConv::ID getCallingConv() const {
- return static_cast<CallingConv::ID>(getSubclassDataFromInstruction() >> 1);
+ return static_cast<CallingConv::ID>(getSubclassDataFromInstruction() >> 2);
}
void setCallingConv(CallingConv::ID CC) {
- setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
- (static_cast<unsigned>(CC) << 1));
+ setInstructionSubclassData((getSubclassDataFromInstruction() & 3) |
+ (static_cast<unsigned>(CC) << 2));
}
/// getAttributes - Return the parameter attributes for this call.
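The new TailCallKind occupies the low two bits of the instruction's subclass data, which is exactly why getCallingConv/setCallingConv above now shift by 2 instead of 1 and mask with 3 instead of 1. A hedged sketch of driving the new accessors, assuming an existing CallInst *CI:

#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instructions.h"

void forceMustTail(llvm::CallInst *CI) {
  CI->setTailCallKind(llvm::CallInst::TCK_MustTail);
  // 'musttail' implies 'tail', so both predicates are true afterwards.
  bool IsTail = CI->isTailCall();
  bool IsMust = CI->isMustTailCall();
  (void)IsTail; (void)IsMust;
  // The calling convention lives in the remaining bits and is left untouched.
  llvm::CallingConv::ID CC = CI->getCallingConv();
  (void)CC;
}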
@@ -1520,7 +1536,7 @@ protected:
public:
static SelectInst *Create(Value *C, Value *S1, Value *S2,
const Twine &NameStr = "",
- Instruction *InsertBefore = 0) {
+ Instruction *InsertBefore = nullptr) {
return new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
}
static SelectInst *Create(Value *C, Value *S1, Value *S2,
@@ -1575,7 +1591,7 @@ protected:
public:
VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
- Instruction *InsertBefore = 0)
+ Instruction *InsertBefore = nullptr)
: UnaryInstruction(Ty, VAArg, List, InsertBefore) {
setName(NameStr);
}
@@ -1607,7 +1623,7 @@ public:
///
class ExtractElementInst : public Instruction {
ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
- Instruction *InsertBefore = 0);
+ Instruction *InsertBefore = nullptr);
ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
BasicBlock *InsertAtEnd);
protected:
@@ -1616,7 +1632,7 @@ protected:
public:
static ExtractElementInst *Create(Value *Vec, Value *Idx,
const Twine &NameStr = "",
- Instruction *InsertBefore = 0) {
+ Instruction *InsertBefore = nullptr) {
return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
}
static ExtractElementInst *Create(Value *Vec, Value *Idx,
@@ -1668,7 +1684,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
class InsertElementInst : public Instruction {
InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
const Twine &NameStr = "",
- Instruction *InsertBefore = 0);
+ Instruction *InsertBefore = nullptr);
InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
const Twine &NameStr, BasicBlock *InsertAtEnd);
protected:
@@ -1677,7 +1693,7 @@ protected:
public:
static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
const Twine &NameStr = "",
- Instruction *InsertBefore = 0) {
+ Instruction *InsertBefore = nullptr) {
return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
}
static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
@@ -1734,7 +1750,7 @@ public:
}
ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
const Twine &NameStr = "",
- Instruction *InsertBefor = 0);
+ Instruction *InsertBefor = nullptr);
ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
const Twine &NameStr, BasicBlock *InsertAtEnd);
@@ -1832,7 +1848,7 @@ public:
static ExtractValueInst *Create(Value *Agg,
ArrayRef<unsigned> Idxs,
const Twine &NameStr = "",
- Instruction *InsertBefore = 0) {
+ Instruction *InsertBefore = nullptr) {
return new
ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
}
@@ -1933,7 +1949,7 @@ class InsertValueInst : public Instruction {
/// and two index insertvalue instructions are so common.
InsertValueInst(Value *Agg, Value *Val,
unsigned Idx, const Twine &NameStr = "",
- Instruction *InsertBefore = 0);
+ Instruction *InsertBefore = nullptr);
InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
const Twine &NameStr, BasicBlock *InsertAtEnd);
protected:
@@ -1947,7 +1963,7 @@ public:
static InsertValueInst *Create(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs,
const Twine &NameStr = "",
- Instruction *InsertBefore = 0) {
+ Instruction *InsertBefore = nullptr) {
return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
}
static InsertValueInst *Create(Value *Agg, Value *Val,
@@ -2052,8 +2068,9 @@ class PHINode : public Instruction {
return User::operator new(s, 0);
}
explicit PHINode(Type *Ty, unsigned NumReservedValues,
- const Twine &NameStr = "", Instruction *InsertBefore = 0)
- : Instruction(Ty, Instruction::PHI, 0, 0, InsertBefore),
+ const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr)
+ : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
ReservedSpace(NumReservedValues) {
setName(NameStr);
OperandList = allocHungoffUses(ReservedSpace);
@@ -2061,7 +2078,7 @@ class PHINode : public Instruction {
PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
BasicBlock *InsertAtEnd)
- : Instruction(Ty, Instruction::PHI, 0, 0, InsertAtEnd),
+ : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
ReservedSpace(NumReservedValues) {
setName(NameStr);
OperandList = allocHungoffUses(ReservedSpace);
@@ -2078,7 +2095,7 @@ public:
/// edges that this phi node will have (use 0 if you really have no idea).
static PHINode *Create(Type *Ty, unsigned NumReservedValues,
const Twine &NameStr = "",
- Instruction *InsertBefore = 0) {
+ Instruction *InsertBefore = nullptr) {
return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
}
static PHINode *Create(Type *Ty, unsigned NumReservedValues,
@@ -2270,7 +2287,7 @@ public:
static LandingPadInst *Create(Type *RetTy, Value *PersonalityFn,
unsigned NumReservedClauses,
const Twine &NameStr = "",
- Instruction *InsertBefore = 0);
+ Instruction *InsertBefore = nullptr);
static LandingPadInst *Create(Type *RetTy, Value *PersonalityFn,
unsigned NumReservedClauses,
const Twine &NameStr, BasicBlock *InsertAtEnd);
@@ -2356,15 +2373,15 @@ private:
//
// NOTE: If the Value* passed is of type void then the constructor behaves as
// if it was passed NULL.
- explicit ReturnInst(LLVMContext &C, Value *retVal = 0,
- Instruction *InsertBefore = 0);
+ explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
+ Instruction *InsertBefore = nullptr);
ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
protected:
ReturnInst *clone_impl() const override;
public:
- static ReturnInst* Create(LLVMContext &C, Value *retVal = 0,
- Instruction *InsertBefore = 0) {
+ static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
+ Instruction *InsertBefore = nullptr) {
return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
}
static ReturnInst* Create(LLVMContext &C, Value *retVal,
@@ -2381,7 +2398,7 @@ public:
/// Convenience accessor. Returns null if there is no return value.
Value *getReturnValue() const {
- return getNumOperands() != 0 ? getOperand(0) : 0;
+ return getNumOperands() != 0 ? getOperand(0) : nullptr;
}
unsigned getNumSuccessors() const { return 0; }
@@ -2426,20 +2443,21 @@ class BranchInst : public TerminatorInst {
// BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
// BranchInst(BB* B, BB *I) - 'br B' insert at end
// BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
- explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = 0);
+ explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
- Instruction *InsertBefore = 0);
+ Instruction *InsertBefore = nullptr);
BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
BasicBlock *InsertAtEnd);
protected:
BranchInst *clone_impl() const override;
public:
- static BranchInst *Create(BasicBlock *IfTrue, Instruction *InsertBefore = 0) {
+ static BranchInst *Create(BasicBlock *IfTrue,
+ Instruction *InsertBefore = nullptr) {
return new(1) BranchInst(IfTrue, InsertBefore);
}
static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
- Value *Cond, Instruction *InsertBefore = 0) {
+ Value *Cond, Instruction *InsertBefore = nullptr) {
return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
}
static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
@@ -2658,7 +2676,8 @@ public:
};
static SwitchInst *Create(Value *Value, BasicBlock *Default,
- unsigned NumCases, Instruction *InsertBefore = 0) {
+ unsigned NumCases,
+ Instruction *InsertBefore = nullptr) {
return new SwitchInst(Value, Default, NumCases, InsertBefore);
}
static SwitchInst *Create(Value *Value, BasicBlock *Default,
@@ -2742,12 +2761,12 @@ public:
/// findCaseDest - Finds the unique case value for a given successor. Returns
/// null if the successor is not found, not unique, or is the default case.
ConstantInt *findCaseDest(BasicBlock *BB) {
- if (BB == getDefaultDest()) return NULL;
+ if (BB == getDefaultDest()) return nullptr;
- ConstantInt *CI = NULL;
+ ConstantInt *CI = nullptr;
for (CaseIt i = case_begin(), e = case_end(); i != e; ++i) {
if (i.getCaseSuccessor() == BB) {
- if (CI) return NULL; // Multiple cases lead to BB.
+ if (CI) return nullptr; // Multiple cases lead to BB.
else CI = i.getCaseValue();
}
}
@@ -2834,7 +2853,7 @@ protected:
IndirectBrInst *clone_impl() const override;
public:
static IndirectBrInst *Create(Value *Address, unsigned NumDests,
- Instruction *InsertBefore = 0) {
+ Instruction *InsertBefore = nullptr) {
return new IndirectBrInst(Address, NumDests, InsertBefore);
}
static IndirectBrInst *Create(Value *Address, unsigned NumDests,
@@ -2928,7 +2947,7 @@ public:
static InvokeInst *Create(Value *Func,
BasicBlock *IfNormal, BasicBlock *IfException,
ArrayRef<Value *> Args, const Twine &NameStr = "",
- Instruction *InsertBefore = 0) {
+ Instruction *InsertBefore = nullptr) {
unsigned Values = unsigned(Args.size()) + 3;
return new(Values) InvokeInst(Func, IfNormal, IfException, Args,
Values, NameStr, InsertBefore);
@@ -3175,12 +3194,12 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InvokeInst, Value)
class ResumeInst : public TerminatorInst {
ResumeInst(const ResumeInst &RI);
- explicit ResumeInst(Value *Exn, Instruction *InsertBefore=0);
+ explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
protected:
ResumeInst *clone_impl() const override;
public:
- static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = 0) {
+ static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
return new(1) ResumeInst(Exn, InsertBefore);
}
static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
@@ -3234,7 +3253,7 @@ public:
void *operator new(size_t s) {
return User::operator new(s, 0);
}
- explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = 0);
+ explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);
unsigned getNumSuccessors() const { return 0; }
@@ -3265,16 +3284,16 @@ protected:
public:
/// \brief Constructor with insert-before-instruction semantics
TruncInst(
- Value *S, ///< The value to be truncated
- Type *Ty, ///< The (smaller) type to truncate to
- const Twine &NameStr = "", ///< A name for the new instruction
- Instruction *InsertBefore = 0 ///< Where to insert the new instruction
+ Value *S, ///< The value to be truncated
+ Type *Ty, ///< The (smaller) type to truncate to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
/// \brief Constructor with insert-at-end-of-block semantics
TruncInst(
Value *S, ///< The value to be truncated
- Type *Ty, ///< The (smaller) type to truncate to
+ Type *Ty, ///< The (smaller) type to truncate to
const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -3301,16 +3320,16 @@ protected:
public:
/// \brief Constructor with insert-before-instruction semantics
ZExtInst(
- Value *S, ///< The value to be zero extended
- Type *Ty, ///< The type to zero extend to
- const Twine &NameStr = "", ///< A name for the new instruction
- Instruction *InsertBefore = 0 ///< Where to insert the new instruction
+ Value *S, ///< The value to be zero extended
+ Type *Ty, ///< The type to zero extend to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
/// \brief Constructor with insert-at-end semantics.
ZExtInst(
Value *S, ///< The value to be zero extended
- Type *Ty, ///< The type to zero extend to
+ Type *Ty, ///< The type to zero extend to
const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -3337,16 +3356,16 @@ protected:
public:
/// \brief Constructor with insert-before-instruction semantics
SExtInst(
- Value *S, ///< The value to be sign extended
- Type *Ty, ///< The type to sign extend to
- const Twine &NameStr = "", ///< A name for the new instruction
- Instruction *InsertBefore = 0 ///< Where to insert the new instruction
+ Value *S, ///< The value to be sign extended
+ Type *Ty, ///< The type to sign extend to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
/// \brief Constructor with insert-at-end-of-block semantics
SExtInst(
Value *S, ///< The value to be sign extended
- Type *Ty, ///< The type to sign extend to
+ Type *Ty, ///< The type to sign extend to
const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -3373,16 +3392,16 @@ protected:
public:
/// \brief Constructor with insert-before-instruction semantics
FPTruncInst(
- Value *S, ///< The value to be truncated
- Type *Ty, ///< The type to truncate to
- const Twine &NameStr = "", ///< A name for the new instruction
- Instruction *InsertBefore = 0 ///< Where to insert the new instruction
+ Value *S, ///< The value to be truncated
+ Type *Ty, ///< The type to truncate to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
/// \brief Constructor with insert-before-instruction semantics
FPTruncInst(
Value *S, ///< The value to be truncated
- Type *Ty, ///< The type to truncate to
+ Type *Ty, ///< The type to truncate to
const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -3409,16 +3428,16 @@ protected:
public:
/// \brief Constructor with insert-before-instruction semantics
FPExtInst(
- Value *S, ///< The value to be extended
- Type *Ty, ///< The type to extend to
- const Twine &NameStr = "", ///< A name for the new instruction
- Instruction *InsertBefore = 0 ///< Where to insert the new instruction
+ Value *S, ///< The value to be extended
+ Type *Ty, ///< The type to extend to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
/// \brief Constructor with insert-at-end-of-block semantics
FPExtInst(
Value *S, ///< The value to be extended
- Type *Ty, ///< The type to extend to
+ Type *Ty, ///< The type to extend to
const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -3445,16 +3464,16 @@ protected:
public:
/// \brief Constructor with insert-before-instruction semantics
UIToFPInst(
- Value *S, ///< The value to be converted
- Type *Ty, ///< The type to convert to
- const Twine &NameStr = "", ///< A name for the new instruction
- Instruction *InsertBefore = 0 ///< Where to insert the new instruction
+ Value *S, ///< The value to be converted
+ Type *Ty, ///< The type to convert to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
/// \brief Constructor with insert-at-end-of-block semantics
UIToFPInst(
Value *S, ///< The value to be converted
- Type *Ty, ///< The type to convert to
+ Type *Ty, ///< The type to convert to
const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -3481,16 +3500,16 @@ protected:
public:
/// \brief Constructor with insert-before-instruction semantics
SIToFPInst(
- Value *S, ///< The value to be converted
- Type *Ty, ///< The type to convert to
- const Twine &NameStr = "", ///< A name for the new instruction
- Instruction *InsertBefore = 0 ///< Where to insert the new instruction
+ Value *S, ///< The value to be converted
+ Type *Ty, ///< The type to convert to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
/// \brief Constructor with insert-at-end-of-block semantics
SIToFPInst(
Value *S, ///< The value to be converted
- Type *Ty, ///< The type to convert to
+ Type *Ty, ///< The type to convert to
const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -3517,16 +3536,16 @@ protected:
public:
/// \brief Constructor with insert-before-instruction semantics
FPToUIInst(
- Value *S, ///< The value to be converted
- Type *Ty, ///< The type to convert to
- const Twine &NameStr = "", ///< A name for the new instruction
- Instruction *InsertBefore = 0 ///< Where to insert the new instruction
+ Value *S, ///< The value to be converted
+ Type *Ty, ///< The type to convert to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
/// \brief Constructor with insert-at-end-of-block semantics
FPToUIInst(
Value *S, ///< The value to be converted
- Type *Ty, ///< The type to convert to
+ Type *Ty, ///< The type to convert to
const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< Where to insert the new instruction
);
@@ -3553,16 +3572,16 @@ protected:
public:
/// \brief Constructor with insert-before-instruction semantics
FPToSIInst(
- Value *S, ///< The value to be converted
- Type *Ty, ///< The type to convert to
- const Twine &NameStr = "", ///< A name for the new instruction
- Instruction *InsertBefore = 0 ///< Where to insert the new instruction
+ Value *S, ///< The value to be converted
+ Type *Ty, ///< The type to convert to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
/// \brief Constructor with insert-at-end-of-block semantics
FPToSIInst(
Value *S, ///< The value to be converted
- Type *Ty, ///< The type to convert to
+ Type *Ty, ///< The type to convert to
const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -3585,16 +3604,16 @@ class IntToPtrInst : public CastInst {
public:
/// \brief Constructor with insert-before-instruction semantics
IntToPtrInst(
- Value *S, ///< The value to be converted
- Type *Ty, ///< The type to convert to
- const Twine &NameStr = "", ///< A name for the new instruction
- Instruction *InsertBefore = 0 ///< Where to insert the new instruction
+ Value *S, ///< The value to be converted
+ Type *Ty, ///< The type to convert to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
/// \brief Constructor with insert-at-end-of-block semantics
IntToPtrInst(
Value *S, ///< The value to be converted
- Type *Ty, ///< The type to convert to
+ Type *Ty, ///< The type to convert to
const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -3629,16 +3648,16 @@ protected:
public:
/// \brief Constructor with insert-before-instruction semantics
PtrToIntInst(
- Value *S, ///< The value to be converted
- Type *Ty, ///< The type to convert to
- const Twine &NameStr = "", ///< A name for the new instruction
- Instruction *InsertBefore = 0 ///< Where to insert the new instruction
+ Value *S, ///< The value to be converted
+ Type *Ty, ///< The type to convert to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
/// \brief Constructor with insert-at-end-of-block semantics
PtrToIntInst(
Value *S, ///< The value to be converted
- Type *Ty, ///< The type to convert to
+ Type *Ty, ///< The type to convert to
const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -3677,16 +3696,16 @@ protected:
public:
/// \brief Constructor with insert-before-instruction semantics
BitCastInst(
- Value *S, ///< The value to be casted
- Type *Ty, ///< The type to casted to
- const Twine &NameStr = "", ///< A name for the new instruction
- Instruction *InsertBefore = 0 ///< Where to insert the new instruction
+ Value *S, ///< The value to be casted
+ Type *Ty, ///< The type to casted to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
/// \brief Constructor with insert-at-end-of-block semantics
BitCastInst(
Value *S, ///< The value to be casted
- Type *Ty, ///< The type to casted to
+ Type *Ty, ///< The type to casted to
const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -3714,10 +3733,10 @@ protected:
public:
/// \brief Constructor with insert-before-instruction semantics
AddrSpaceCastInst(
- Value *S, ///< The value to be casted
- Type *Ty, ///< The type to casted to
- const Twine &NameStr = "", ///< A name for the new instruction
- Instruction *InsertBefore = 0 ///< Where to insert the new instruction
+ Value *S, ///< The value to be casted
+ Type *Ty, ///< The type to casted to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
);
/// \brief Constructor with insert-at-end-of-block semantics
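Every concrete cast class in this stretch exposes the same two constructors: insert-before-an-instruction, whose default argument is now nullptr (create detached), and insert-at-end-of-block. A short sketch using the direct constructors rather than the CastInst factories; I8Val, Ptr, and BB are assumed inputs:

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"

void directCastConstructors(llvm::Value *I8Val, llvm::Value *Ptr,
                            llvm::BasicBlock *BB) {
  llvm::Type *I64 = llvm::Type::getInt64Ty(BB->getContext());
  // Insert-at-end form: appended to BB, which then owns the instruction.
  llvm::ZExtInst *ZE = new llvm::ZExtInst(I8Val, I64, "zext", BB);
  // Insert-before form with the new nullptr default: created detached...
  llvm::PtrToIntInst *P2I = new llvm::PtrToIntInst(Ptr, I64, "p2i");
  P2I->insertAfter(ZE); // ...and placed explicitly once the position is known.
}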
diff --git a/include/llvm/IR/Intrinsics.td b/include/llvm/IR/Intrinsics.td
index 6a48f17..edd1621 100644
--- a/include/llvm/IR/Intrinsics.td
+++ b/include/llvm/IR/Intrinsics.td
@@ -250,6 +250,10 @@ def int_gcwrite : Intrinsic<[],
//
def int_returnaddress : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_frameaddress : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_read_register : Intrinsic<[llvm_anyint_ty], [llvm_metadata_ty],
+ [IntrNoMem], "llvm.read_register">;
+def int_write_register : Intrinsic<[], [llvm_metadata_ty, llvm_anyint_ty],
+ [], "llvm.write_register">;
// Note: we treat stacksave/stackrestore as writemem because we don't otherwise
// model their dependencies on allocas.
@@ -529,7 +533,6 @@ def int_clear_cache : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty],
include "llvm/IR/IntrinsicsPowerPC.td"
include "llvm/IR/IntrinsicsX86.td"
include "llvm/IR/IntrinsicsARM.td"
-include "llvm/IR/IntrinsicsARM64.td"
include "llvm/IR/IntrinsicsAArch64.td"
include "llvm/IR/IntrinsicsXCore.td"
include "llvm/IR/IntrinsicsHexagon.td"
diff --git a/include/llvm/IR/IntrinsicsAArch64.td b/include/llvm/IR/IntrinsicsAArch64.td
index 61c0e5d..23757aa 100644
--- a/include/llvm/IR/IntrinsicsAArch64.td
+++ b/include/llvm/IR/IntrinsicsAArch64.td
@@ -1,4 +1,4 @@
-//===- IntrinsicsAArch64.td - Defines AArch64 intrinsics -----------*- tablegen -*-===//
+//===- IntrinsicsAARCH64.td - Defines AARCH64 intrinsics ---*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,401 +7,630 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines all of the AArch64-specific intrinsics.
+// This file defines all of the AARCH64-specific intrinsics.
//
//===----------------------------------------------------------------------===//
+let TargetPrefix = "aarch64" in {
+
+def int_aarch64_ldxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty]>;
+def int_aarch64_ldaxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty]>;
+def int_aarch64_stxr : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_anyptr_ty]>;
+def int_aarch64_stlxr : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_anyptr_ty]>;
+
+def int_aarch64_ldxp : Intrinsic<[llvm_i64_ty, llvm_i64_ty], [llvm_ptr_ty]>;
+def int_aarch64_ldaxp : Intrinsic<[llvm_i64_ty, llvm_i64_ty], [llvm_ptr_ty]>;
+def int_aarch64_stxp : Intrinsic<[llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_ptr_ty]>;
+def int_aarch64_stlxp : Intrinsic<[llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_ptr_ty]>;
+
+def int_aarch64_clrex : Intrinsic<[]>;
+
+def int_aarch64_sdiv : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
+ LLVMMatchType<0>], [IntrNoMem]>;
+def int_aarch64_udiv : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
+ LLVMMatchType<0>], [IntrNoMem]>;
+}
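This first block gives the merged AArch64 backend its bare exclusive-access (ldxr/stxr and friends) and integer-division intrinsics. A hedged sketch of emitting an exclusive load via the C++ API; ldxr is overloaded on its (any) pointer operand type, the result is always i64 per the definition above, and the helper and value names are assumptions:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"

llvm::Value *emitExclusiveLoad(llvm::IRBuilder<> &B, llvm::Module &M,
                               llvm::Value *Addr /* e.g. an i64* */) {
  // The overload is resolved by the pointer operand's type.
  llvm::Function *Ldxr = llvm::Intrinsic::getDeclaration(
      &M, llvm::Intrinsic::aarch64_ldxr, Addr->getType());
  return B.CreateCall(Ldxr, Addr, "ldxr");
}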
+
//===----------------------------------------------------------------------===//
// Advanced SIMD (NEON)
let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
+ class AdvSIMD_2Scalar_Float_Intrinsic
+ : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+
+ class AdvSIMD_FPToIntRounding_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+
+ class AdvSIMD_1IntArg_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+ class AdvSIMD_1FloatArg_Intrinsic
+ : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+ class AdvSIMD_1VectorArg_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+ class AdvSIMD_1VectorArg_Expand_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+ class AdvSIMD_1VectorArg_Long_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [LLVMTruncatedType<0>], [IntrNoMem]>;
+ class AdvSIMD_1IntArg_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [llvm_anyint_ty], [IntrNoMem]>;
+ class AdvSIMD_1VectorArg_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [LLVMExtendedType<0>], [IntrNoMem]>;
+ class AdvSIMD_1VectorArg_Int_Across_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+ class AdvSIMD_1VectorArg_Float_Across_Intrinsic
+ : Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+
+ class AdvSIMD_2IntArg_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_2FloatArg_Intrinsic
+ : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Compare_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMMatchType<1>],
+ [IntrNoMem]>;
+ class AdvSIMD_2Arg_FloatCompare_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, LLVMMatchType<1>],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Long_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMTruncatedType<0>, LLVMTruncatedType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Wide_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMTruncatedType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMExtendedType<0>, LLVMExtendedType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_2Arg_Scalar_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyint_ty],
+ [LLVMExtendedType<0>, llvm_i32_ty],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Scalar_Expand_BySize_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [llvm_anyvector_ty],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMTruncatedType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMTruncatedType<0>, llvm_i32_ty],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Tied_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty],
+ [IntrNoMem]>;
+
+ class AdvSIMD_3VectorArg_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_3VectorArg_Scalar_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
+ [IntrNoMem]>;
+ class AdvSIMD_3VectorArg_Tied_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty,
+ LLVMMatchType<1>], [IntrNoMem]>;
+ class AdvSIMD_3VectorArg_Scalar_Tied_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ class AdvSIMD_CvtFxToFP_Intrinsic
+ : Intrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ class AdvSIMD_CvtFPToFx_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+}
-// Vector Absolute Compare (Floating Point)
-def int_aarch64_neon_vacgeq :
- Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
-def int_aarch64_neon_vacgtq :
- Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
-
-// Vector saturating accumulate
-def int_aarch64_neon_suqadd : Neon_2Arg_Intrinsic;
-def int_aarch64_neon_usqadd : Neon_2Arg_Intrinsic;
-
-// Vector Bitwise reverse
-def int_aarch64_neon_rbit : Neon_1Arg_Intrinsic;
-
-// Vector extract and narrow
-def int_aarch64_neon_xtn :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-
-// Vector floating-point convert
-def int_aarch64_neon_frintn : Neon_1Arg_Intrinsic;
-def int_aarch64_neon_fsqrt : Neon_1Arg_Intrinsic;
-def int_aarch64_neon_vcvtxn :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-def int_aarch64_neon_vcvtzs :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-def int_aarch64_neon_vcvtzu :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-
-// Vector maxNum (Floating Point)
-def int_aarch64_neon_vmaxnm : Neon_2Arg_Intrinsic;
-
-// Vector minNum (Floating Point)
-def int_aarch64_neon_vminnm : Neon_2Arg_Intrinsic;
-
-// Vector Pairwise maxNum (Floating Point)
-def int_aarch64_neon_vpmaxnm : Neon_2Arg_Intrinsic;
-
-// Vector Pairwise minNum (Floating Point)
-def int_aarch64_neon_vpminnm : Neon_2Arg_Intrinsic;
-
-// Vector Multiply Extended and Scalar Multiply Extended (Floating Point)
-def int_aarch64_neon_vmulx :
- Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>]>;
-
-class Neon_N2V_Intrinsic
- : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_i32_ty],
- [IntrNoMem]>;
-class Neon_N3V_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
- [IntrNoMem]>;
-class Neon_N2V_Narrow_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMExtendedType<0>, llvm_i32_ty],
- [IntrNoMem]>;
-
-// Vector rounding shift right by immediate (Signed)
-def int_aarch64_neon_vsrshr : Neon_N2V_Intrinsic;
-def int_aarch64_neon_vurshr : Neon_N2V_Intrinsic;
-def int_aarch64_neon_vsqshlu : Neon_N2V_Intrinsic;
-
-def int_aarch64_neon_vsri : Neon_N3V_Intrinsic;
-def int_aarch64_neon_vsli : Neon_N3V_Intrinsic;
-
-def int_aarch64_neon_vsqshrun : Neon_N2V_Narrow_Intrinsic;
-def int_aarch64_neon_vrshrn : Neon_N2V_Narrow_Intrinsic;
-def int_aarch64_neon_vsqrshrun : Neon_N2V_Narrow_Intrinsic;
-def int_aarch64_neon_vsqshrn : Neon_N2V_Narrow_Intrinsic;
-def int_aarch64_neon_vuqshrn : Neon_N2V_Narrow_Intrinsic;
-def int_aarch64_neon_vsqrshrn : Neon_N2V_Narrow_Intrinsic;
-def int_aarch64_neon_vuqrshrn : Neon_N2V_Narrow_Intrinsic;
-
-// Vector across
-class Neon_Across_Intrinsic
- : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-
-def int_aarch64_neon_saddlv : Neon_Across_Intrinsic;
-def int_aarch64_neon_uaddlv : Neon_Across_Intrinsic;
-def int_aarch64_neon_smaxv : Neon_Across_Intrinsic;
-def int_aarch64_neon_umaxv : Neon_Across_Intrinsic;
-def int_aarch64_neon_sminv : Neon_Across_Intrinsic;
-def int_aarch64_neon_uminv : Neon_Across_Intrinsic;
-def int_aarch64_neon_vaddv : Neon_Across_Intrinsic;
-def int_aarch64_neon_vmaxv :
- Intrinsic<[llvm_float_ty], [llvm_v4f32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vminv :
- Intrinsic<[llvm_float_ty], [llvm_v4f32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vmaxnmv :
- Intrinsic<[llvm_float_ty], [llvm_v4f32_ty], [IntrNoMem]>;
-def int_aarch64_neon_vminnmv :
- Intrinsic<[llvm_float_ty], [llvm_v4f32_ty], [IntrNoMem]>;
-
-// Vector Table Lookup.
-def int_aarch64_neon_vtbl1 :
- Intrinsic<[llvm_anyvector_ty],
- [llvm_v16i8_ty, LLVMMatchType<0>], [IntrNoMem]>;
-
-def int_aarch64_neon_vtbl2 :
- Intrinsic<[llvm_anyvector_ty],
- [llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>],
- [IntrNoMem]>;
-
-def int_aarch64_neon_vtbl3 :
- Intrinsic<[llvm_anyvector_ty],
- [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
- LLVMMatchType<0>], [IntrNoMem]>;
-
-def int_aarch64_neon_vtbl4 :
- Intrinsic<[llvm_anyvector_ty],
- [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
- llvm_v16i8_ty, LLVMMatchType<0>], [IntrNoMem]>;
-
-// Vector Table Extension.
-// Some elements of the destination vector may not be updated, so the original
-// value of that vector is passed as the first argument. The next 1-4
-// arguments after that are the table.
-def int_aarch64_neon_vtbx1 :
- Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_v16i8_ty, LLVMMatchType<0>],
- [IntrNoMem]>;
-
-def int_aarch64_neon_vtbx2 :
- Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
- LLVMMatchType<0>], [IntrNoMem]>;
-
-def int_aarch64_neon_vtbx3 :
- Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
- llvm_v16i8_ty, LLVMMatchType<0>], [IntrNoMem]>;
-
-def int_aarch64_neon_vtbx4 :
- Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
- llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>],
- [IntrNoMem]>;
-
-// Vector Load/store
-def int_aarch64_neon_vld1x2 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
- [llvm_ptr_ty, llvm_i32_ty],
- [IntrReadArgMem]>;
-def int_aarch64_neon_vld1x3 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
- LLVMMatchType<0>],
- [llvm_ptr_ty, llvm_i32_ty],
- [IntrReadArgMem]>;
-def int_aarch64_neon_vld1x4 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
- LLVMMatchType<0>, LLVMMatchType<0>],
- [llvm_ptr_ty, llvm_i32_ty],
- [IntrReadArgMem]>;
-
-def int_aarch64_neon_vst1x2 : Intrinsic<[],
- [llvm_ptr_ty, llvm_anyvector_ty,
- LLVMMatchType<0>, llvm_i32_ty],
- [IntrReadWriteArgMem]>;
-def int_aarch64_neon_vst1x3 : Intrinsic<[],
- [llvm_ptr_ty, llvm_anyvector_ty,
- LLVMMatchType<0>, LLVMMatchType<0>,
- llvm_i32_ty], [IntrReadWriteArgMem]>;
-def int_aarch64_neon_vst1x4 : Intrinsic<[],
- [llvm_ptr_ty, llvm_anyvector_ty,
- LLVMMatchType<0>, LLVMMatchType<0>,
- LLVMMatchType<0>, llvm_i32_ty],
- [IntrReadWriteArgMem]>;
-
-// Scalar Add
-def int_aarch64_neon_vaddds :
- Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;
-def int_aarch64_neon_vadddu :
- Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;
-
-
-// Scalar Sub
-def int_aarch64_neon_vsubds :
- Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;
-def int_aarch64_neon_vsubdu :
- Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;
-
-
-// Scalar Shift
-// Scalar Shift Left
-def int_aarch64_neon_vshlds :
- Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;
-def int_aarch64_neon_vshldu :
- Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;
-
-// Scalar Saturating Shift Left
-def int_aarch64_neon_vqshls : Neon_2Arg_Intrinsic;
-def int_aarch64_neon_vqshlu : Neon_2Arg_Intrinsic;
-
-// Scalar Shift Rouding Left
-def int_aarch64_neon_vrshlds :
- Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;
-def int_aarch64_neon_vrshldu :
- Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;
-
-// Scalar Saturating Rounding Shift Left
-def int_aarch64_neon_vqrshls : Neon_2Arg_Intrinsic;
-def int_aarch64_neon_vqrshlu : Neon_2Arg_Intrinsic;
-
-// Scalar Reduce Pairwise Add.
-def int_aarch64_neon_vpadd :
- Intrinsic<[llvm_v1i64_ty], [llvm_v2i64_ty],[IntrNoMem]>;
-def int_aarch64_neon_vpfadd :
- Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-
-// Scalar Reduce Pairwise Floating Point Max/Min.
-def int_aarch64_neon_vpmax :
- Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-def int_aarch64_neon_vpmin :
- Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-
-// Scalar Reduce Pairwise Floating Point Maxnm/Minnm.
-def int_aarch64_neon_vpfmaxnm :
- Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-def int_aarch64_neon_vpfminnm :
- Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-
-// Scalar Signed Integer Convert To Floating-point
-def int_aarch64_neon_vcvtint2fps :
- Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-
-// Scalar Unsigned Integer Convert To Floating-point
-def int_aarch64_neon_vcvtint2fpu :
- Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-
-// Scalar Floating-point Convert
-def int_aarch64_neon_fcvtxn :
- Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
-def int_aarch64_neon_fcvtns :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
-def int_aarch64_neon_fcvtnu :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
-def int_aarch64_neon_fcvtps :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
-def int_aarch64_neon_fcvtpu :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
-def int_aarch64_neon_fcvtms :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
-def int_aarch64_neon_fcvtmu :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
-def int_aarch64_neon_fcvtas :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
-def int_aarch64_neon_fcvtau :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
-def int_aarch64_neon_fcvtzs :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
-def int_aarch64_neon_fcvtzu :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
-
-// Scalar Floating-point Reciprocal Estimate.
-def int_aarch64_neon_vrecpe :
- Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
-
-// Scalar Floating-point Reciprocal Exponent
-def int_aarch64_neon_vrecpx :
- Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
-
-// Scalar Floating-point Reciprocal Square Root Estimate
-def int_aarch64_neon_vrsqrte :
- Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
-
-// Scalar Floating-point Reciprocal Step
-def int_aarch64_neon_vrecps :
- Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem]>;
-
-// Scalar Floating-point Reciprocal Square Root Step
-def int_aarch64_neon_vrsqrts :
- Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem]>;
-
-// Compare with vector operands.
-class Neon_Cmp_Intrinsic :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, llvm_anyvector_ty],
- [IntrNoMem]>;
-
-// Floating-point compare with scalar operands.
-class Neon_Float_Cmp_Intrinsic :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty, llvm_anyfloat_ty],
- [IntrNoMem]>;
-
-// Scalar Compare Equal
-def int_aarch64_neon_vceq : Neon_Cmp_Intrinsic;
-def int_aarch64_neon_fceq : Neon_Float_Cmp_Intrinsic;
-
-// Scalar Compare Greater-Than or Equal
-def int_aarch64_neon_vcge : Neon_Cmp_Intrinsic;
-def int_aarch64_neon_vchs : Neon_Cmp_Intrinsic;
-def int_aarch64_neon_fcge : Neon_Float_Cmp_Intrinsic;
-def int_aarch64_neon_fchs : Neon_Float_Cmp_Intrinsic;
-
-// Scalar Compare Less-Than or Equal
-def int_aarch64_neon_vclez : Neon_Cmp_Intrinsic;
-def int_aarch64_neon_fclez : Neon_Float_Cmp_Intrinsic;
-
-// Scalar Compare Less-Than
-def int_aarch64_neon_vcltz : Neon_Cmp_Intrinsic;
-def int_aarch64_neon_fcltz : Neon_Float_Cmp_Intrinsic;
-
-// Scalar Compare Greater-Than
-def int_aarch64_neon_vcgt : Neon_Cmp_Intrinsic;
-def int_aarch64_neon_vchi : Neon_Cmp_Intrinsic;
-def int_aarch64_neon_fcgt : Neon_Float_Cmp_Intrinsic;
-def int_aarch64_neon_fchi : Neon_Float_Cmp_Intrinsic;
-
-// Scalar Compare Bitwise Test Bits
-def int_aarch64_neon_vtstd : Neon_Cmp_Intrinsic;
-
-// Scalar Floating-point Absolute Compare Greater Than Or Equal
-def int_aarch64_neon_vcage : Neon_Cmp_Intrinsic;
-def int_aarch64_neon_fcage : Neon_Float_Cmp_Intrinsic;
-
-// Scalar Floating-point Absolute Compare Greater Than
-def int_aarch64_neon_vcagt : Neon_Cmp_Intrinsic;
-def int_aarch64_neon_fcagt : Neon_Float_Cmp_Intrinsic;
-
-// Scalar Signed Saturating Accumulated of Unsigned Value
-def int_aarch64_neon_vuqadd : Neon_2Arg_Intrinsic;
-
-// Scalar Unsigned Saturating Accumulated of Signed Value
-def int_aarch64_neon_vsqadd : Neon_2Arg_Intrinsic;
-
-// Scalar Absolute Value
-def int_aarch64_neon_vabs :
- Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>;
-
-// Scalar Absolute Difference
-def int_aarch64_neon_vabd :
- Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem]>;
-
-// Scalar Negate Value
-def int_aarch64_neon_vneg :
- Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>;
-
-// Signed Saturating Doubling Multiply-Add Long
-def int_aarch64_neon_vqdmlal : Neon_3Arg_Long_Intrinsic;
-
-// Signed Saturating Doubling Multiply-Subtract Long
-def int_aarch64_neon_vqdmlsl : Neon_3Arg_Long_Intrinsic;
-
-def int_aarch64_neon_vmull_p64 :
- Intrinsic<[llvm_v16i8_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;
+// Arithmetic ops
-class Neon_2Arg_ShiftImm_Intrinsic
- : Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>;
+let Properties = [IntrNoMem] in {
+ // Vector Add Across Lanes
+ def int_aarch64_neon_saddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+ def int_aarch64_neon_uaddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+ def int_aarch64_neon_faddv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
+
+ // Vector Long Add Across Lanes
+ def int_aarch64_neon_saddlv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+ def int_aarch64_neon_uaddlv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+
+ // Vector Halving Add
+ def int_aarch64_neon_shadd : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_uhadd : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Vector Rounding Halving Add
+ def int_aarch64_neon_srhadd : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_urhadd : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Vector Saturating Add
+ def int_aarch64_neon_sqadd : AdvSIMD_2IntArg_Intrinsic;
+ def int_aarch64_neon_suqadd : AdvSIMD_2IntArg_Intrinsic;
+ def int_aarch64_neon_usqadd : AdvSIMD_2IntArg_Intrinsic;
+ def int_aarch64_neon_uqadd : AdvSIMD_2IntArg_Intrinsic;
+
+ // Vector Add High-Half
+ // FIXME: this is a legacy intrinsic for aarch64_simd.h. Remove it when that
+ // header is no longer supported.
+ def int_aarch64_neon_addhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;
+
+ // Vector Rounding Add High-Half
+ def int_aarch64_neon_raddhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;
+
+ // Vector Saturating Doubling Multiply High
+ def int_aarch64_neon_sqdmulh : AdvSIMD_2IntArg_Intrinsic;
+
+ // Vector Saturating Rounding Doubling Multiply High
+ def int_aarch64_neon_sqrdmulh : AdvSIMD_2IntArg_Intrinsic;
+
+ // Vector Polynominal Multiply
+ def int_aarch64_neon_pmul : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Vector Long Multiply
+ def int_aarch64_neon_smull : AdvSIMD_2VectorArg_Long_Intrinsic;
+ def int_aarch64_neon_umull : AdvSIMD_2VectorArg_Long_Intrinsic;
+ def int_aarch64_neon_pmull : AdvSIMD_2VectorArg_Long_Intrinsic;
+
+ // 64-bit polynomial multiply really returns an i128, which is not legal. Fake
+ // it with a v16i8.
+ def int_aarch64_neon_pmull64 :
+ Intrinsic<[llvm_v16i8_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+
+ // Vector Extending Multiply
+ def int_aarch64_neon_fmulx : AdvSIMD_2FloatArg_Intrinsic {
+ let Properties = [IntrNoMem, Commutative];
+ }
+
+ // Vector Saturating Doubling Long Multiply
+ def int_aarch64_neon_sqdmull : AdvSIMD_2VectorArg_Long_Intrinsic;
+ def int_aarch64_neon_sqdmulls_scalar
+ : Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+ // Vector Halving Subtract
+ def int_aarch64_neon_shsub : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_uhsub : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Vector Saturating Subtract
+ def int_aarch64_neon_sqsub : AdvSIMD_2IntArg_Intrinsic;
+ def int_aarch64_neon_uqsub : AdvSIMD_2IntArg_Intrinsic;
+
+ // Vector Subtract High-Half
+ // FIXME: this is a legacy intrinsic for aarch64_simd.h. Remove it when that
+ // header is no longer supported.
+ def int_aarch64_neon_subhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;
+
+ // Vector Rounding Subtract High-Half
+ def int_aarch64_neon_rsubhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;
+
+ // Vector Compare Absolute Greater-than-or-equal
+ def int_aarch64_neon_facge : AdvSIMD_2Arg_FloatCompare_Intrinsic;
+
+ // Vector Compare Absolute Greater-than
+ def int_aarch64_neon_facgt : AdvSIMD_2Arg_FloatCompare_Intrinsic;
+
+ // Vector Absolute Difference
+ def int_aarch64_neon_sabd : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_uabd : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_fabd : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Scalar Absolute Difference
+ def int_aarch64_sisd_fabd : AdvSIMD_2Scalar_Float_Intrinsic;
+
+ // Vector Max
+ def int_aarch64_neon_smax : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_umax : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_fmax : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_fmaxnmp : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Vector Max Across Lanes
+ def int_aarch64_neon_smaxv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+ def int_aarch64_neon_umaxv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+ def int_aarch64_neon_fmaxv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
+ def int_aarch64_neon_fmaxnmv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
+
+ // Vector Min
+ def int_aarch64_neon_smin : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_umin : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_fmin : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_fminnmp : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Vector Min/Max Number
+ def int_aarch64_neon_fminnm : AdvSIMD_2FloatArg_Intrinsic;
+ def int_aarch64_neon_fmaxnm : AdvSIMD_2FloatArg_Intrinsic;
+
+ // Vector Min Across Lanes
+ def int_aarch64_neon_sminv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+ def int_aarch64_neon_uminv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+ def int_aarch64_neon_fminv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
+ def int_aarch64_neon_fminnmv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
+
+ // Pairwise Add
+ def int_aarch64_neon_addp : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Long Pairwise Add
+ // FIXME: In theory, we shouldn't need intrinsics for saddlp or
+ // uaddlp, but tblgen's type inference currently can't handle the
+ // pattern fragments this ends up generating.
+ def int_aarch64_neon_saddlp : AdvSIMD_1VectorArg_Expand_Intrinsic;
+ def int_aarch64_neon_uaddlp : AdvSIMD_1VectorArg_Expand_Intrinsic;
-class Neon_3Arg_ShiftImm_Intrinsic
- : Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_i32_ty],
- [IntrNoMem]>;
+ // Folding Maximum
+ def int_aarch64_neon_smaxp : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_umaxp : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_fmaxp : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Folding Minimum
+ def int_aarch64_neon_sminp : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_uminp : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_fminp : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Reciprocal Estimate/Step
+ def int_aarch64_neon_frecps : AdvSIMD_2FloatArg_Intrinsic;
+ def int_aarch64_neon_frsqrts : AdvSIMD_2FloatArg_Intrinsic;
+
+ // Reciprocal Exponent
+ def int_aarch64_neon_frecpx : AdvSIMD_1FloatArg_Intrinsic;
+
+ // Vector Saturating Shift Left
+ def int_aarch64_neon_sqshl : AdvSIMD_2IntArg_Intrinsic;
+ def int_aarch64_neon_uqshl : AdvSIMD_2IntArg_Intrinsic;
+
+ // Vector Rounding Shift Left
+ def int_aarch64_neon_srshl : AdvSIMD_2IntArg_Intrinsic;
+ def int_aarch64_neon_urshl : AdvSIMD_2IntArg_Intrinsic;
+
+ // Vector Saturating Rounding Shift Left
+ def int_aarch64_neon_sqrshl : AdvSIMD_2IntArg_Intrinsic;
+ def int_aarch64_neon_uqrshl : AdvSIMD_2IntArg_Intrinsic;
+
+ // Vector Signed->Unsigned Shift Left by Constant
+ def int_aarch64_neon_sqshlu : AdvSIMD_2IntArg_Intrinsic;
+
+ // Vector Signed->Unsigned Narrowing Saturating Shift Right by Constant
+ def int_aarch64_neon_sqshrun : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+
+ // Vector Signed->Unsigned Rounding Narrowing Saturating Shift Right by Const
+ def int_aarch64_neon_sqrshrun : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+
+ // Vector Narrowing Shift Right by Constant
+ def int_aarch64_neon_sqshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+ def int_aarch64_neon_uqshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+
+ // Vector Rounding Narrowing Shift Right by Constant
+ def int_aarch64_neon_rshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+
+ // Vector Rounding Narrowing Saturating Shift Right by Constant
+ def int_aarch64_neon_sqrshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+ def int_aarch64_neon_uqrshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+
+ // Vector Shift Left
+ def int_aarch64_neon_sshl : AdvSIMD_2IntArg_Intrinsic;
+ def int_aarch64_neon_ushl : AdvSIMD_2IntArg_Intrinsic;
+
+ // Vector Widening Shift Left by Constant
+ def int_aarch64_neon_shll : AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic;
+ def int_aarch64_neon_sshll : AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic;
+ def int_aarch64_neon_ushll : AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic;
+
+ // Vector Shift Right by Constant and Insert
+ def int_aarch64_neon_vsri : AdvSIMD_3VectorArg_Scalar_Intrinsic;
+
+ // Vector Shift Left by Constant and Insert
+ def int_aarch64_neon_vsli : AdvSIMD_3VectorArg_Scalar_Intrinsic;
+
+ // Vector Saturating Narrow
+ def int_aarch64_neon_scalar_sqxtn: AdvSIMD_1IntArg_Narrow_Intrinsic;
+ def int_aarch64_neon_scalar_uqxtn : AdvSIMD_1IntArg_Narrow_Intrinsic;
+ def int_aarch64_neon_sqxtn : AdvSIMD_1VectorArg_Narrow_Intrinsic;
+ def int_aarch64_neon_uqxtn : AdvSIMD_1VectorArg_Narrow_Intrinsic;
+
+ // Vector Saturating Extract and Unsigned Narrow
+ def int_aarch64_neon_scalar_sqxtun : AdvSIMD_1IntArg_Narrow_Intrinsic;
+ def int_aarch64_neon_sqxtun : AdvSIMD_1VectorArg_Narrow_Intrinsic;
+
+ // Vector Absolute Value
+ def int_aarch64_neon_abs : AdvSIMD_1IntArg_Intrinsic;
+
+ // Vector Saturating Absolute Value
+ def int_aarch64_neon_sqabs : AdvSIMD_1IntArg_Intrinsic;
+
+ // Vector Saturating Negation
+ def int_aarch64_neon_sqneg : AdvSIMD_1IntArg_Intrinsic;
+
+ // Vector Count Leading Sign Bits
+ def int_aarch64_neon_cls : AdvSIMD_1VectorArg_Intrinsic;
+
+ // Vector Reciprocal Estimate
+ def int_aarch64_neon_urecpe : AdvSIMD_1VectorArg_Intrinsic;
+ def int_aarch64_neon_frecpe : AdvSIMD_1FloatArg_Intrinsic;
+
+ // Vector Square Root Estimate
+ def int_aarch64_neon_ursqrte : AdvSIMD_1VectorArg_Intrinsic;
+ def int_aarch64_neon_frsqrte : AdvSIMD_1FloatArg_Intrinsic;
+
+ // Vector Bitwise Reverse
+ def int_aarch64_neon_rbit : AdvSIMD_1VectorArg_Intrinsic;
+
+ // Vector Conversions Between Half-Precision and Single-Precision.
+ def int_aarch64_neon_vcvtfp2hf
+ : Intrinsic<[llvm_v4i16_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_aarch64_neon_vcvthf2fp
+ : Intrinsic<[llvm_v4f32_ty], [llvm_v4i16_ty], [IntrNoMem]>;
-// Scalar Shift Right (Immediate)
-def int_aarch64_neon_vshrds_n : Neon_2Arg_ShiftImm_Intrinsic;
-def int_aarch64_neon_vshrdu_n : Neon_2Arg_ShiftImm_Intrinsic;
+ // Vector Conversions Between Floating-point and Fixed-point.
+ def int_aarch64_neon_vcvtfp2fxs : AdvSIMD_CvtFPToFx_Intrinsic;
+ def int_aarch64_neon_vcvtfp2fxu : AdvSIMD_CvtFPToFx_Intrinsic;
+ def int_aarch64_neon_vcvtfxs2fp : AdvSIMD_CvtFxToFP_Intrinsic;
+ def int_aarch64_neon_vcvtfxu2fp : AdvSIMD_CvtFxToFP_Intrinsic;
-// Scalar Shift Right and Accumulate (Immediate)
-def int_aarch64_neon_vsrads_n : Neon_3Arg_ShiftImm_Intrinsic;
-def int_aarch64_neon_vsradu_n : Neon_3Arg_ShiftImm_Intrinsic;
+ // Vector FP->Int Conversions
+ def int_aarch64_neon_fcvtas : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_aarch64_neon_fcvtau : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_aarch64_neon_fcvtms : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_aarch64_neon_fcvtmu : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_aarch64_neon_fcvtns : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_aarch64_neon_fcvtnu : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_aarch64_neon_fcvtps : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_aarch64_neon_fcvtpu : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_aarch64_neon_fcvtzs : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_aarch64_neon_fcvtzu : AdvSIMD_FPToIntRounding_Intrinsic;
-// Scalar Rounding Shift Right and Accumulate (Immediate)
-def int_aarch64_neon_vrsrads_n : Neon_3Arg_ShiftImm_Intrinsic;
-def int_aarch64_neon_vrsradu_n : Neon_3Arg_ShiftImm_Intrinsic;
+ // Vector FP Rounding: only ties to even is unrepresented by a normal
+ // intrinsic.
+ def int_aarch64_neon_frintn : AdvSIMD_1FloatArg_Intrinsic;
-// Scalar Shift Left (Immediate)
-def int_aarch64_neon_vshld_n : Neon_2Arg_ShiftImm_Intrinsic;
+ // Scalar FP->Int conversions
-// Scalar Saturating Shift Left (Immediate)
-def int_aarch64_neon_vqshls_n : Neon_N2V_Intrinsic;
-def int_aarch64_neon_vqshlu_n : Neon_N2V_Intrinsic;
+ // Vector FP Inexact Narrowing
+ def int_aarch64_neon_fcvtxn : AdvSIMD_1VectorArg_Expand_Intrinsic;
+
+ // Scalar FP Inexact Narrowing
+ def int_aarch64_sisd_fcvtxn : Intrinsic<[llvm_float_ty], [llvm_double_ty],
+ [IntrNoMem]>;
+}
+
+let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
+ class AdvSIMD_2Vector2Index_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [llvm_anyvector_ty, llvm_i64_ty, LLVMMatchType<0>, llvm_i64_ty],
+ [IntrNoMem]>;
+}
-// Scalar Signed Saturating Shift Left Unsigned (Immediate)
-def int_aarch64_neon_vqshlus_n : Neon_N2V_Intrinsic;
+// Vector element to element moves
+def int_aarch64_neon_vcopy_lane: AdvSIMD_2Vector2Index_Intrinsic;
+
+let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
+ class AdvSIMD_1Vec_Load_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [LLVMAnyPointerType<LLVMMatchType<0>>],
+ [IntrReadArgMem]>;
+ class AdvSIMD_1Vec_Store_Lane_Intrinsic
+ : Intrinsic<[], [llvm_anyvector_ty, llvm_i64_ty, llvm_anyptr_ty],
+ [IntrReadWriteArgMem, NoCapture<2>]>;
+
+ class AdvSIMD_2Vec_Load_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
+ [LLVMAnyPointerType<LLVMMatchType<0>>],
+ [IntrReadArgMem]>;
+ class AdvSIMD_2Vec_Load_Lane_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
+ [LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i64_ty, llvm_anyptr_ty],
+ [IntrReadArgMem]>;
+ class AdvSIMD_2Vec_Store_Intrinsic
+ : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMAnyPointerType<LLVMMatchType<0>>],
+ [IntrReadWriteArgMem, NoCapture<2>]>;
+ class AdvSIMD_2Vec_Store_Lane_Intrinsic
+ : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
+ llvm_i64_ty, llvm_anyptr_ty],
+ [IntrReadWriteArgMem, NoCapture<3>]>;
+
+ class AdvSIMD_3Vec_Load_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
+ [LLVMAnyPointerType<LLVMMatchType<0>>],
+ [IntrReadArgMem]>;
+ class AdvSIMD_3Vec_Load_Lane_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i64_ty, llvm_anyptr_ty],
+ [IntrReadArgMem]>;
+ class AdvSIMD_3Vec_Store_Intrinsic
+ : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMAnyPointerType<LLVMMatchType<0>>],
+ [IntrReadWriteArgMem, NoCapture<3>]>;
+ class AdvSIMD_3Vec_Store_Lane_Intrinsic
+ : Intrinsic<[], [llvm_anyvector_ty,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i64_ty, llvm_anyptr_ty],
+ [IntrReadWriteArgMem, NoCapture<4>]>;
+
+ class AdvSIMD_4Vec_Load_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>],
+ [LLVMAnyPointerType<LLVMMatchType<0>>],
+ [IntrReadArgMem]>;
+ class AdvSIMD_4Vec_Load_Lane_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>],
+ [LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i64_ty, llvm_anyptr_ty],
+ [IntrReadArgMem]>;
+ class AdvSIMD_4Vec_Store_Intrinsic
+ : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMAnyPointerType<LLVMMatchType<0>>],
+ [IntrReadWriteArgMem, NoCapture<4>]>;
+ class AdvSIMD_4Vec_Store_Lane_Intrinsic
+ : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i64_ty, llvm_anyptr_ty],
+ [IntrReadWriteArgMem, NoCapture<5>]>;
+}
-// Scalar Signed Fixed-point Convert To Floating-Point (Immediate)
-def int_aarch64_neon_vcvtfxs2fp_n :
- Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty, llvm_i32_ty], [IntrNoMem]>;
+// Memory ops
-// Scalar Unsigned Fixed-point Convert To Floating-Point (Immediate)
-def int_aarch64_neon_vcvtfxu2fp_n :
- Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_aarch64_neon_ld1x2 : AdvSIMD_2Vec_Load_Intrinsic;
+def int_aarch64_neon_ld1x3 : AdvSIMD_3Vec_Load_Intrinsic;
+def int_aarch64_neon_ld1x4 : AdvSIMD_4Vec_Load_Intrinsic;
-// Scalar Floating-point Convert To Signed Fixed-point (Immediate)
-def int_aarch64_neon_vcvtfp2fxs_n :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_aarch64_neon_st1x2 : AdvSIMD_2Vec_Store_Intrinsic;
+def int_aarch64_neon_st1x3 : AdvSIMD_3Vec_Store_Intrinsic;
+def int_aarch64_neon_st1x4 : AdvSIMD_4Vec_Store_Intrinsic;
-// Scalar Floating-point Convert To Unsigned Fixed-point (Immediate)
-def int_aarch64_neon_vcvtfp2fxu_n :
- Intrinsic<[llvm_anyvector_ty], [llvm_anyfloat_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_aarch64_neon_ld2 : AdvSIMD_2Vec_Load_Intrinsic;
+def int_aarch64_neon_ld3 : AdvSIMD_3Vec_Load_Intrinsic;
+def int_aarch64_neon_ld4 : AdvSIMD_4Vec_Load_Intrinsic;
+def int_aarch64_neon_ld2lane : AdvSIMD_2Vec_Load_Lane_Intrinsic;
+def int_aarch64_neon_ld3lane : AdvSIMD_3Vec_Load_Lane_Intrinsic;
+def int_aarch64_neon_ld4lane : AdvSIMD_4Vec_Load_Lane_Intrinsic;
+
+def int_aarch64_neon_ld2r : AdvSIMD_2Vec_Load_Intrinsic;
+def int_aarch64_neon_ld3r : AdvSIMD_3Vec_Load_Intrinsic;
+def int_aarch64_neon_ld4r : AdvSIMD_4Vec_Load_Intrinsic;
+
+def int_aarch64_neon_st2 : AdvSIMD_2Vec_Store_Intrinsic;
+def int_aarch64_neon_st3 : AdvSIMD_3Vec_Store_Intrinsic;
+def int_aarch64_neon_st4 : AdvSIMD_4Vec_Store_Intrinsic;
+
+def int_aarch64_neon_st2lane : AdvSIMD_2Vec_Store_Lane_Intrinsic;
+def int_aarch64_neon_st3lane : AdvSIMD_3Vec_Store_Lane_Intrinsic;
+def int_aarch64_neon_st4lane : AdvSIMD_4Vec_Store_Lane_Intrinsic;
+
+let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
+ class AdvSIMD_Tbl1_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [llvm_v16i8_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_Tbl2_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>], [IntrNoMem]>;
+ class AdvSIMD_Tbl3_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
+ LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_Tbl4_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
+ LLVMMatchType<0>],
+ [IntrNoMem]>;
+
+ class AdvSIMD_Tbx1_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_v16i8_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_Tbx2_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
+ LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_Tbx3_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_v16i8_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_Tbx4_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+}
+def int_aarch64_neon_tbl1 : AdvSIMD_Tbl1_Intrinsic;
+def int_aarch64_neon_tbl2 : AdvSIMD_Tbl2_Intrinsic;
+def int_aarch64_neon_tbl3 : AdvSIMD_Tbl3_Intrinsic;
+def int_aarch64_neon_tbl4 : AdvSIMD_Tbl4_Intrinsic;
+
+def int_aarch64_neon_tbx1 : AdvSIMD_Tbx1_Intrinsic;
+def int_aarch64_neon_tbx2 : AdvSIMD_Tbx2_Intrinsic;
+def int_aarch64_neon_tbx3 : AdvSIMD_Tbx3_Intrinsic;
+def int_aarch64_neon_tbx4 : AdvSIMD_Tbx4_Intrinsic;
+
+let TargetPrefix = "aarch64" in {
+ class Crypto_AES_DataKey_Intrinsic
+ : Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+
+ class Crypto_AES_Data_Intrinsic
+ : Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+
+ // SHA intrinsic taking 5 words of the hash (v4i32, i32) and 4 of the schedule
+ // (v4i32).
+ class Crypto_SHA_5Hash4Schedule_Intrinsic
+ : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+
+ // SHA intrinsic taking 5 words of the hash (v4i32, i32) and 4 of the schedule
+ // (v4i32).
+ class Crypto_SHA_1Hash_Intrinsic
+ : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+ // SHA intrinsic taking 8 words of the schedule
+ class Crypto_SHA_8Schedule_Intrinsic
+ : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+
+ // SHA intrinsic taking 12 words of the schedule
+ class Crypto_SHA_12Schedule_Intrinsic
+ : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+
+ // SHA intrinsic taking 8 words of the hash and 4 of the schedule.
+ class Crypto_SHA_8Hash4Schedule_Intrinsic
+ : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+}
+
+// AES
+def int_aarch64_crypto_aese : Crypto_AES_DataKey_Intrinsic;
+def int_aarch64_crypto_aesd : Crypto_AES_DataKey_Intrinsic;
+def int_aarch64_crypto_aesmc : Crypto_AES_Data_Intrinsic;
+def int_aarch64_crypto_aesimc : Crypto_AES_Data_Intrinsic;
+
+// SHA1
+def int_aarch64_crypto_sha1c : Crypto_SHA_5Hash4Schedule_Intrinsic;
+def int_aarch64_crypto_sha1p : Crypto_SHA_5Hash4Schedule_Intrinsic;
+def int_aarch64_crypto_sha1m : Crypto_SHA_5Hash4Schedule_Intrinsic;
+def int_aarch64_crypto_sha1h : Crypto_SHA_1Hash_Intrinsic;
+
+def int_aarch64_crypto_sha1su0 : Crypto_SHA_12Schedule_Intrinsic;
+def int_aarch64_crypto_sha1su1 : Crypto_SHA_8Schedule_Intrinsic;
+
+// SHA256
+def int_aarch64_crypto_sha256h : Crypto_SHA_8Hash4Schedule_Intrinsic;
+def int_aarch64_crypto_sha256h2 : Crypto_SHA_8Hash4Schedule_Intrinsic;
+def int_aarch64_crypto_sha256su0 : Crypto_SHA_8Schedule_Intrinsic;
+def int_aarch64_crypto_sha256su1 : Crypto_SHA_12Schedule_Intrinsic;
+
+//===----------------------------------------------------------------------===//
+// CRC32
+
+let TargetPrefix = "aarch64" in {
+
+def int_aarch64_crc32b : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_aarch64_crc32cb : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_aarch64_crc32h : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_aarch64_crc32ch : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_aarch64_crc32w : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_aarch64_crc32cw : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_aarch64_crc32x : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+def int_aarch64_crc32cx : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
+ [IntrNoMem]>;
}
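
With the TargetPrefix switched to "aarch64", front ends and passes reach these definitions through the usual tblgen-generated enumerators. A small sketch for the CRC32 group directly above, assuming a Module *M, an IRBuilder<> at the insertion point, and two i32 operands Crc and Byte; the helper name emitCrc32b is illustrative, not part of the patch:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    Value *emitCrc32b(Module *M, IRBuilder<> &Builder, Value *Crc, Value *Byte) {
      // llvm.aarch64.crc32b is not overloaded, so no explicit type list is needed.
      Function *Crc32b = Intrinsic::getDeclaration(M, Intrinsic::aarch64_crc32b);
      Value *Args[] = { Crc, Byte };
      return Builder.CreateCall(Crc32b, Args, "crc");
    }
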
diff --git a/include/llvm/IR/IntrinsicsARM.td b/include/llvm/IR/IntrinsicsARM.td
index 482f98e..d19d7b8 100644
--- a/include/llvm/IR/IntrinsicsARM.td
+++ b/include/llvm/IR/IntrinsicsARM.td
@@ -122,7 +122,13 @@ def int_arm_crc32cw : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
//===----------------------------------------------------------------------===//
// HINT
-def int_arm_sevl : Intrinsic<[], []>;
+
+def int_arm_hint : Intrinsic<[], [llvm_i32_ty]>;
+
+//===----------------------------------------------------------------------===//
+// UND (reserved undefined sequence)
+
+def int_arm_undefined : Intrinsic<[], [llvm_i32_ty]>;
//===----------------------------------------------------------------------===//
// Advanced SIMD (NEON)
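
In the ARM hunk above, the old llvm.arm.sevl intrinsic is folded into the generic llvm.arm.hint, which takes the hint number as an i32 operand (SEVL is hint #5 in the ARM encoding). A hedged sketch of emitting the equivalent call, assuming a Module *M and an IRBuilder<> at the insertion point; the helper name emitSevl is illustrative:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    void emitSevl(Module *M, IRBuilder<> &Builder) {
      Function *Hint = Intrinsic::getDeclaration(M, Intrinsic::arm_hint);
      Builder.CreateCall(Hint, Builder.getInt32(5)); // HINT #5 == SEVL
    }
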
diff --git a/include/llvm/IR/IntrinsicsARM64.td b/include/llvm/IR/IntrinsicsARM64.td
deleted file mode 100644
index d7f307e..0000000
--- a/include/llvm/IR/IntrinsicsARM64.td
+++ /dev/null
@@ -1,628 +0,0 @@
-//===- IntrinsicsARM64.td - Defines ARM64 intrinsics -------*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines all of the ARM64-specific intrinsics.
-//
-//===----------------------------------------------------------------------===//
-
-let TargetPrefix = "arm64" in {
-
-def int_arm64_ldxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty]>;
-def int_arm64_stxr : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_anyptr_ty]>;
-def int_arm64_clrex : Intrinsic<[]>;
-
-def int_arm64_ldxp : Intrinsic<[llvm_i64_ty, llvm_i64_ty], [llvm_ptr_ty]>;
-def int_arm64_stxp : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i64_ty,
- llvm_ptr_ty]>;
-
-def int_arm64_sdiv : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
- LLVMMatchType<0>], [IntrNoMem]>;
-def int_arm64_udiv : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
- LLVMMatchType<0>], [IntrNoMem]>;
-}
-
-//===----------------------------------------------------------------------===//
-// Advanced SIMD (NEON)
-
-let TargetPrefix = "arm64" in { // All intrinsics start with "llvm.arm64.".
- class AdvSIMD_2Scalar_Float_Intrinsic
- : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem]>;
-
- class AdvSIMD_FPToIntRounding_Intrinsic
- : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
-
- class AdvSIMD_1IntArg_Intrinsic
- : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], [IntrNoMem]>;
- class AdvSIMD_1FloatArg_Intrinsic
- : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
- class AdvSIMD_1VectorArg_Intrinsic
- : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
- class AdvSIMD_1VectorArg_Expand_Intrinsic
- : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
- class AdvSIMD_1VectorArg_Long_Intrinsic
- : Intrinsic<[llvm_anyvector_ty], [LLVMTruncatedType<0>], [IntrNoMem]>;
- class AdvSIMD_1IntArg_Narrow_Intrinsic
- : Intrinsic<[llvm_anyint_ty], [llvm_anyint_ty], [IntrNoMem]>;
- class AdvSIMD_1VectorArg_Narrow_Intrinsic
- : Intrinsic<[llvm_anyint_ty], [LLVMExtendedType<0>], [IntrNoMem]>;
- class AdvSIMD_1VectorArg_Int_Across_Intrinsic
- : Intrinsic<[llvm_anyint_ty], [llvm_anyvector_ty], [IntrNoMem]>;
- class AdvSIMD_1VectorArg_Float_Across_Intrinsic
- : Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
-
- class AdvSIMD_2IntArg_Intrinsic
- : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem]>;
- class AdvSIMD_2FloatArg_Intrinsic
- : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem]>;
- class AdvSIMD_2VectorArg_Intrinsic
- : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem]>;
- class AdvSIMD_2VectorArg_Compare_Intrinsic
- : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMMatchType<1>],
- [IntrNoMem]>;
- class AdvSIMD_2Arg_FloatCompare_Intrinsic
- : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, LLVMMatchType<1>],
- [IntrNoMem]>;
- class AdvSIMD_2VectorArg_Long_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMTruncatedType<0>, LLVMTruncatedType<0>],
- [IntrNoMem]>;
- class AdvSIMD_2VectorArg_Wide_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMTruncatedType<0>],
- [IntrNoMem]>;
- class AdvSIMD_2VectorArg_Narrow_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMExtendedType<0>, LLVMExtendedType<0>],
- [IntrNoMem]>;
- class AdvSIMD_2Arg_Scalar_Narrow_Intrinsic
- : Intrinsic<[llvm_anyint_ty],
- [LLVMExtendedType<0>, llvm_i32_ty],
- [IntrNoMem]>;
- class AdvSIMD_2VectorArg_Scalar_Expand_BySize_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [llvm_anyvector_ty],
- [IntrNoMem]>;
- class AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMTruncatedType<0>],
- [IntrNoMem]>;
- class AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMTruncatedType<0>, llvm_i32_ty],
- [IntrNoMem]>;
- class AdvSIMD_2VectorArg_Tied_Narrow_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty],
- [IntrNoMem]>;
-
- class AdvSIMD_3VectorArg_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem]>;
- class AdvSIMD_3VectorArg_Scalar_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
- [IntrNoMem]>;
- class AdvSIMD_3VectorArg_Tied_Narrow_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty,
- LLVMMatchType<1>], [IntrNoMem]>;
- class AdvSIMD_3VectorArg_Scalar_Tied_Narrow_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty, llvm_i32_ty],
- [IntrNoMem]>;
- class AdvSIMD_CvtFxToFP_Intrinsic
- : Intrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty],
- [IntrNoMem]>;
- class AdvSIMD_CvtFPToFx_Intrinsic
- : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, llvm_i32_ty],
- [IntrNoMem]>;
-}
-
-// Arithmetic ops
-
-let Properties = [IntrNoMem] in {
- // Vector Add Across Lanes
- def int_arm64_neon_saddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
- def int_arm64_neon_uaddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
- def int_arm64_neon_faddv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
-
- // Vector Long Add Across Lanes
- def int_arm64_neon_saddlv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
- def int_arm64_neon_uaddlv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
-
- // Vector Halving Add
- def int_arm64_neon_shadd : AdvSIMD_2VectorArg_Intrinsic;
- def int_arm64_neon_uhadd : AdvSIMD_2VectorArg_Intrinsic;
-
- // Vector Rounding Halving Add
- def int_arm64_neon_srhadd : AdvSIMD_2VectorArg_Intrinsic;
- def int_arm64_neon_urhadd : AdvSIMD_2VectorArg_Intrinsic;
-
- // Vector Saturating Add
- def int_arm64_neon_sqadd : AdvSIMD_2IntArg_Intrinsic;
- def int_arm64_neon_suqadd : AdvSIMD_2IntArg_Intrinsic;
- def int_arm64_neon_usqadd : AdvSIMD_2IntArg_Intrinsic;
- def int_arm64_neon_uqadd : AdvSIMD_2IntArg_Intrinsic;
-
- // Vector Add High-Half
- // FIXME: this is a legacy intrinsic for aarch64_simd.h. Remove it when that
- // header is no longer supported.
- def int_arm64_neon_addhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;
-
- // Vector Rounding Add High-Half
- def int_arm64_neon_raddhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;
-
- // Vector Saturating Doubling Multiply High
- def int_arm64_neon_sqdmulh : AdvSIMD_2IntArg_Intrinsic;
-
- // Vector Saturating Rounding Doubling Multiply High
- def int_arm64_neon_sqrdmulh : AdvSIMD_2IntArg_Intrinsic;
-
- // Vector Polynominal Multiply
- def int_arm64_neon_pmul : AdvSIMD_2VectorArg_Intrinsic;
-
- // Vector Long Multiply
- def int_arm64_neon_smull : AdvSIMD_2VectorArg_Long_Intrinsic;
- def int_arm64_neon_umull : AdvSIMD_2VectorArg_Long_Intrinsic;
- def int_arm64_neon_pmull : AdvSIMD_2VectorArg_Long_Intrinsic;
-
- // 64-bit polynomial multiply really returns an i128, which is not legal. Fake
- // it with a v16i8.
- def int_arm64_neon_pmull64 :
- Intrinsic<[llvm_v16i8_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
-
- // Vector Extending Multiply
- def int_arm64_neon_fmulx : AdvSIMD_2FloatArg_Intrinsic;
-
- // Vector Saturating Doubling Long Multiply
- def int_arm64_neon_sqdmull : AdvSIMD_2VectorArg_Long_Intrinsic;
- def int_arm64_neon_sqdmulls_scalar
- : Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-
- // Vector Halving Subtract
- def int_arm64_neon_shsub : AdvSIMD_2VectorArg_Intrinsic;
- def int_arm64_neon_uhsub : AdvSIMD_2VectorArg_Intrinsic;
-
- // Vector Saturating Subtract
- def int_arm64_neon_sqsub : AdvSIMD_2IntArg_Intrinsic;
- def int_arm64_neon_uqsub : AdvSIMD_2IntArg_Intrinsic;
-
- // Vector Subtract High-Half
- // FIXME: this is a legacy intrinsic for aarch64_simd.h. Remove it when that
- // header is no longer supported.
- def int_arm64_neon_subhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;
-
- // Vector Rounding Subtract High-Half
- def int_arm64_neon_rsubhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;
-
- // Vector Compare Absolute Greater-than-or-equal
- def int_arm64_neon_facge : AdvSIMD_2Arg_FloatCompare_Intrinsic;
-
- // Vector Compare Absolute Greater-than
- def int_arm64_neon_facgt : AdvSIMD_2Arg_FloatCompare_Intrinsic;
-
- // Vector Absolute Difference
- def int_arm64_neon_sabd : AdvSIMD_2VectorArg_Intrinsic;
- def int_arm64_neon_uabd : AdvSIMD_2VectorArg_Intrinsic;
- def int_arm64_neon_fabd : AdvSIMD_2VectorArg_Intrinsic;
-
- // Scalar Absolute Difference
- def int_arm64_sisd_fabd : AdvSIMD_2Scalar_Float_Intrinsic;
-
- // Vector Max
- def int_arm64_neon_smax : AdvSIMD_2VectorArg_Intrinsic;
- def int_arm64_neon_umax : AdvSIMD_2VectorArg_Intrinsic;
- def int_arm64_neon_fmax : AdvSIMD_2VectorArg_Intrinsic;
- def int_arm64_neon_fmaxnmp : AdvSIMD_2VectorArg_Intrinsic;
-
- // Vector Max Across Lanes
- def int_arm64_neon_smaxv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
- def int_arm64_neon_umaxv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
- def int_arm64_neon_fmaxv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
- def int_arm64_neon_fmaxnmv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
-
- // Vector Min
- def int_arm64_neon_smin : AdvSIMD_2VectorArg_Intrinsic;
- def int_arm64_neon_umin : AdvSIMD_2VectorArg_Intrinsic;
- def int_arm64_neon_fmin : AdvSIMD_2VectorArg_Intrinsic;
- def int_arm64_neon_fminnmp : AdvSIMD_2VectorArg_Intrinsic;
-
- // Vector Min/Max Number
- def int_arm64_neon_fminnm : AdvSIMD_2FloatArg_Intrinsic;
- def int_arm64_neon_fmaxnm : AdvSIMD_2FloatArg_Intrinsic;
-
- // Vector Min Across Lanes
- def int_arm64_neon_sminv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
- def int_arm64_neon_uminv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
- def int_arm64_neon_fminv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
- def int_arm64_neon_fminnmv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
-
- // Pairwise Add
- def int_arm64_neon_addp : AdvSIMD_2VectorArg_Intrinsic;
-
- // Long Pairwise Add
- // FIXME: In theory, we shouldn't need intrinsics for saddlp or
- // uaddlp, but tblgen's type inference currently can't handle the
- // pattern fragments this ends up generating.
- def int_arm64_neon_saddlp : AdvSIMD_1VectorArg_Expand_Intrinsic;
- def int_arm64_neon_uaddlp : AdvSIMD_1VectorArg_Expand_Intrinsic;
-
- // Folding Maximum
- def int_arm64_neon_smaxp : AdvSIMD_2VectorArg_Intrinsic;
- def int_arm64_neon_umaxp : AdvSIMD_2VectorArg_Intrinsic;
- def int_arm64_neon_fmaxp : AdvSIMD_2VectorArg_Intrinsic;
-
- // Folding Minimum
- def int_arm64_neon_sminp : AdvSIMD_2VectorArg_Intrinsic;
- def int_arm64_neon_uminp : AdvSIMD_2VectorArg_Intrinsic;
- def int_arm64_neon_fminp : AdvSIMD_2VectorArg_Intrinsic;
-
- // Reciprocal Estimate/Step
- def int_arm64_neon_frecps : AdvSIMD_2FloatArg_Intrinsic;
- def int_arm64_neon_frsqrts : AdvSIMD_2FloatArg_Intrinsic;
-
- // Reciprocal Exponent
- def int_arm64_neon_frecpx : AdvSIMD_1FloatArg_Intrinsic;
-
- // Vector Saturating Shift Left
- def int_arm64_neon_sqshl : AdvSIMD_2IntArg_Intrinsic;
- def int_arm64_neon_uqshl : AdvSIMD_2IntArg_Intrinsic;
-
- // Vector Rounding Shift Left
- def int_arm64_neon_srshl : AdvSIMD_2IntArg_Intrinsic;
- def int_arm64_neon_urshl : AdvSIMD_2IntArg_Intrinsic;
-
- // Vector Saturating Rounding Shift Left
- def int_arm64_neon_sqrshl : AdvSIMD_2IntArg_Intrinsic;
- def int_arm64_neon_uqrshl : AdvSIMD_2IntArg_Intrinsic;
-
- // Vector Signed->Unsigned Shift Left by Constant
- def int_arm64_neon_sqshlu : AdvSIMD_2IntArg_Intrinsic;
-
- // Vector Signed->Unsigned Narrowing Saturating Shift Right by Constant
- def int_arm64_neon_sqshrun : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
-
- // Vector Signed->Unsigned Rounding Narrowing Saturating Shift Right by Const
- def int_arm64_neon_sqrshrun : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
-
- // Vector Narrowing Shift Right by Constant
- def int_arm64_neon_sqshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
- def int_arm64_neon_uqshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
-
- // Vector Rounding Narrowing Shift Right by Constant
- def int_arm64_neon_rshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
-
- // Vector Rounding Narrowing Saturating Shift Right by Constant
- def int_arm64_neon_sqrshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
- def int_arm64_neon_uqrshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
-
- // Vector Shift Left
- def int_arm64_neon_sshl : AdvSIMD_2IntArg_Intrinsic;
- def int_arm64_neon_ushl : AdvSIMD_2IntArg_Intrinsic;
-
- // Vector Widening Shift Left by Constant
- def int_arm64_neon_shll : AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic;
- def int_arm64_neon_sshll : AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic;
- def int_arm64_neon_ushll : AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic;
-
- // Vector Shift Right by Constant and Insert
- def int_arm64_neon_vsri : AdvSIMD_3VectorArg_Scalar_Intrinsic;
-
- // Vector Shift Left by Constant and Insert
- def int_arm64_neon_vsli : AdvSIMD_3VectorArg_Scalar_Intrinsic;
-
- // Vector Saturating Narrow
- def int_arm64_neon_scalar_sqxtn: AdvSIMD_1IntArg_Narrow_Intrinsic;
- def int_arm64_neon_scalar_uqxtn : AdvSIMD_1IntArg_Narrow_Intrinsic;
- def int_arm64_neon_sqxtn : AdvSIMD_1VectorArg_Narrow_Intrinsic;
- def int_arm64_neon_uqxtn : AdvSIMD_1VectorArg_Narrow_Intrinsic;
-
- // Vector Saturating Extract and Unsigned Narrow
- def int_arm64_neon_scalar_sqxtun : AdvSIMD_1IntArg_Narrow_Intrinsic;
- def int_arm64_neon_sqxtun : AdvSIMD_1VectorArg_Narrow_Intrinsic;
-
- // Vector Absolute Value
- def int_arm64_neon_abs : AdvSIMD_1IntArg_Intrinsic;
-
- // Vector Saturating Absolute Value
- def int_arm64_neon_sqabs : AdvSIMD_1IntArg_Intrinsic;
-
- // Vector Saturating Negation
- def int_arm64_neon_sqneg : AdvSIMD_1IntArg_Intrinsic;
-
- // Vector Count Leading Sign Bits
- def int_arm64_neon_cls : AdvSIMD_1VectorArg_Intrinsic;
-
- // Vector Reciprocal Estimate
- def int_arm64_neon_urecpe : AdvSIMD_1VectorArg_Intrinsic;
- def int_arm64_neon_frecpe : AdvSIMD_1FloatArg_Intrinsic;
-
- // Vector Square Root Estimate
- def int_arm64_neon_ursqrte : AdvSIMD_1VectorArg_Intrinsic;
- def int_arm64_neon_frsqrte : AdvSIMD_1FloatArg_Intrinsic;
-
- // Vector Bitwise Reverse
- def int_arm64_neon_rbit : AdvSIMD_1VectorArg_Intrinsic;
-
- // Vector Conversions Between Half-Precision and Single-Precision.
- def int_arm64_neon_vcvtfp2hf
- : Intrinsic<[llvm_v4i16_ty], [llvm_v4f32_ty], [IntrNoMem]>;
- def int_arm64_neon_vcvthf2fp
- : Intrinsic<[llvm_v4f32_ty], [llvm_v4i16_ty], [IntrNoMem]>;
-
- // Vector Conversions Between Floating-point and Fixed-point.
- def int_arm64_neon_vcvtfp2fxs : AdvSIMD_CvtFPToFx_Intrinsic;
- def int_arm64_neon_vcvtfp2fxu : AdvSIMD_CvtFPToFx_Intrinsic;
- def int_arm64_neon_vcvtfxs2fp : AdvSIMD_CvtFxToFP_Intrinsic;
- def int_arm64_neon_vcvtfxu2fp : AdvSIMD_CvtFxToFP_Intrinsic;
-
- // Vector FP->Int Conversions
- def int_arm64_neon_fcvtas : AdvSIMD_FPToIntRounding_Intrinsic;
- def int_arm64_neon_fcvtau : AdvSIMD_FPToIntRounding_Intrinsic;
- def int_arm64_neon_fcvtms : AdvSIMD_FPToIntRounding_Intrinsic;
- def int_arm64_neon_fcvtmu : AdvSIMD_FPToIntRounding_Intrinsic;
- def int_arm64_neon_fcvtns : AdvSIMD_FPToIntRounding_Intrinsic;
- def int_arm64_neon_fcvtnu : AdvSIMD_FPToIntRounding_Intrinsic;
- def int_arm64_neon_fcvtps : AdvSIMD_FPToIntRounding_Intrinsic;
- def int_arm64_neon_fcvtpu : AdvSIMD_FPToIntRounding_Intrinsic;
- def int_arm64_neon_fcvtzs : AdvSIMD_FPToIntRounding_Intrinsic;
- def int_arm64_neon_fcvtzu : AdvSIMD_FPToIntRounding_Intrinsic;
-
- // Vector FP Rounding: only ties to even is unrepresented by a normal
- // intrinsic.
- def int_arm64_neon_frintn : AdvSIMD_1FloatArg_Intrinsic;
-
- // Scalar FP->Int conversions
-
- // Vector FP Inexact Narrowing
- def int_arm64_neon_fcvtxn : AdvSIMD_1VectorArg_Expand_Intrinsic;
-
- // Scalar FP Inexact Narrowing
- def int_arm64_sisd_fcvtxn : Intrinsic<[llvm_float_ty], [llvm_double_ty],
- [IntrNoMem]>;
-}
-
-let TargetPrefix = "arm64" in { // All intrinsics start with "llvm.arm64.".
- class AdvSIMD_2Vector2Index_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [llvm_anyvector_ty, llvm_i64_ty, LLVMMatchType<0>, llvm_i64_ty],
- [IntrNoMem]>;
-}
-
-// Vector element to element moves
-def int_arm64_neon_vcopy_lane: AdvSIMD_2Vector2Index_Intrinsic;
-
-let TargetPrefix = "arm64" in { // All intrinsics start with "llvm.arm64.".
- class AdvSIMD_1Vec_Load_Intrinsic
- : Intrinsic<[llvm_anyvector_ty], [LLVMAnyPointerType<LLVMMatchType<0>>],
- [IntrReadArgMem]>;
- class AdvSIMD_1Vec_Store_Lane_Intrinsic
- : Intrinsic<[], [llvm_anyvector_ty, llvm_i64_ty, llvm_anyptr_ty],
- [IntrReadWriteArgMem, NoCapture<2>]>;
-
- class AdvSIMD_2Vec_Load_Intrinsic
- : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
- [LLVMAnyPointerType<LLVMMatchType<0>>],
- [IntrReadArgMem]>;
- class AdvSIMD_2Vec_Load_Lane_Intrinsic
- : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
- [LLVMMatchType<0>, LLVMMatchType<0>,
- llvm_i64_ty, llvm_anyptr_ty],
- [IntrReadArgMem]>;
- class AdvSIMD_2Vec_Store_Intrinsic
- : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
- LLVMAnyPointerType<LLVMMatchType<0>>],
- [IntrReadWriteArgMem, NoCapture<2>]>;
- class AdvSIMD_2Vec_Store_Lane_Intrinsic
- : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
- llvm_i64_ty, llvm_anyptr_ty],
- [IntrReadWriteArgMem, NoCapture<3>]>;
-
- class AdvSIMD_3Vec_Load_Intrinsic
- : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
- [LLVMAnyPointerType<LLVMMatchType<0>>],
- [IntrReadArgMem]>;
- class AdvSIMD_3Vec_Load_Lane_Intrinsic
- : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
- [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
- llvm_i64_ty, llvm_anyptr_ty],
- [IntrReadArgMem]>;
- class AdvSIMD_3Vec_Store_Intrinsic
- : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
- LLVMMatchType<0>, LLVMAnyPointerType<LLVMMatchType<0>>],
- [IntrReadWriteArgMem, NoCapture<3>]>;
- class AdvSIMD_3Vec_Store_Lane_Intrinsic
- : Intrinsic<[], [llvm_anyvector_ty,
- LLVMMatchType<0>, LLVMMatchType<0>,
- llvm_i64_ty, llvm_anyptr_ty],
- [IntrReadWriteArgMem, NoCapture<4>]>;
-
- class AdvSIMD_4Vec_Load_Intrinsic
- : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
- LLVMMatchType<0>, LLVMMatchType<0>],
- [LLVMAnyPointerType<LLVMMatchType<0>>],
- [IntrReadArgMem]>;
- class AdvSIMD_4Vec_Load_Lane_Intrinsic
- : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
- LLVMMatchType<0>, LLVMMatchType<0>],
- [LLVMMatchType<0>, LLVMMatchType<0>,
- LLVMMatchType<0>, LLVMMatchType<0>,
- llvm_i64_ty, llvm_anyptr_ty],
- [IntrReadArgMem]>;
- class AdvSIMD_4Vec_Store_Intrinsic
- : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
- LLVMMatchType<0>, LLVMMatchType<0>,
- LLVMAnyPointerType<LLVMMatchType<0>>],
- [IntrReadWriteArgMem, NoCapture<4>]>;
- class AdvSIMD_4Vec_Store_Lane_Intrinsic
- : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
- LLVMMatchType<0>, LLVMMatchType<0>,
- llvm_i64_ty, llvm_anyptr_ty],
- [IntrReadWriteArgMem, NoCapture<5>]>;
-}
-
-// Memory ops
-
-def int_arm64_neon_ld1x2 : AdvSIMD_2Vec_Load_Intrinsic;
-def int_arm64_neon_ld1x3 : AdvSIMD_3Vec_Load_Intrinsic;
-def int_arm64_neon_ld1x4 : AdvSIMD_4Vec_Load_Intrinsic;
-
-def int_arm64_neon_st1x2 : AdvSIMD_2Vec_Store_Intrinsic;
-def int_arm64_neon_st1x3 : AdvSIMD_3Vec_Store_Intrinsic;
-def int_arm64_neon_st1x4 : AdvSIMD_4Vec_Store_Intrinsic;
-
-def int_arm64_neon_ld2 : AdvSIMD_2Vec_Load_Intrinsic;
-def int_arm64_neon_ld3 : AdvSIMD_3Vec_Load_Intrinsic;
-def int_arm64_neon_ld4 : AdvSIMD_4Vec_Load_Intrinsic;
-
-def int_arm64_neon_ld2lane : AdvSIMD_2Vec_Load_Lane_Intrinsic;
-def int_arm64_neon_ld3lane : AdvSIMD_3Vec_Load_Lane_Intrinsic;
-def int_arm64_neon_ld4lane : AdvSIMD_4Vec_Load_Lane_Intrinsic;
-
-def int_arm64_neon_ld2r : AdvSIMD_2Vec_Load_Intrinsic;
-def int_arm64_neon_ld3r : AdvSIMD_3Vec_Load_Intrinsic;
-def int_arm64_neon_ld4r : AdvSIMD_4Vec_Load_Intrinsic;
-
-def int_arm64_neon_st2 : AdvSIMD_2Vec_Store_Intrinsic;
-def int_arm64_neon_st3 : AdvSIMD_3Vec_Store_Intrinsic;
-def int_arm64_neon_st4 : AdvSIMD_4Vec_Store_Intrinsic;
-
-def int_arm64_neon_st2lane : AdvSIMD_2Vec_Store_Lane_Intrinsic;
-def int_arm64_neon_st3lane : AdvSIMD_3Vec_Store_Lane_Intrinsic;
-def int_arm64_neon_st4lane : AdvSIMD_4Vec_Store_Lane_Intrinsic;
-
-let TargetPrefix = "arm64" in { // All intrinsics start with "llvm.arm64.".
- class AdvSIMD_Tbl1_Intrinsic
- : Intrinsic<[llvm_anyvector_ty], [llvm_v16i8_ty, LLVMMatchType<0>],
- [IntrNoMem]>;
- class AdvSIMD_Tbl2_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>], [IntrNoMem]>;
- class AdvSIMD_Tbl3_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
- LLVMMatchType<0>],
- [IntrNoMem]>;
- class AdvSIMD_Tbl4_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
- LLVMMatchType<0>],
- [IntrNoMem]>;
-
- class AdvSIMD_Tbx1_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_v16i8_ty, LLVMMatchType<0>],
- [IntrNoMem]>;
- class AdvSIMD_Tbx2_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
- LLVMMatchType<0>],
- [IntrNoMem]>;
- class AdvSIMD_Tbx3_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
- llvm_v16i8_ty, LLVMMatchType<0>],
- [IntrNoMem]>;
- class AdvSIMD_Tbx4_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
- llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>],
- [IntrNoMem]>;
-}
-def int_arm64_neon_tbl1 : AdvSIMD_Tbl1_Intrinsic;
-def int_arm64_neon_tbl2 : AdvSIMD_Tbl2_Intrinsic;
-def int_arm64_neon_tbl3 : AdvSIMD_Tbl3_Intrinsic;
-def int_arm64_neon_tbl4 : AdvSIMD_Tbl4_Intrinsic;
-
-def int_arm64_neon_tbx1 : AdvSIMD_Tbx1_Intrinsic;
-def int_arm64_neon_tbx2 : AdvSIMD_Tbx2_Intrinsic;
-def int_arm64_neon_tbx3 : AdvSIMD_Tbx3_Intrinsic;
-def int_arm64_neon_tbx4 : AdvSIMD_Tbx4_Intrinsic;
-
-let TargetPrefix = "arm64" in {
- class Crypto_AES_DataKey_Intrinsic
- : Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
-
- class Crypto_AES_Data_Intrinsic
- : Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
-
- // SHA intrinsic taking 5 words of the hash (v4i32, i32) and 4 of the schedule
- // (v4i32).
- class Crypto_SHA_5Hash4Schedule_Intrinsic
- : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty],
- [IntrNoMem]>;
-
-  // SHA intrinsic taking only a single i32 word of the hash.
- class Crypto_SHA_1Hash_Intrinsic
- : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
-
- // SHA intrinsic taking 8 words of the schedule
- class Crypto_SHA_8Schedule_Intrinsic
- : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
-
- // SHA intrinsic taking 12 words of the schedule
- class Crypto_SHA_12Schedule_Intrinsic
- : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
- [IntrNoMem]>;
-
- // SHA intrinsic taking 8 words of the hash and 4 of the schedule.
- class Crypto_SHA_8Hash4Schedule_Intrinsic
- : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
- [IntrNoMem]>;
-}
-
-// AES
-def int_arm64_crypto_aese : Crypto_AES_DataKey_Intrinsic;
-def int_arm64_crypto_aesd : Crypto_AES_DataKey_Intrinsic;
-def int_arm64_crypto_aesmc : Crypto_AES_Data_Intrinsic;
-def int_arm64_crypto_aesimc : Crypto_AES_Data_Intrinsic;
-
-// SHA1
-def int_arm64_crypto_sha1c : Crypto_SHA_5Hash4Schedule_Intrinsic;
-def int_arm64_crypto_sha1p : Crypto_SHA_5Hash4Schedule_Intrinsic;
-def int_arm64_crypto_sha1m : Crypto_SHA_5Hash4Schedule_Intrinsic;
-def int_arm64_crypto_sha1h : Crypto_SHA_1Hash_Intrinsic;
-
-def int_arm64_crypto_sha1su0 : Crypto_SHA_12Schedule_Intrinsic;
-def int_arm64_crypto_sha1su1 : Crypto_SHA_8Schedule_Intrinsic;
-
-// SHA256
-def int_arm64_crypto_sha256h : Crypto_SHA_8Hash4Schedule_Intrinsic;
-def int_arm64_crypto_sha256h2 : Crypto_SHA_8Hash4Schedule_Intrinsic;
-def int_arm64_crypto_sha256su0 : Crypto_SHA_8Schedule_Intrinsic;
-def int_arm64_crypto_sha256su1 : Crypto_SHA_12Schedule_Intrinsic;
-
-//===----------------------------------------------------------------------===//
-// CRC32
-
-let TargetPrefix = "arm64" in {
-
-def int_arm64_crc32b : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem]>;
-def int_arm64_crc32cb : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem]>;
-def int_arm64_crc32h : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem]>;
-def int_arm64_crc32ch : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem]>;
-def int_arm64_crc32w : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem]>;
-def int_arm64_crc32cw : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem]>;
-def int_arm64_crc32x : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
- [IntrNoMem]>;
-def int_arm64_crc32cx : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
- [IntrNoMem]>;
-}
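
For reference, the block removed above ends with the llvm.arm64 CRC32 intrinsics, which model the AArch64 CRC extension instructions. A minimal C sketch of how such an intrinsic is typically reached from source code follows; it assumes an ACLE-style arm_acle.h and a target built with the CRC extension, and the IR-level name the compiler actually emits (llvm.arm64.* here, llvm.aarch64.* in later trees) depends on the LLVM revision.

#include <arm_acle.h>   /* ACLE CRC32 helpers; assumes the CRC extension is enabled */
#include <stdint.h>
#include <stddef.h>

/* Byte-wise CRC-32C over a buffer. __crc32cb corresponds to the byte-sized
   variant declared above as int_arm64_crc32cb. */
static uint32_t crc32c_update(uint32_t crc, const uint8_t *p, size_t n) {
  for (size_t i = 0; i < n; ++i)
    crc = __crc32cb(crc, p[i]);
  return crc;
}
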
diff --git a/include/llvm/IR/IntrinsicsNVVM.td b/include/llvm/IR/IntrinsicsNVVM.td
index 7f72ce8..26dc70a 100644
--- a/include/llvm/IR/IntrinsicsNVVM.td
+++ b/include/llvm/IR/IntrinsicsNVVM.td
@@ -875,6 +875,14 @@ def int_nvvm_move_ptr : Intrinsic<[llvm_anyptr_ty], [llvm_anyptr_ty],
[IntrNoMem, NoCapture<0>], "llvm.nvvm.move.ptr">;
+// For getting the handle from a texture or surface variable
+def int_nvvm_texsurf_handle
+ : Intrinsic<[llvm_i64_ty], [llvm_metadata_ty, llvm_anyi64ptr_ty],
+ [IntrNoMem], "llvm.nvvm.texsurf.handle">;
+def int_nvvm_texsurf_handle_internal
+ : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty],
+ [IntrNoMem], "llvm.nvvm.texsurf.handle.internal">;
+
/// Error / Warn
def int_nvvm_compiler_error :
Intrinsic<[], [llvm_anyptr_ty], [], "llvm.nvvm.compiler.error">;
@@ -882,6 +890,918 @@ def int_nvvm_compiler_warn :
Intrinsic<[], [llvm_anyptr_ty], [], "llvm.nvvm.compiler.warn">;
+// Texture Fetch
+def int_nvvm_tex_1d_v4f32_i32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.tex.1d.v4f32.i32">;
+def int_nvvm_tex_1d_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.v4f32.f32">;
+def int_nvvm_tex_1d_level_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.level.v4f32.f32">;
+def int_nvvm_tex_1d_grad_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.grad.v4f32.f32">;
+def int_nvvm_tex_1d_v4i32_i32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.tex.1d.v4i32.i32">;
+def int_nvvm_tex_1d_v4i32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.v4i32.f32">;
+def int_nvvm_tex_1d_level_v4i32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.level.v4i32.f32">;
+def int_nvvm_tex_1d_grad_v4i32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.grad.v4i32.f32">;
+
+def int_nvvm_tex_1d_array_v4f32_i32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.tex.1d.array.v4f32.i32">;
+def int_nvvm_tex_1d_array_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.array.v4f32.f32">;
+def int_nvvm_tex_1d_array_level_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.array.level.v4f32.f32">;
+def int_nvvm_tex_1d_array_grad_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.array.grad.v4f32.f32">;
+def int_nvvm_tex_1d_array_v4i32_i32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.tex.1d.array.v4i32.i32">;
+def int_nvvm_tex_1d_array_v4i32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.array.v4i32.f32">;
+def int_nvvm_tex_1d_array_level_v4i32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.array.level.v4i32.f32">;
+def int_nvvm_tex_1d_array_grad_v4i32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.array.grad.v4i32.f32">;
+
+def int_nvvm_tex_2d_v4f32_i32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.tex.2d.v4f32.i32">;
+def int_nvvm_tex_2d_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.v4f32.f32">;
+def int_nvvm_tex_2d_level_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.level.v4f32.f32">;
+def int_nvvm_tex_2d_grad_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.grad.v4f32.f32">;
+def int_nvvm_tex_2d_v4i32_i32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.tex.2d.v4i32.i32">;
+def int_nvvm_tex_2d_v4i32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.v4i32.f32">;
+def int_nvvm_tex_2d_level_v4i32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.level.v4i32.f32">;
+def int_nvvm_tex_2d_grad_v4i32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.grad.v4i32.f32">;
+
+def int_nvvm_tex_2d_array_v4f32_i32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty], [],
+ "llvm.nvvm.tex.2d.array.v4f32.i32">;
+def int_nvvm_tex_2d_array_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.array.v4f32.f32">;
+def int_nvvm_tex_2d_array_level_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.array.level.v4f32.f32">;
+def int_nvvm_tex_2d_array_grad_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.array.grad.v4f32.f32">;
+def int_nvvm_tex_2d_array_v4i32_i32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty], [],
+ "llvm.nvvm.tex.2d.array.v4i32.i32">;
+def int_nvvm_tex_2d_array_v4i32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.array.v4i32.f32">;
+def int_nvvm_tex_2d_array_level_v4i32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.array.level.v4i32.f32">;
+def int_nvvm_tex_2d_array_grad_v4i32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.array.grad.v4i32.f32">;
+
+def int_nvvm_tex_3d_v4f32_i32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [], "llvm.nvvm.tex.3d.v4f32.i32">;
+def int_nvvm_tex_3d_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.3d.v4f32.f32">;
+def int_nvvm_tex_3d_level_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.3d.level.v4f32.f32">;
+def int_nvvm_tex_3d_grad_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.3d.grad.v4f32.f32">;
+def int_nvvm_tex_3d_v4i32_i32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [], "llvm.nvvm.tex.3d.v4i32.i32">;
+def int_nvvm_tex_3d_v4i32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.3d.v4i32.f32">;
+def int_nvvm_tex_3d_level_v4i32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.3d.level.v4i32.f32">;
+def int_nvvm_tex_3d_grad_v4i32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.3d.grad.v4i32.f32">;
+
+// Surface Load
+def int_nvvm_suld_1d_i8_trap
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.i8.trap">;
+def int_nvvm_suld_1d_i16_trap
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.i16.trap">;
+def int_nvvm_suld_1d_i32_trap
+ : Intrinsic<[llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.i32.trap">;
+def int_nvvm_suld_1d_v2i8_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v2i8.trap">;
+def int_nvvm_suld_1d_v2i16_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v2i16.trap">;
+def int_nvvm_suld_1d_v2i32_trap
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v2i32.trap">;
+def int_nvvm_suld_1d_v4i8_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v4i8.trap">;
+def int_nvvm_suld_1d_v4i16_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v4i16.trap">;
+def int_nvvm_suld_1d_v4i32_trap
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v4i32.trap">;
+
+def int_nvvm_suld_1d_array_i8_trap
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.i8.trap">;
+def int_nvvm_suld_1d_array_i16_trap
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.i16.trap">;
+def int_nvvm_suld_1d_array_i32_trap
+ : Intrinsic<[llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.i32.trap">;
+def int_nvvm_suld_1d_array_v2i8_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v2i8.trap">;
+def int_nvvm_suld_1d_array_v2i16_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v2i16.trap">;
+def int_nvvm_suld_1d_array_v2i32_trap
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v2i32.trap">;
+def int_nvvm_suld_1d_array_v4i8_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v4i8.trap">;
+def int_nvvm_suld_1d_array_v4i16_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v4i16.trap">;
+def int_nvvm_suld_1d_array_v4i32_trap
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v4i32.trap">;
+
+def int_nvvm_suld_2d_i8_trap
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.i8.trap">;
+def int_nvvm_suld_2d_i16_trap
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.i16.trap">;
+def int_nvvm_suld_2d_i32_trap
+ : Intrinsic<[llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.i32.trap">;
+def int_nvvm_suld_2d_v2i8_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v2i8.trap">;
+def int_nvvm_suld_2d_v2i16_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v2i16.trap">;
+def int_nvvm_suld_2d_v2i32_trap
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v2i32.trap">;
+def int_nvvm_suld_2d_v4i8_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v4i8.trap">;
+def int_nvvm_suld_2d_v4i16_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v4i16.trap">;
+def int_nvvm_suld_2d_v4i32_trap
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v4i32.trap">;
+
+def int_nvvm_suld_2d_array_i8_trap
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.i8.trap">;
+def int_nvvm_suld_2d_array_i16_trap
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.i16.trap">;
+def int_nvvm_suld_2d_array_i32_trap
+ : Intrinsic<[llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.i32.trap">;
+def int_nvvm_suld_2d_array_v2i8_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v2i8.trap">;
+def int_nvvm_suld_2d_array_v2i16_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v2i16.trap">;
+def int_nvvm_suld_2d_array_v2i32_trap
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v2i32.trap">;
+def int_nvvm_suld_2d_array_v4i8_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v4i8.trap">;
+def int_nvvm_suld_2d_array_v4i16_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v4i16.trap">;
+def int_nvvm_suld_2d_array_v4i32_trap
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v4i32.trap">;
+
+def int_nvvm_suld_3d_i8_trap
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.i8.trap">;
+def int_nvvm_suld_3d_i16_trap
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.i16.trap">;
+def int_nvvm_suld_3d_i32_trap
+ : Intrinsic<[llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.i32.trap">;
+def int_nvvm_suld_3d_v2i8_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v2i8.trap">;
+def int_nvvm_suld_3d_v2i16_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v2i16.trap">;
+def int_nvvm_suld_3d_v2i32_trap
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v2i32.trap">;
+def int_nvvm_suld_3d_v4i8_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v4i8.trap">;
+def int_nvvm_suld_3d_v4i16_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v4i16.trap">;
+def int_nvvm_suld_3d_v4i32_trap
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v4i32.trap">;
+
+//===- Texture Query ------------------------------------------------------===//
+
+def int_nvvm_txq_channel_order
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.txq.channel.order">,
+ GCCBuiltin<"__nvvm_txq_channel_order">;
+def int_nvvm_txq_channel_data_type
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.txq.channel.data.type">,
+ GCCBuiltin<"__nvvm_txq_channel_data_type">;
+def int_nvvm_txq_width
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.txq.width">,
+ GCCBuiltin<"__nvvm_txq_width">;
+def int_nvvm_txq_height
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.txq.height">,
+ GCCBuiltin<"__nvvm_txq_height">;
+def int_nvvm_txq_depth
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.txq.depth">,
+ GCCBuiltin<"__nvvm_txq_depth">;
+def int_nvvm_txq_array_size
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.txq.array.size">,
+ GCCBuiltin<"__nvvm_txq_array_size">;
+def int_nvvm_txq_num_samples
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.txq.num.samples">,
+ GCCBuiltin<"__nvvm_txq_num_samples">;
+def int_nvvm_txq_num_mipmap_levels
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.txq.num.mipmap.levels">,
+ GCCBuiltin<"__nvvm_txq_num_mipmap_levels">;
+
+//===- Surface Query ------------------------------------------------------===//
+
+def int_nvvm_suq_channel_order
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.suq.channel.order">,
+ GCCBuiltin<"__nvvm_suq_channel_order">;
+def int_nvvm_suq_channel_data_type
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.suq.channel.data.type">,
+ GCCBuiltin<"__nvvm_suq_channel_data_type">;
+def int_nvvm_suq_width
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.suq.width">,
+ GCCBuiltin<"__nvvm_suq_width">;
+def int_nvvm_suq_height
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.suq.height">,
+ GCCBuiltin<"__nvvm_suq_height">;
+def int_nvvm_suq_depth
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.suq.depth">,
+ GCCBuiltin<"__nvvm_suq_depth">;
+def int_nvvm_suq_array_size
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.suq.array.size">,
+ GCCBuiltin<"__nvvm_suq_array_size">;
+
+
+//===- Handle Query -------------------------------------------------------===//
+
+def int_nvvm_istypep_sampler
+ : Intrinsic<[llvm_i1_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.istypep.sampler">,
+ GCCBuiltin<"__nvvm_istypep_sampler">;
+def int_nvvm_istypep_surface
+ : Intrinsic<[llvm_i1_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.istypep.surface">,
+ GCCBuiltin<"__nvvm_istypep_surface">;
+def int_nvvm_istypep_texture
+ : Intrinsic<[llvm_i1_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.istypep.texture">,
+ GCCBuiltin<"__nvvm_istypep_texture">;
+
+
+
+//===- Surface Stores -----------------------------------------------------===//
+
+// Unformatted
+
+def int_nvvm_sust_b_1d_i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_i8_trap">;
+def int_nvvm_sust_b_1d_i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_i16_trap">;
+def int_nvvm_sust_b_1d_i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.1d.i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_i32_trap">;
+def int_nvvm_sust_b_1d_v2i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.v2i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v2i8_trap">;
+def int_nvvm_sust_b_1d_v2i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.v2i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v2i16_trap">;
+def int_nvvm_sust_b_1d_v2i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.1d.v2i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v2i32_trap">;
+def int_nvvm_sust_b_1d_v4i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.v4i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v4i8_trap">;
+def int_nvvm_sust_b_1d_v4i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.v4i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v4i16_trap">;
+def int_nvvm_sust_b_1d_v4i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.1d.v4i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v4i32_trap">;
+
+
+def int_nvvm_sust_b_1d_array_i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.array.i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_i8_trap">;
+def int_nvvm_sust_b_1d_array_i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.array.i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_i16_trap">;
+def int_nvvm_sust_b_1d_array_i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.1d.array.i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_i32_trap">;
+def int_nvvm_sust_b_1d_array_v2i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v2i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v2i8_trap">;
+def int_nvvm_sust_b_1d_array_v2i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v2i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v2i16_trap">;
+def int_nvvm_sust_b_1d_array_v2i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v2i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v2i32_trap">;
+def int_nvvm_sust_b_1d_array_v4i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v4i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v4i8_trap">;
+def int_nvvm_sust_b_1d_array_v4i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v4i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v4i16_trap">;
+def int_nvvm_sust_b_1d_array_v4i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v4i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v4i32_trap">;
+
+
+def int_nvvm_sust_b_2d_i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_i8_trap">;
+def int_nvvm_sust_b_2d_i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_i16_trap">;
+def int_nvvm_sust_b_2d_i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.2d.i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_i32_trap">;
+def int_nvvm_sust_b_2d_v2i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.v2i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v2i8_trap">;
+def int_nvvm_sust_b_2d_v2i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.v2i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v2i16_trap">;
+def int_nvvm_sust_b_2d_v2i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.2d.v2i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v2i32_trap">;
+def int_nvvm_sust_b_2d_v4i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.v4i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v4i8_trap">;
+def int_nvvm_sust_b_2d_v4i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.v4i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v4i16_trap">;
+def int_nvvm_sust_b_2d_v4i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.2d.v4i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v4i32_trap">;
+
+
+def int_nvvm_sust_b_2d_array_i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.array.i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_i8_trap">;
+def int_nvvm_sust_b_2d_array_i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.array.i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_i16_trap">;
+def int_nvvm_sust_b_2d_array_i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.2d.array.i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_i32_trap">;
+def int_nvvm_sust_b_2d_array_v2i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v2i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v2i8_trap">;
+def int_nvvm_sust_b_2d_array_v2i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v2i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v2i16_trap">;
+def int_nvvm_sust_b_2d_array_v2i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v2i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v2i32_trap">;
+def int_nvvm_sust_b_2d_array_v4i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v4i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v4i8_trap">;
+def int_nvvm_sust_b_2d_array_v4i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v4i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v4i16_trap">;
+def int_nvvm_sust_b_2d_array_v4i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v4i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v4i32_trap">;
+
+
+def int_nvvm_sust_b_3d_i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.3d.i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_3d_i8_trap">;
+def int_nvvm_sust_b_3d_i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.3d.i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_3d_i16_trap">;
+def int_nvvm_sust_b_3d_i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.3d.i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_3d_i32_trap">;
+def int_nvvm_sust_b_3d_v2i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.3d.v2i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v2i8_trap">;
+def int_nvvm_sust_b_3d_v2i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.3d.v2i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v2i16_trap">;
+def int_nvvm_sust_b_3d_v2i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.3d.v2i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v2i32_trap">;
+def int_nvvm_sust_b_3d_v4i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.3d.v4i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v4i8_trap">;
+def int_nvvm_sust_b_3d_v4i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.3d.v4i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v4i16_trap">;
+def int_nvvm_sust_b_3d_v4i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.3d.v4i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v4i32_trap">;
+
+// Formatted
+
+def int_nvvm_sust_p_1d_i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.1d.i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_i8_trap">;
+def int_nvvm_sust_p_1d_i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.1d.i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_i16_trap">;
+def int_nvvm_sust_p_1d_i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.1d.i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_i32_trap">;
+def int_nvvm_sust_p_1d_v2i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.1d.v2i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_v2i8_trap">;
+def int_nvvm_sust_p_1d_v2i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.1d.v2i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_v2i16_trap">;
+def int_nvvm_sust_p_1d_v2i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.1d.v2i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_v2i32_trap">;
+def int_nvvm_sust_p_1d_v4i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.1d.v4i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_v4i8_trap">;
+def int_nvvm_sust_p_1d_v4i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.1d.v4i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_v4i16_trap">;
+def int_nvvm_sust_p_1d_v4i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.1d.v4i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_v4i32_trap">;
+
+
+def int_nvvm_sust_p_1d_array_i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.1d.array.i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_array_i8_trap">;
+def int_nvvm_sust_p_1d_array_i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.1d.array.i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_array_i16_trap">;
+def int_nvvm_sust_p_1d_array_i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.1d.array.i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_array_i32_trap">;
+def int_nvvm_sust_p_1d_array_v2i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.1d.array.v2i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_array_v2i8_trap">;
+def int_nvvm_sust_p_1d_array_v2i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.1d.array.v2i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_array_v2i16_trap">;
+def int_nvvm_sust_p_1d_array_v2i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.1d.array.v2i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_array_v2i32_trap">;
+def int_nvvm_sust_p_1d_array_v4i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.1d.array.v4i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_array_v4i8_trap">;
+def int_nvvm_sust_p_1d_array_v4i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.1d.array.v4i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_array_v4i16_trap">;
+def int_nvvm_sust_p_1d_array_v4i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.1d.array.v4i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_array_v4i32_trap">;
+
+
+def int_nvvm_sust_p_2d_i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.2d.i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_i8_trap">;
+def int_nvvm_sust_p_2d_i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.2d.i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_i16_trap">;
+def int_nvvm_sust_p_2d_i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.2d.i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_i32_trap">;
+def int_nvvm_sust_p_2d_v2i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.2d.v2i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_v2i8_trap">;
+def int_nvvm_sust_p_2d_v2i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.2d.v2i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_v2i16_trap">;
+def int_nvvm_sust_p_2d_v2i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.2d.v2i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_v2i32_trap">;
+def int_nvvm_sust_p_2d_v4i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.2d.v4i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_v4i8_trap">;
+def int_nvvm_sust_p_2d_v4i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.2d.v4i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_v4i16_trap">;
+def int_nvvm_sust_p_2d_v4i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.2d.v4i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_v4i32_trap">;
+
+
+def int_nvvm_sust_p_2d_array_i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.2d.array.i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_array_i8_trap">;
+def int_nvvm_sust_p_2d_array_i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.2d.array.i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_array_i16_trap">;
+def int_nvvm_sust_p_2d_array_i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.2d.array.i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_array_i32_trap">;
+def int_nvvm_sust_p_2d_array_v2i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.2d.array.v2i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_array_v2i8_trap">;
+def int_nvvm_sust_p_2d_array_v2i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.2d.array.v2i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_array_v2i16_trap">;
+def int_nvvm_sust_p_2d_array_v2i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.2d.array.v2i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_array_v2i32_trap">;
+def int_nvvm_sust_p_2d_array_v4i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.2d.array.v4i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_array_v4i8_trap">;
+def int_nvvm_sust_p_2d_array_v4i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.2d.array.v4i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_array_v4i16_trap">;
+def int_nvvm_sust_p_2d_array_v4i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.2d.array.v4i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_array_v4i32_trap">;
+
+
+def int_nvvm_sust_p_3d_i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.3d.i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_3d_i8_trap">;
+def int_nvvm_sust_p_3d_i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.3d.i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_3d_i16_trap">;
+def int_nvvm_sust_p_3d_i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.3d.i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_3d_i32_trap">;
+def int_nvvm_sust_p_3d_v2i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.3d.v2i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_3d_v2i8_trap">;
+def int_nvvm_sust_p_3d_v2i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.3d.v2i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_3d_v2i16_trap">;
+def int_nvvm_sust_p_3d_v2i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.3d.v2i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_3d_v2i32_trap">;
+def int_nvvm_sust_p_3d_v4i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.3d.v4i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_3d_v4i8_trap">;
+def int_nvvm_sust_p_3d_v4i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.3d.v4i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_3d_v4i16_trap">;
+def int_nvvm_sust_p_3d_v4i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.3d.v4i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_3d_v4i32_trap">;
+
+
+
// Old PTX back-end intrinsics retained here for backwards-compatibility
multiclass PTXReadSpecialRegisterIntrinsic_v4i32<string prefix> {
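
The NVVM definitions added above are reached from CUDA device code through the builtins named in their GCCBuiltin annotations. The fragment below is a hypothetical device-side sketch in C syntax, assuming the builtin prototypes mirror the intrinsic signatures shown here (an i64 surface handle, i32 coordinates and values) and that the x coordinate of the unformatted 1-D surface store is a byte offset; it requires the NVPTX target to compile and is illustrative only.

/* Clamp a texel index against the surface width (llvm.nvvm.suq.width) and
   store one 32-bit value via the unformatted 1-D surface store
   (llvm.nvvm.sust.b.1d.i32.trap). */
void store_clamped(long long surf, int x, int value) {
  int width = __nvvm_suq_width(surf);            /* i32 result, i64 handle argument */
  if (x >= width)
    x = width - 1;
  __nvvm_sust_b_1d_i32_trap(surf, x * 4, value); /* x scaled to a byte offset (assumption) */
}
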
diff --git a/include/llvm/IR/IntrinsicsX86.td b/include/llvm/IR/IntrinsicsX86.td
index 8f64b5d..36d93fe 100644
--- a/include/llvm/IR/IntrinsicsX86.td
+++ b/include/llvm/IR/IntrinsicsX86.td
@@ -18,6 +18,15 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
}
//===----------------------------------------------------------------------===//
+// Read Time Stamp Counter.
+let TargetPrefix = "x86" in {
+ def int_x86_rdtsc : GCCBuiltin<"__builtin_ia32_rdtsc">,
+ Intrinsic<[llvm_i64_ty], [], []>;
+ def int_x86_rdtscp : GCCBuiltin<"__builtin_ia32_rdtscp">,
+ Intrinsic<[llvm_i64_ty], [llvm_ptr_ty], [IntrReadWriteArgMem]>;
+}
+
+//===----------------------------------------------------------------------===//
// 3DNow!
let TargetPrefix = "x86" in {
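
The new int_x86_rdtsc and int_x86_rdtscp definitions earlier in this hunk are exposed to C through the listed GCC builtins. A minimal sketch, assuming the usual prototypes (rdtsc returns the 64-bit counter, rdtscp additionally writes the IA32_TSC_AUX value through its pointer argument):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  unsigned int aux = 0;
  uint64_t start = __builtin_ia32_rdtsc();        /* lowers to llvm.x86.rdtsc */
  uint64_t end   = __builtin_ia32_rdtscp(&aux);   /* lowers to llvm.x86.rdtscp */
  printf("elapsed cycles: %llu, aux=%u\n", (unsigned long long)(end - start), aux);
  return 0;
}
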
@@ -1120,6 +1129,27 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
GCCBuiltin<"__builtin_ia32_vperm2f128_si256">,
Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermt_d_512:
+ GCCBuiltin<"__builtin_ia32_vpermt2vard512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+ llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermt_q_512:
+ GCCBuiltin<"__builtin_ia32_vpermt2varq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+ llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermt_ps_512:
+ GCCBuiltin<"__builtin_ia32_vpermt2varps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16i32_ty,
+ llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermt_pd_512:
+ GCCBuiltin<"__builtin_ia32_vpermt2varpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8i64_ty,
+ llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty], [IntrNoMem]>;
+
}
// Vector blend
@@ -2999,141 +3029,104 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Gather and Scatter ops
let TargetPrefix = "x86" in {
- def int_x86_avx512_gather_dpd_mask_512 : GCCBuiltin<"__builtin_ia32_mask_gatherdpd512">,
- Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_i8_ty,
- llvm_v8i32_ty, llvm_ptr_ty, llvm_i32_ty],
+ def int_x86_avx512_gather_dpd_512 : GCCBuiltin<"__builtin_ia32_gathersiv8df">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_ptr_ty,
+ llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
[IntrReadArgMem]>;
- def int_x86_avx512_gather_dps_mask_512 : GCCBuiltin<"__builtin_ia32_mask_gatherdps512">,
- Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_i16_ty,
- llvm_v16i32_ty, llvm_ptr_ty, llvm_i32_ty],
+ def int_x86_avx512_gather_dps_512 : GCCBuiltin<"__builtin_ia32_gathersiv16sf">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_ptr_ty,
+ llvm_v16i32_ty, llvm_i16_ty, llvm_i32_ty],
[IntrReadArgMem]>;
- def int_x86_avx512_gather_qpd_mask_512 : GCCBuiltin<"__builtin_ia32_mask_gatherqpd512">,
- Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_i8_ty,
- llvm_v8i64_ty, llvm_ptr_ty, llvm_i32_ty],
+ def int_x86_avx512_gather_qpd_512 : GCCBuiltin<"__builtin_ia32_gatherdiv8df">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_ptr_ty,
+ llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
[IntrReadArgMem]>;
- def int_x86_avx512_gather_qps_mask_512 : GCCBuiltin<"__builtin_ia32_mask_gatherqps512">,
- Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_i8_ty,
- llvm_v8i64_ty, llvm_ptr_ty, llvm_i32_ty],
+ def int_x86_avx512_gather_qps_512 : GCCBuiltin<"__builtin_ia32_gatherdiv16sf">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_ptr_ty,
+ llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
[IntrReadArgMem]>;
- def int_x86_avx512_gather_dpd_512 : GCCBuiltin<"__builtin_ia32_gatherdpd512">,
- Intrinsic<[llvm_v8f64_ty], [llvm_v8i32_ty, llvm_ptr_ty,
- llvm_i32_ty],
- [IntrReadArgMem]>;
- def int_x86_avx512_gather_dps_512 : GCCBuiltin<"__builtin_ia32_gatherdps512">,
- Intrinsic<[llvm_v16f32_ty], [llvm_v16i32_ty, llvm_ptr_ty,
- llvm_i32_ty],
- [IntrReadArgMem]>;
- def int_x86_avx512_gather_qpd_512 : GCCBuiltin<"__builtin_ia32_gatherqpd512">,
- Intrinsic<[llvm_v8f64_ty], [llvm_v8i64_ty, llvm_ptr_ty,
- llvm_i32_ty],
- [IntrReadArgMem]>;
- def int_x86_avx512_gather_qps_512 : GCCBuiltin<"__builtin_ia32_gatherqps512">,
- Intrinsic<[llvm_v8f32_ty], [llvm_v8i64_ty, llvm_ptr_ty,
- llvm_i32_ty],
- [IntrReadArgMem]>;
-
- def int_x86_avx512_gather_dpq_mask_512 : GCCBuiltin<"__builtin_ia32_mask_gatherdpq512">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_i8_ty,
- llvm_v8i32_ty, llvm_ptr_ty, llvm_i32_ty],
- [IntrReadArgMem]>;
- def int_x86_avx512_gather_dpi_mask_512 : GCCBuiltin<"__builtin_ia32_mask_gatherdpi512">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_i16_ty,
- llvm_v16i32_ty, llvm_ptr_ty, llvm_i32_ty],
- [IntrReadArgMem]>;
- def int_x86_avx512_gather_qpq_mask_512 : GCCBuiltin<"__builtin_ia32_mask_gatherqpq512">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_i8_ty,
- llvm_v8i64_ty, llvm_ptr_ty, llvm_i32_ty],
- [IntrReadArgMem]>;
- def int_x86_avx512_gather_qpi_mask_512 : GCCBuiltin<"__builtin_ia32_mask_gatherqpi512">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_i8_ty,
- llvm_v8i64_ty, llvm_ptr_ty, llvm_i32_ty],
- [IntrReadArgMem]>;
- def int_x86_avx512_gather_dpq_512 : GCCBuiltin<"__builtin_ia32_gatherdpq512">,
- Intrinsic<[llvm_v8i64_ty], [llvm_v8i32_ty, llvm_ptr_ty,
- llvm_i32_ty],
+ def int_x86_avx512_gather_dpq_512 : GCCBuiltin<"__builtin_ia32_gathersiv8di">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_ptr_ty,
+ llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
[IntrReadArgMem]>;
- def int_x86_avx512_gather_dpi_512 : GCCBuiltin<"__builtin_ia32_gatherdpi512">,
- Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_ptr_ty,
- llvm_i32_ty],
+ def int_x86_avx512_gather_dpi_512 : GCCBuiltin<"__builtin_ia32_gathersiv16si">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_ptr_ty,
+ llvm_v16i32_ty, llvm_i16_ty, llvm_i32_ty],
[IntrReadArgMem]>;
- def int_x86_avx512_gather_qpq_512 : GCCBuiltin<"__builtin_ia32_gatherqpq512">,
+ def int_x86_avx512_gather_qpq_512 : GCCBuiltin<"__builtin_ia32_gatherdiv8di">,
Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_ptr_ty,
- llvm_i32_ty],
+ llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
[IntrReadArgMem]>;
- def int_x86_avx512_gather_qpi_512 : GCCBuiltin<"__builtin_ia32_gatherqpi512">,
- Intrinsic<[llvm_v8i32_ty], [llvm_v8i64_ty, llvm_ptr_ty,
- llvm_i32_ty],
+ def int_x86_avx512_gather_qpi_512 : GCCBuiltin<"__builtin_ia32_gatherdiv16si">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_ptr_ty,
+ llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
[IntrReadArgMem]>;
+
// scatter
- def int_x86_avx512_scatter_dpd_mask_512 : GCCBuiltin<"__builtin_ia32_mask_scatterdpd512">,
+ def int_x86_avx512_scatter_dpd_512 : GCCBuiltin<"__builtin_ia32_scattersiv8df">,
Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,
llvm_v8i32_ty, llvm_v8f64_ty, llvm_i32_ty],
[IntrReadWriteArgMem]>;
- def int_x86_avx512_scatter_dps_mask_512 : GCCBuiltin<"__builtin_ia32_mask_scatterdps512">,
+ def int_x86_avx512_scatter_dps_512 : GCCBuiltin<"__builtin_ia32_scattersiv16sf">,
Intrinsic<[], [llvm_ptr_ty, llvm_i16_ty,
llvm_v16i32_ty, llvm_v16f32_ty, llvm_i32_ty],
[IntrReadWriteArgMem]>;
- def int_x86_avx512_scatter_qpd_mask_512 : GCCBuiltin<"__builtin_ia32_mask_scatterqpd512">,
+ def int_x86_avx512_scatter_qpd_512 : GCCBuiltin<"__builtin_ia32_scatterdiv8df">,
Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,
llvm_v8i64_ty, llvm_v8f64_ty, llvm_i32_ty],
[IntrReadWriteArgMem]>;
- def int_x86_avx512_scatter_qps_mask_512 : GCCBuiltin<"__builtin_ia32_mask_scatterqps512">,
+ def int_x86_avx512_scatter_qps_512 : GCCBuiltin<"__builtin_ia32_scatterdiv16sf">,
Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,
llvm_v8i64_ty, llvm_v8f32_ty, llvm_i32_ty],
[IntrReadWriteArgMem]>;
- def int_x86_avx512_scatter_dpd_512 : GCCBuiltin<"__builtin_ia32_scatterdpd512">,
- Intrinsic<[], [llvm_ptr_ty, llvm_v8i32_ty, llvm_v8f64_ty,
- llvm_i32_ty],
- [IntrReadWriteArgMem]>;
- def int_x86_avx512_scatter_dps_512 : GCCBuiltin<"__builtin_ia32_scatterdps512">,
- Intrinsic<[], [llvm_ptr_ty, llvm_v16i32_ty, llvm_v16f32_ty,
- llvm_i32_ty],
- [IntrReadWriteArgMem]>;
- def int_x86_avx512_scatter_qpd_512 : GCCBuiltin<"__builtin_ia32_scatterqpd512">,
- Intrinsic<[], [llvm_ptr_ty, llvm_v8i64_ty, llvm_v8f64_ty,
- llvm_i32_ty],
- [IntrReadWriteArgMem]>;
- def int_x86_avx512_scatter_qps_512 : GCCBuiltin<"__builtin_ia32_scatterqps512">,
- Intrinsic<[], [llvm_ptr_ty, llvm_v8i64_ty, llvm_v8f32_ty,
- llvm_i32_ty],
- [IntrReadWriteArgMem]>;
- def int_x86_avx512_scatter_dpq_mask_512 : GCCBuiltin<"__builtin_ia32_mask_scatterdpq512">,
- Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty, llvm_v8i32_ty,
- llvm_v8i64_ty, llvm_i32_ty],
+ def int_x86_avx512_scatter_dpq_512 : GCCBuiltin<"__builtin_ia32_scattersiv8di">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,
+ llvm_v8i32_ty, llvm_v8i64_ty, llvm_i32_ty],
[IntrReadWriteArgMem]>;
- def int_x86_avx512_scatter_dpi_mask_512 : GCCBuiltin<"__builtin_ia32_mask_scatterdpi512">,
+ def int_x86_avx512_scatter_dpi_512 : GCCBuiltin<"__builtin_ia32_scattersiv16si">,
Intrinsic<[], [llvm_ptr_ty, llvm_i16_ty,
llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty],
[IntrReadWriteArgMem]>;
- def int_x86_avx512_scatter_qpq_mask_512 : GCCBuiltin<"__builtin_ia32_mask_scatterqpq512">,
- Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,
- llvm_v8i64_ty, llvm_v8i64_ty, llvm_i32_ty],
+ def int_x86_avx512_scatter_qpq_512 : GCCBuiltin<"__builtin_ia32_scatterdiv8di">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty, llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_i32_ty],
[IntrReadWriteArgMem]>;
- def int_x86_avx512_scatter_qpi_mask_512 : GCCBuiltin<"__builtin_ia32_mask_scatterqpi512">,
- Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,
- llvm_v8i64_ty, llvm_v8i32_ty, llvm_i32_ty],
+ def int_x86_avx512_scatter_qpi_512 : GCCBuiltin<"__builtin_ia32_scatterdiv16si">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty, llvm_v8i64_ty, llvm_v8i32_ty,
+ llvm_i32_ty],
[IntrReadWriteArgMem]>;
- def int_x86_avx512_scatter_dpq_512 : GCCBuiltin<"__builtin_ia32_scatterdpq512">,
- Intrinsic<[], [llvm_ptr_ty,
- llvm_v8i32_ty, llvm_v8i64_ty, llvm_i32_ty],
- []>;
- def int_x86_avx512_scatter_dpi_512 : GCCBuiltin<"__builtin_ia32_scatterdpi512">,
- Intrinsic<[], [llvm_ptr_ty,
- llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty],
- []>;
- def int_x86_avx512_scatter_qpq_512 : GCCBuiltin<"__builtin_ia32_scatterqpq512">,
- Intrinsic<[], [llvm_ptr_ty, llvm_v8i64_ty, llvm_v8i64_ty,
- llvm_i32_ty],
- []>;
- def int_x86_avx512_scatter_qpi_512 : GCCBuiltin<"__builtin_ia32_scatterqpi512">,
- Intrinsic<[], [llvm_ptr_ty, llvm_v8i64_ty, llvm_v8i32_ty,
- llvm_i32_ty],
- []>;
+ // gather prefetch
+ def int_x86_avx512_gatherpf_dpd_512 : GCCBuiltin<"__builtin_ia32_gatherpfdpd">,
+ Intrinsic<[], [llvm_i8_ty, llvm_v8i32_ty, llvm_ptr_ty,
+ llvm_i32_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx512_gatherpf_dps_512 : GCCBuiltin<"__builtin_ia32_gatherpfdps">,
+ Intrinsic<[], [llvm_i16_ty, llvm_v16i32_ty, llvm_ptr_ty,
+ llvm_i32_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx512_gatherpf_qpd_512 : GCCBuiltin<"__builtin_ia32_gatherpfqpd">,
+ Intrinsic<[], [llvm_i8_ty, llvm_v8i64_ty, llvm_ptr_ty,
+ llvm_i32_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx512_gatherpf_qps_512 : GCCBuiltin<"__builtin_ia32_gatherpfqps">,
+ Intrinsic<[], [llvm_i8_ty, llvm_v8i64_ty, llvm_ptr_ty,
+ llvm_i32_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+
+ // scatter prefetch
+ def int_x86_avx512_scatterpf_dpd_512 : GCCBuiltin<"__builtin_ia32_scatterpfdpd">,
+ Intrinsic<[], [llvm_i8_ty, llvm_v8i32_ty, llvm_ptr_ty,
+ llvm_i32_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx512_scatterpf_dps_512 : GCCBuiltin<"__builtin_ia32_scatterpfdps">,
+ Intrinsic<[], [llvm_i16_ty, llvm_v16i32_ty, llvm_ptr_ty,
+ llvm_i32_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx512_scatterpf_qpd_512 : GCCBuiltin<"__builtin_ia32_scatterpfqpd">,
+ Intrinsic<[], [llvm_i8_ty, llvm_v8i64_ty, llvm_ptr_ty,
+ llvm_i32_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx512_scatterpf_qps_512 : GCCBuiltin<"__builtin_ia32_scatterpfqps">,
+ Intrinsic<[], [llvm_i8_ty, llvm_v8i64_ty, llvm_ptr_ty,
+ llvm_i32_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
}
// AVX-512 conflict detection
diff --git a/include/llvm/IR/LLVMContext.h b/include/llvm/IR/LLVMContext.h
index ae4859a..4d940d5 100644
--- a/include/llvm/IR/LLVMContext.h
+++ b/include/llvm/IR/LLVMContext.h
@@ -29,6 +29,8 @@ class Module;
class SMDiagnostic;
class DiagnosticInfo;
template <typename T> class SmallVectorImpl;
+class Function;
+class DebugLoc;
/// This is an important class for using LLVM in a threaded context. It
/// (opaquely) owns and manages the core "global" data of LLVM's core
@@ -70,6 +72,10 @@ public:
/// \see LLVMContext::diagnose.
typedef void (*DiagnosticHandlerTy)(const DiagnosticInfo &DI, void *Context);
+ /// Defines the type of a yield callback.
+ /// \see LLVMContext::setYieldCallback.
+ typedef void (*YieldCallbackTy)(LLVMContext *Context, void *OpaqueHandle);
+
/// setInlineAsmDiagnosticHandler - This method sets a handler that is invoked
/// when problems with inline asm are detected by the backend. The first
/// argument is a function pointer and the second is a context pointer that
@@ -78,7 +84,7 @@ public:
/// LLVMContext doesn't take ownership or interpret either of these
/// pointers.
void setInlineAsmDiagnosticHandler(InlineAsmDiagHandlerTy DiagHandler,
- void *DiagContext = 0);
+ void *DiagContext = nullptr);
/// getInlineAsmDiagnosticHandler - Return the diagnostic handler set by
/// setInlineAsmDiagnosticHandler.
@@ -96,7 +102,7 @@ public:
/// LLVMContext doesn't take ownership or interpret either of these
/// pointers.
void setDiagnosticHandler(DiagnosticHandlerTy DiagHandler,
- void *DiagContext = 0);
+ void *DiagContext = nullptr);
/// getDiagnosticHandler - Return the diagnostic handler set by
/// setDiagnosticHandler.
@@ -116,6 +122,32 @@ public:
/// for RS_Error, "warning: " for RS_Warning, and "note: " for RS_Note.
void diagnose(const DiagnosticInfo &DI);
+ /// \brief Registers a yield callback with the given context.
+ ///
+ /// The yield callback function may be called by LLVM to transfer control back
+ /// to the client that invoked the LLVM compilation. This can be used to yield
+ /// control of the thread, or perform periodic work needed by the client.
+ /// There is no guaranteed frequency at which callbacks must occur; in fact,
+ /// the client is not guaranteed to ever receive this callback. It is at the
+ /// sole discretion of LLVM to do so and only if it can guarantee that
+ /// suspending the thread won't block any forward progress in other LLVM
+ /// contexts in the same process.
+ ///
+ /// At a suspend point, the state of the current LLVM context is intentionally
+ /// undefined. No assumptions about it can or should be made. Only LLVM
+ /// context API calls that explicitly state that they can be used during a
+ /// yield callback are allowed to be used. Any other API calls into the
+ /// context are not supported until the yield callback function returns
+ /// control to LLVM. Other LLVM contexts are unaffected by this restriction.
+ void setYieldCallback(YieldCallbackTy Callback, void *OpaqueHandle);
+
+ /// \brief Calls the yield callback (if applicable).
+ ///
+ /// This transfers control of the current thread back to the client, which may
+ /// suspend the current thread. Only call this method when LLVM doesn't hold
+ /// any global mutex or cannot block the execution in another LLVM context.
+ void yield();
+
/// emitError - Emit an error message to the currently installed error handler
/// with optional location information. This function returns, so code should
/// be prepared to drop the erroneous construct on the floor and "not crash".
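The yield hooks added above are easiest to read from the client's side. A minimal sketch, assuming a hypothetical cooperative scheduler on the embedding side (MyScheduler, yieldCurrentThread, clientYield and installYield are illustrative names, not LLVM API); the callback signature matches the YieldCallbackTy typedef introduced in this hunk:

    #include "llvm/IR/LLVMContext.h"

    struct MyScheduler {                       // hypothetical client-side type
      void yieldCurrentThread() { /* e.g. std::this_thread::yield() */ }
    };

    // Must match LLVMContext::YieldCallbackTy.
    static void clientYield(llvm::LLVMContext *Ctx, void *OpaqueHandle) {
      // The context state is undefined here; only yield-safe context APIs
      // may be used until this callback returns control to LLVM.
      static_cast<MyScheduler *>(OpaqueHandle)->yieldCurrentThread();
    }

    void installYield(llvm::LLVMContext &Ctx, MyScheduler &Sched) {
      Ctx.setYieldCallback(clientYield, &Sched);
      // From here on, LLVM may (or may never) call clientYield via yield().
    }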
diff --git a/include/llvm/IR/LegacyPassManagers.h b/include/llvm/IR/LegacyPassManagers.h
index 5c9dccd..f6065a4 100644
--- a/include/llvm/IR/LegacyPassManagers.h
+++ b/include/llvm/IR/LegacyPassManagers.h
@@ -120,11 +120,11 @@ class PassManagerPrettyStackEntry : public PrettyStackTraceEntry {
Module *M;
public:
explicit PassManagerPrettyStackEntry(Pass *p)
- : P(p), V(0), M(0) {} // When P is releaseMemory'd.
+ : P(p), V(nullptr), M(nullptr) {} // When P is releaseMemory'd.
PassManagerPrettyStackEntry(Pass *p, Value &v)
- : P(p), V(&v), M(0) {} // When P is run on V
+ : P(p), V(&v), M(nullptr) {} // When P is run on V
PassManagerPrettyStackEntry(Pass *p, Module &m)
- : P(p), V(0), M(&m) {} // When P is run on M
+ : P(p), V(nullptr), M(&m) {} // When P is run on M
/// print - Emit information about this stack frame to OS.
void print(raw_ostream &OS) const override;
@@ -263,7 +263,7 @@ private:
class PMDataManager {
public:
- explicit PMDataManager() : TPM(NULL), Depth(0) {
+ explicit PMDataManager() : TPM(nullptr), Depth(0) {
initializeAnalysisInfo();
}
@@ -303,7 +303,7 @@ public:
void initializeAnalysisInfo() {
AvailableAnalysis.clear();
for (unsigned i = 0; i < PMT_Last; ++i)
- InheritedAnalysis[i] = NULL;
+ InheritedAnalysis[i] = nullptr;
}
// Return true if P preserves high level analysis used by other
@@ -441,7 +441,7 @@ public:
Pass *getAsPass() override { return this; }
/// Pass Manager itself does not invalidate any analysis info.
- void getAnalysisUsage(AnalysisUsage &Info) const override{
+ void getAnalysisUsage(AnalysisUsage &Info) const override {
Info.setPreservesAll();
}
diff --git a/include/llvm/IR/LegacyPassNameParser.h b/include/llvm/IR/LegacyPassNameParser.h
index 1f6bbbc..b72fc4c 100644
--- a/include/llvm/IR/LegacyPassNameParser.h
+++ b/include/llvm/IR/LegacyPassNameParser.h
@@ -43,7 +43,7 @@ class PassNameParser : public PassRegistrationListener,
public cl::parser<const PassInfo*> {
cl::Option *Opt;
public:
- PassNameParser() : Opt(0) {}
+ PassNameParser() : Opt(nullptr) {}
virtual ~PassNameParser();
void initialize(cl::Option &O) {
@@ -62,8 +62,8 @@ public:
inline bool ignorablePass(const PassInfo *P) const {
// Ignore non-selectable and non-constructible passes! Ignore
// non-optimizations.
- return P->getPassArgument() == 0 || *P->getPassArgument() == 0 ||
- P->getNormalCtor() == 0 || ignorablePassImpl(P);
+ return P->getPassArgument() == nullptr || *P->getPassArgument() == 0 ||
+ P->getNormalCtor() == nullptr || ignorablePassImpl(P);
}
// Implement the PassRegistrationListener callbacks used to populate our map
@@ -73,7 +73,7 @@ public:
if (findOption(P->getPassArgument()) != getNumOptions()) {
errs() << "Two passes with the same argument (-"
<< P->getPassArgument() << ") attempted to be registered!\n";
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
addLiteralOption(P->getPassArgument(), P, P->getPassName());
}
diff --git a/include/llvm/IR/MDBuilder.h b/include/llvm/IR/MDBuilder.h
index c07b2bd..37d263b 100644
--- a/include/llvm/IR/MDBuilder.h
+++ b/include/llvm/IR/MDBuilder.h
@@ -15,14 +15,17 @@
#ifndef LLVM_IR_MDBUILDER_H
#define LLVM_IR_MDBUILDER_H
-#include "llvm/IR/Constants.h"
-#include "llvm/IR/DerivedTypes.h"
-#include "llvm/IR/Metadata.h"
+#include "llvm/Support/DataTypes.h"
+#include <utility>
namespace llvm {
class APInt;
+template <typename T> class ArrayRef;
class LLVMContext;
+class MDNode;
+class MDString;
+class StringRef;
class MDBuilder {
LLVMContext &Context;
@@ -31,9 +34,7 @@ public:
MDBuilder(LLVMContext &context) : Context(context) {}
/// \brief Return the given string as metadata.
- MDString *createString(StringRef Str) {
- return MDString::get(Context, Str);
- }
+ MDString *createString(StringRef Str);
//===------------------------------------------------------------------===//
// FPMath metadata.
@@ -42,55 +43,24 @@ public:
/// \brief Return metadata with the given settings. The special value 0.0
/// for the Accuracy parameter indicates the default (maximal precision)
/// setting.
- MDNode *createFPMath(float Accuracy) {
- if (Accuracy == 0.0)
- return 0;
- assert(Accuracy > 0.0 && "Invalid fpmath accuracy!");
- Value *Op = ConstantFP::get(Type::getFloatTy(Context), Accuracy);
- return MDNode::get(Context, Op);
- }
+ MDNode *createFPMath(float Accuracy);
//===------------------------------------------------------------------===//
// Prof metadata.
//===------------------------------------------------------------------===//
/// \brief Return metadata containing two branch weights.
- MDNode *createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight) {
- uint32_t Weights[] = { TrueWeight, FalseWeight };
- return createBranchWeights(Weights);
- }
+ MDNode *createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight);
/// \brief Return metadata containing a number of branch weights.
- MDNode *createBranchWeights(ArrayRef<uint32_t> Weights) {
- assert(Weights.size() >= 2 && "Need at least two branch weights!");
-
- SmallVector<Value *, 4> Vals(Weights.size()+1);
- Vals[0] = createString("branch_weights");
-
- Type *Int32Ty = Type::getInt32Ty(Context);
- for (unsigned i = 0, e = Weights.size(); i != e; ++i)
- Vals[i+1] = ConstantInt::get(Int32Ty, Weights[i]);
-
- return MDNode::get(Context, Vals);
- }
+ MDNode *createBranchWeights(ArrayRef<uint32_t> Weights);
//===------------------------------------------------------------------===//
// Range metadata.
//===------------------------------------------------------------------===//
/// \brief Return metadata describing the range [Lo, Hi).
- MDNode *createRange(const APInt &Lo, const APInt &Hi) {
- assert(Lo.getBitWidth() == Hi.getBitWidth() && "Mismatched bitwidths!");
- // If the range is everything then it is useless.
- if (Hi == Lo)
- return 0;
-
- // Return the range [Lo, Hi).
- Type *Ty = IntegerType::get(Context, Lo.getBitWidth());
- Value *Range[2] = { ConstantInt::get(Ty, Lo), ConstantInt::get(Ty, Hi) };
- return MDNode::get(Context, Range);
- }
-
+ MDNode *createRange(const APInt &Lo, const APInt &Hi);
//===------------------------------------------------------------------===//
// TBAA metadata.
@@ -99,41 +69,17 @@ public:
/// \brief Return metadata appropriate for a TBAA root node. Each returned
/// node is distinct from all other metadata and will never be identified
/// (uniqued) with anything else.
- MDNode *createAnonymousTBAARoot() {
- // To ensure uniqueness the root node is self-referential.
- MDNode *Dummy = MDNode::getTemporary(Context, ArrayRef<Value*>());
- MDNode *Root = MDNode::get(Context, Dummy);
- // At this point we have
- // !0 = metadata !{} <- dummy
- // !1 = metadata !{metadata !0} <- root
- // Replace the dummy operand with the root node itself and delete the dummy.
- Root->replaceOperandWith(0, Root);
- MDNode::deleteTemporary(Dummy);
- // We now have
- // !1 = metadata !{metadata !1} <- self-referential root
- return Root;
- }
+ MDNode *createAnonymousTBAARoot();
/// \brief Return metadata appropriate for a TBAA root node with the given
/// name. This may be identified (uniqued) with other roots with the same
/// name.
- MDNode *createTBAARoot(StringRef Name) {
- return MDNode::get(Context, createString(Name));
- }
+ MDNode *createTBAARoot(StringRef Name);
/// \brief Return metadata for a non-root TBAA node with the given name,
/// parent in the TBAA tree, and value for 'pointsToConstantMemory'.
MDNode *createTBAANode(StringRef Name, MDNode *Parent,
- bool isConstant = false) {
- if (isConstant) {
- Constant *Flags = ConstantInt::get(Type::getInt64Ty(Context), 1);
- Value *Ops[3] = { createString(Name), Parent, Flags };
- return MDNode::get(Context, Ops);
- } else {
- Value *Ops[2] = { createString(Name), Parent };
- return MDNode::get(Context, Ops);
- }
- }
+ bool isConstant = false);
struct TBAAStructField {
uint64_t Offset;
@@ -145,49 +91,23 @@ public:
/// \brief Return metadata for a tbaa.struct node with the given
/// struct field descriptions.
- MDNode *createTBAAStructNode(ArrayRef<TBAAStructField> Fields) {
- SmallVector<Value *, 4> Vals(Fields.size() * 3);
- Type *Int64 = IntegerType::get(Context, 64);
- for (unsigned i = 0, e = Fields.size(); i != e; ++i) {
- Vals[i * 3 + 0] = ConstantInt::get(Int64, Fields[i].Offset);
- Vals[i * 3 + 1] = ConstantInt::get(Int64, Fields[i].Size);
- Vals[i * 3 + 2] = Fields[i].TBAA;
- }
- return MDNode::get(Context, Vals);
- }
+ MDNode *createTBAAStructNode(ArrayRef<TBAAStructField> Fields);
/// \brief Return metadata for a TBAA struct node in the type DAG
/// with the given name, a list of pairs (offset, field type in the type DAG).
- MDNode *createTBAAStructTypeNode(StringRef Name,
- ArrayRef<std::pair<MDNode*, uint64_t> > Fields) {
- SmallVector<Value *, 4> Ops(Fields.size() * 2 + 1);
- Type *Int64 = IntegerType::get(Context, 64);
- Ops[0] = createString(Name);
- for (unsigned i = 0, e = Fields.size(); i != e; ++i) {
- Ops[i * 2 + 1] = Fields[i].first;
- Ops[i * 2 + 2] = ConstantInt::get(Int64, Fields[i].second);
- }
- return MDNode::get(Context, Ops);
- }
+ MDNode *
+ createTBAAStructTypeNode(StringRef Name,
+ ArrayRef<std::pair<MDNode *, uint64_t>> Fields);
/// \brief Return metadata for a TBAA scalar type node with the
/// given name, an offset and a parent in the TBAA type DAG.
MDNode *createTBAAScalarTypeNode(StringRef Name, MDNode *Parent,
- uint64_t Offset = 0) {
- ConstantInt *Off = ConstantInt::get(Type::getInt64Ty(Context), Offset);
- Value *Ops[3] = { createString(Name), Parent, Off };
- return MDNode::get(Context, Ops);
- }
+ uint64_t Offset = 0);
/// \brief Return metadata for a TBAA tag node with the given
/// base type, access type and offset relative to the base type.
MDNode *createTBAAStructTagNode(MDNode *BaseType, MDNode *AccessType,
- uint64_t Offset) {
- Type *Int64 = IntegerType::get(Context, 64);
- Value *Ops[3] = { BaseType, AccessType, ConstantInt::get(Int64, Offset) };
- return MDNode::get(Context, Ops);
- }
-
+ uint64_t Offset);
};
} // end namespace llvm
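Although the bodies now live out of line, the MDBuilder usage pattern is unchanged. A small sketch of attaching branch-weight profile metadata to a conditional branch the caller already has (tagLikelyTaken and the 90/10 split are illustrative; the attachment goes through Instruction::setMetadata with the prof kind):

    #include "llvm/IR/Instructions.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/MDBuilder.h"

    void tagLikelyTaken(llvm::BranchInst *Br) {
      llvm::MDBuilder MDB(Br->getContext());
      // Weight the true successor 90:10 over the false successor.
      llvm::MDNode *Weights = MDB.createBranchWeights(90, 10);
      Br->setMetadata(llvm::LLVMContext::MD_prof, Weights);
    }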
diff --git a/include/llvm/IR/Metadata.h b/include/llvm/IR/Metadata.h
index d054fbb..7a0ca88 100644
--- a/include/llvm/IR/Metadata.h
+++ b/include/llvm/IR/Metadata.h
@@ -218,7 +218,7 @@ class NamedMDNode : public ilist_node<NamedMDNode> {
friend class NamedMDNode;
public:
- op_iterator_impl() : Node(0), Idx(0) { }
+ op_iterator_impl() : Node(nullptr), Idx(0) { }
bool operator==(const op_iterator_impl &o) const { return Idx == o.Idx; }
bool operator!=(const op_iterator_impl &o) const { return Idx != o.Idx; }
@@ -272,7 +272,7 @@ public:
StringRef getName() const;
/// print - Implement operator<< on NamedMDNode.
- void print(raw_ostream &ROS, AssemblyAnnotationWriter *AAW = 0) const;
+ void print(raw_ostream &ROS) const;
/// dump() - Allow printing of NamedMDNodes from the debugger.
void dump() const;
diff --git a/include/llvm/IR/Module.h b/include/llvm/IR/Module.h
index f0d4002..0c309e8 100644
--- a/include/llvm/IR/Module.h
+++ b/include/llvm/IR/Module.h
@@ -285,33 +285,29 @@ public:
/// @name Generic Value Accessors
/// @{
- /// getNamedValue - Return the global value in the module with
- /// the specified name, of arbitrary type. This method returns null
- /// if a global with the specified name is not found.
+ /// Return the global value in the module with the specified name, of
+ /// arbitrary type. This method returns null if a global with the specified
+ /// name is not found.
GlobalValue *getNamedValue(StringRef Name) const;
- /// getMDKindID - Return a unique non-zero ID for the specified metadata kind.
- /// This ID is uniqued across modules in the current LLVMContext.
+ /// Return a unique non-zero ID for the specified metadata kind. This ID is
+ /// uniqued across modules in the current LLVMContext.
unsigned getMDKindID(StringRef Name) const;
- /// getMDKindNames - Populate client supplied SmallVector with the name for
- /// custom metadata IDs registered in this LLVMContext.
+ /// Populate client supplied SmallVector with the name for custom metadata IDs
+ /// registered in this LLVMContext.
void getMDKindNames(SmallVectorImpl<StringRef> &Result) const;
-
- typedef DenseMap<StructType*, unsigned, DenseMapInfo<StructType*> >
- NumeredTypesMapTy;
-
- /// getTypeByName - Return the type with the specified name, or null if there
- /// is none by that name.
+ /// Return the type with the specified name, or null if there is none by that
+ /// name.
StructType *getTypeByName(StringRef Name) const;
/// @}
/// @name Function Accessors
/// @{
- /// getOrInsertFunction - Look up the specified function in the module symbol
- /// table. Four possibilities:
+ /// Look up the specified function in the module symbol table. Four
+ /// possibilities:
/// 1. If it does not exist, add a prototype for the function and return it.
/// 2. If it exists, and has a local linkage, the existing function is
/// renamed and a new one is inserted.
@@ -324,33 +320,32 @@ public:
Constant *getOrInsertFunction(StringRef Name, FunctionType *T);
- /// getOrInsertFunction - Look up the specified function in the module symbol
- /// table. If it does not exist, add a prototype for the function and return
- /// it. This function guarantees to return a constant of pointer to the
- /// specified function type or a ConstantExpr BitCast of that type if the
- /// named function has a different type. This version of the method takes a
- /// null terminated list of function arguments, which makes it easier for
- /// clients to use.
+ /// Look up the specified function in the module symbol table. If it does not
+ /// exist, add a prototype for the function and return it. This function
+ /// guarantees to return a constant of pointer to the specified function type
+ /// or a ConstantExpr BitCast of that type if the named function has a
+ /// different type. This version of the method takes a null terminated list of
+ /// function arguments, which makes it easier for clients to use.
Constant *getOrInsertFunction(StringRef Name,
AttributeSet AttributeList,
Type *RetTy, ...) END_WITH_NULL;
- /// getOrInsertFunction - Same as above, but without the attributes.
+ /// Same as above, but without the attributes.
Constant *getOrInsertFunction(StringRef Name, Type *RetTy, ...)
END_WITH_NULL;
- /// getFunction - Look up the specified function in the module symbol table.
- /// If it does not exist, return null.
+ /// Look up the specified function in the module symbol table. If it does not
+ /// exist, return null.
Function *getFunction(StringRef Name) const;
/// @}
/// @name Global Variable Accessors
/// @{
- /// getGlobalVariable - Look up the specified global variable in the module
- /// symbol table. If it does not exist, return null. If AllowInternal is set
- /// to true, this function will return types that have InternalLinkage. By
- /// default, these types are not returned.
+ /// Look up the specified global variable in the module symbol table. If it
+ /// does not exist, return null. If AllowInternal is set to true, this
+ /// function will return types that have InternalLinkage. By default, these
+ /// types are not returned.
const GlobalVariable *getGlobalVariable(StringRef Name,
bool AllowInternal = false) const {
return const_cast<Module *>(this)->getGlobalVariable(Name, AllowInternal);
@@ -358,9 +353,9 @@ public:
GlobalVariable *getGlobalVariable(StringRef Name, bool AllowInternal = false);
- /// getNamedGlobal - Return the global variable in the module with the
- /// specified name, of arbitrary type. This method returns null if a global
- /// with the specified name is not found.
+ /// Return the global variable in the module with the specified name, of
+ /// arbitrary type. This method returns null if a global with the specified
+ /// name is not found.
GlobalVariable *getNamedGlobal(StringRef Name) {
return getGlobalVariable(Name, true);
}
@@ -368,8 +363,7 @@ public:
return const_cast<Module *>(this)->getNamedGlobal(Name);
}
- /// getOrInsertGlobal - Look up the specified global in the module symbol
- /// table.
+ /// Look up the specified global in the module symbol table.
/// 1. If it does not exist, add a declaration of the global and return it.
/// 2. Else, the global exists but has the wrong type: return the function
/// with a constantexpr cast to the right type.
@@ -381,53 +375,49 @@ public:
/// @name Global Alias Accessors
/// @{
- /// getNamedAlias - Return the global alias in the module with the
- /// specified name, of arbitrary type. This method returns null if a global
- /// with the specified name is not found.
+ /// Return the global alias in the module with the specified name, of
+ /// arbitrary type. This method returns null if a global with the specified
+ /// name is not found.
GlobalAlias *getNamedAlias(StringRef Name) const;
/// @}
/// @name Named Metadata Accessors
/// @{
- /// getNamedMetadata - Return the first NamedMDNode in the module with the
- /// specified name. This method returns null if a NamedMDNode with the
- /// specified name is not found.
+ /// Return the first NamedMDNode in the module with the specified name. This
+ /// method returns null if a NamedMDNode with the specified name is not found.
NamedMDNode *getNamedMetadata(const Twine &Name) const;
- /// getOrInsertNamedMetadata - Return the named MDNode in the module
- /// with the specified name. This method returns a new NamedMDNode if a
- /// NamedMDNode with the specified name is not found.
+ /// Return the named MDNode in the module with the specified name. This method
+ /// returns a new NamedMDNode if a NamedMDNode with the specified name is not
+ /// found.
NamedMDNode *getOrInsertNamedMetadata(StringRef Name);
- /// eraseNamedMetadata - Remove the given NamedMDNode from this module
- /// and delete it.
+ /// Remove the given NamedMDNode from this module and delete it.
void eraseNamedMetadata(NamedMDNode *NMD);
/// @}
/// @name Module Flags Accessors
/// @{
- /// getModuleFlagsMetadata - Returns the module flags in the provided vector.
+ /// Returns the module flags in the provided vector.
void getModuleFlagsMetadata(SmallVectorImpl<ModuleFlagEntry> &Flags) const;
/// Return the corresponding value if Key appears in module flags, otherwise
/// return null.
Value *getModuleFlag(StringRef Key) const;
- /// getModuleFlagsMetadata - Returns the NamedMDNode in the module that
- /// represents module-level flags. This method returns null if there are no
- /// module-level flags.
+ /// Returns the NamedMDNode in the module that represents module-level flags.
+ /// This method returns null if there are no module-level flags.
NamedMDNode *getModuleFlagsMetadata() const;
- /// getOrInsertModuleFlagsMetadata - Returns the NamedMDNode in the module
- /// that represents module-level flags. If module-level flags aren't found,
- /// it creates the named metadata that contains them.
+ /// Returns the NamedMDNode in the module that represents module-level flags.
+ /// If module-level flags aren't found, it creates the named metadata that
+ /// contains them.
NamedMDNode *getOrInsertModuleFlagsMetadata();
- /// addModuleFlag - Add a module-level flag to the module-level flags
- /// metadata. It will create the module-level flags named metadata if it
- /// doesn't already exist.
+ /// Add a module-level flag to the module-level flags metadata. It will create
+ /// the module-level flags named metadata if it doesn't already exist.
void addModuleFlag(ModFlagBehavior Behavior, StringRef Key, Value *Val);
void addModuleFlag(ModFlagBehavior Behavior, StringRef Key, uint32_t Val);
void addModuleFlag(MDNode *Node);
@@ -436,31 +426,31 @@ public:
/// @name Materialization
/// @{
- /// setMaterializer - Sets the GVMaterializer to GVM. This module must not
- /// yet have a Materializer. To reset the materializer for a module that
- /// already has one, call MaterializeAllPermanently first. Destroying this
- /// module will destroy its materializer without materializing any more
- /// GlobalValues. Without destroying the Module, there is no way to detach or
- /// destroy a materializer without materializing all the GVs it controls, to
- /// avoid leaving orphan unmaterialized GVs.
+ /// Sets the GVMaterializer to GVM. This module must not yet have a
+ /// Materializer. To reset the materializer for a module that already has one,
+ /// call MaterializeAllPermanently first. Destroying this module will destroy
+ /// its materializer without materializing any more GlobalValues. Without
+ /// destroying the Module, there is no way to detach or destroy a materializer
+ /// without materializing all the GVs it controls, to avoid leaving orphan
+ /// unmaterialized GVs.
void setMaterializer(GVMaterializer *GVM);
- /// getMaterializer - Retrieves the GVMaterializer, if any, for this Module.
+ /// Retrieves the GVMaterializer, if any, for this Module.
GVMaterializer *getMaterializer() const { return Materializer.get(); }
- /// isMaterializable - True if the definition of GV has yet to be materialized
- /// from the GVMaterializer.
+ /// True if the definition of GV has yet to be materialized from the
+ /// GVMaterializer.
bool isMaterializable(const GlobalValue *GV) const;
- /// isDematerializable - Returns true if this GV was loaded from this Module's
- /// GVMaterializer and the GVMaterializer knows how to dematerialize the GV.
+ /// Returns true if this GV was loaded from this Module's GVMaterializer and
+ /// the GVMaterializer knows how to dematerialize the GV.
bool isDematerializable(const GlobalValue *GV) const;
- /// Materialize - Make sure the GlobalValue is fully read. If the module is
- /// corrupt, this returns true and fills in the optional string with
- /// information about the problem. If successful, this returns false.
- bool Materialize(GlobalValue *GV, std::string *ErrInfo = 0);
- /// Dematerialize - If the GlobalValue is read in, and if the GVMaterializer
- /// supports it, release the memory for the function, and set it up to be
- /// materialized lazily. If !isDematerializable(), this method is a noop.
+ /// Make sure the GlobalValue is fully read. If the module is corrupt, this
+ /// returns true and fills in the optional string with information about the
+ /// problem. If successful, this returns false.
+ bool Materialize(GlobalValue *GV, std::string *ErrInfo = nullptr);
+ /// If the GlobalValue is read in, and if the GVMaterializer supports it,
+ /// release the memory for the function, and set it up to be materialized
+ /// lazily. If !isDematerializable(), this method is a noop.
void Dematerialize(GlobalValue *GV);
/// Make sure all GlobalValues in this Module are fully read.
@@ -598,12 +588,20 @@ public:
/// is delete'd for real. Note that no operations are valid on an object
/// that has "dropped all references", except operator delete.
void dropAllReferences();
+
+/// @}
+/// @name Utility functions for querying Debug information.
+/// @{
+
+ /// \brief Returns the Dwarf Version by checking module flags.
+ unsigned getDwarfVersion() const;
+
/// @}
};
/// A raw_ostream inserter for modules.
inline raw_ostream &operator<<(raw_ostream &O, const Module &M) {
- M.print(O, 0);
+ M.print(O, nullptr);
return O;
}
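The four-way getOrInsertFunction contract spelled out above is worth seeing in code. A minimal sketch that declares, or reuses, a hypothetical i32(i32) runtime hook named "my_hook"; note that the result is a Constant* which may be a bitcast ConstantExpr rather than a plain Function* when a differently typed global of that name already exists:

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/Module.h"

    llvm::Constant *getOrDeclareHook(llvm::Module &M) {
      llvm::LLVMContext &Ctx = M.getContext();
      llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
      llvm::Type *Params[] = {I32};
      llvm::FunctionType *FT =
          llvm::FunctionType::get(I32, Params, /*isVarArg=*/false);
      // Case 1 above: if "my_hook" is absent, a prototype is added and returned.
      return M.getOrInsertFunction("my_hook", FT);
    }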
diff --git a/include/llvm/IR/PassManager.h b/include/llvm/IR/PassManager.h
index c6c530c..cc2a80b 100644
--- a/include/llvm/IR/PassManager.h
+++ b/include/llvm/IR/PassManager.h
@@ -193,7 +193,7 @@ class PassRunAcceptsAnalysisManager {
template <typename T> static BigType f(...);
public:
- enum { Value = sizeof(f<PassT>(0)) == sizeof(SmallType) };
+ enum { Value = sizeof(f<PassT>(nullptr)) == sizeof(SmallType) };
};
/// \brief A template wrapper used to implement the polymorphic API.
@@ -293,7 +293,7 @@ template <typename IRUnitT, typename ResultT> class ResultHasInvalidateMethod {
template <typename T> static BigType f(...);
public:
- enum { Value = sizeof(f<ResultT>(0)) == sizeof(SmallType) };
+ enum { Value = sizeof(f<ResultT>(nullptr)) == sizeof(SmallType) };
};
/// \brief Wrapper to model the analysis result concept.
@@ -480,7 +480,7 @@ public:
///
/// This method should only be called for a single module as there is the
/// expectation that the lifetime of a pass is bounded to that of a module.
- PreservedAnalyses run(Module *M, ModuleAnalysisManager *AM = 0);
+ PreservedAnalyses run(Module *M, ModuleAnalysisManager *AM = nullptr);
template <typename ModulePassT> void addPass(ModulePassT Pass) {
Passes.emplace_back(new ModulePassModel<ModulePassT>(std::move(Pass)));
@@ -524,7 +524,7 @@ public:
Passes.emplace_back(new FunctionPassModel<FunctionPassT>(std::move(Pass)));
}
- PreservedAnalyses run(Function *F, FunctionAnalysisManager *AM = 0);
+ PreservedAnalyses run(Function *F, FunctionAnalysisManager *AM = nullptr);
static StringRef name() { return "FunctionPassManager"; }
@@ -616,7 +616,7 @@ public:
ResultConceptT *ResultConcept =
derived_this()->getCachedResultImpl(PassT::ID(), IR);
if (!ResultConcept)
- return 0;
+ return nullptr;
typedef detail::AnalysisResultModel<IRUnitT, PassT, typename PassT::Result>
ResultModelT;
@@ -987,7 +987,7 @@ public:
/// \brief Runs the function pass across every function in the module.
PreservedAnalyses run(Module *M, ModuleAnalysisManager *AM) {
- FunctionAnalysisManager *FAM = 0;
+ FunctionAnalysisManager *FAM = nullptr;
if (AM)
// Setup the function analysis manager from its proxy.
FAM = &AM->getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
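For orientation, this is roughly what a client of the new pass manager looks like at this revision: a pass is any object exposing a suitable run() (with an optional analysis-manager parameter, per the detector above) plus a name(), added with addPass() and driven by run(). CountFunctionsPass and runIt are illustrative, and the pass concept here is still evolving, so treat this as a sketch:

    #include "llvm/IR/Module.h"
    #include "llvm/IR/PassManager.h"
    #include "llvm/Support/raw_ostream.h"

    struct CountFunctionsPass {
      llvm::PreservedAnalyses run(llvm::Module *M) {
        llvm::errs() << M->getModuleIdentifier() << ": " << M->size()
                     << " functions\n";
        return llvm::PreservedAnalyses::all();  // we changed nothing
      }
      static llvm::StringRef name() { return "CountFunctionsPass"; }
    };

    void runIt(llvm::Module &M) {
      llvm::ModulePassManager MPM;
      MPM.addPass(CountFunctionsPass());
      MPM.run(&M);  // the ModuleAnalysisManager argument defaults to nullptr
    }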
diff --git a/include/llvm/IR/PredIteratorCache.h b/include/llvm/IR/PredIteratorCache.h
index bf18dfe..02bc583 100644
--- a/include/llvm/IR/PredIteratorCache.h
+++ b/include/llvm/IR/PredIteratorCache.h
@@ -44,7 +44,7 @@ namespace llvm {
if (Entry) return Entry;
SmallVector<BasicBlock*, 32> PredCache(pred_begin(BB), pred_end(BB));
- PredCache.push_back(0); // null terminator.
+ PredCache.push_back(nullptr); // null terminator.
BlockToPredCountMap[BB] = PredCache.size()-1;
diff --git a/include/llvm/IR/SymbolTableListTraits.h b/include/llvm/IR/SymbolTableListTraits.h
index 561ce01..0a5149c 100644
--- a/include/llvm/IR/SymbolTableListTraits.h
+++ b/include/llvm/IR/SymbolTableListTraits.h
@@ -46,19 +46,19 @@ public:
/// getListOwner - Return the object that owns this list. If this is a list
/// of instructions, it returns the BasicBlock that owns them.
ItemParentClass *getListOwner() {
- size_t Offset(size_t(&((ItemParentClass*)0->*ItemParentClass::
- getSublistAccess(static_cast<ValueSubClass*>(0)))));
+ size_t Offset(size_t(&((ItemParentClass*)nullptr->*ItemParentClass::
+ getSublistAccess(static_cast<ValueSubClass*>(nullptr)))));
iplist<ValueSubClass>* Anchor(static_cast<iplist<ValueSubClass>*>(this));
return reinterpret_cast<ItemParentClass*>(reinterpret_cast<char*>(Anchor)-
Offset);
}
static iplist<ValueSubClass> &getList(ItemParentClass *Par) {
- return Par->*(Par->getSublistAccess((ValueSubClass*)0));
+ return Par->*(Par->getSublistAccess((ValueSubClass*)nullptr));
}
static ValueSymbolTable *getSymTab(ItemParentClass *Par) {
- return Par ? toPtr(Par->getValueSymbolTable()) : 0;
+ return Par ? toPtr(Par->getValueSymbolTable()) : nullptr;
}
void addNodeToList(ValueSubClass *V);
diff --git a/include/llvm/IR/Type.h b/include/llvm/IR/Type.h
index 742a0d3..7955587 100644
--- a/include/llvm/IR/Type.h
+++ b/include/llvm/IR/Type.h
@@ -88,7 +88,7 @@ protected:
friend class LLVMContextImpl;
explicit Type(LLVMContext &C, TypeID tid)
: Context(C), IDAndSubclassData(0),
- NumContainedTys(0), ContainedTys(0) {
+ NumContainedTys(0), ContainedTys(nullptr) {
setTypeID(tid);
}
~Type() {}
@@ -265,7 +265,7 @@ public:
/// get the actual size for a particular target, it is reasonable to use the
/// DataLayout subsystem to do this.
///
- bool isSized(SmallPtrSet<const Type*, 4> *Visited = 0) const {
+ bool isSized(SmallPtrSet<const Type*, 4> *Visited = nullptr) const {
// If it's a primitive, it is always sized.
if (getTypeID() == IntegerTyID || isFloatingPointTy() ||
getTypeID() == PointerTyID ||
@@ -419,7 +419,7 @@ private:
/// isSizedDerivedType - Derived types like structures and arrays are sized
/// iff all of the members of the type are sized as well. Since asking for
/// their size is relatively uncommon, move this operation out of line.
- bool isSizedDerivedType(SmallPtrSet<const Type*, 4> *Visited = 0) const;
+ bool isSizedDerivedType(SmallPtrSet<const Type*, 4> *Visited = nullptr) const;
};
// Printing of types.
diff --git a/include/llvm/IR/Use.h b/include/llvm/IR/Use.h
index 340572a..033cd3e 100644
--- a/include/llvm/IR/Use.h
+++ b/include/llvm/IR/Use.h
@@ -60,7 +60,7 @@ public:
/// implicit. The implicit pointer is found via a waymarking algorithm
/// described in the programmer's manual:
///
-/// http://www.llvm.org/docs/ProgrammersManual.html#UserLayout
+/// http://www.llvm.org/docs/ProgrammersManual.html#the-waymarking-algorithm
///
/// This is essentially the single most memory intensive object in LLVM because
/// of the number of uses in the system. At the same time, the constant time
@@ -88,7 +88,7 @@ private:
enum PrevPtrTag { zeroDigitTag, oneDigitTag, stopTag, fullStopTag };
/// Constructor
- Use(PrevPtrTag tag) : Val(0) { Prev.setInt(tag); }
+ Use(PrevPtrTag tag) : Val(nullptr) { Prev.setInt(tag); }
public:
operator Value *() const { return Val; }
diff --git a/include/llvm/IR/User.h b/include/llvm/IR/User.h
index 061bc91..bc7696b 100644
--- a/include/llvm/IR/User.h
+++ b/include/llvm/IR/User.h
@@ -19,6 +19,7 @@
#ifndef LLVM_IR_USER_H
#define LLVM_IR_USER_H
+#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/ErrorHandling.h"
@@ -55,7 +56,7 @@ protected:
Use *allocHungoffUses(unsigned) const;
void dropHungoffUses() {
Use::zap(OperandList, OperandList + NumOperands, true);
- OperandList = 0;
+ OperandList = nullptr;
// Reset NumOperands so User::operator delete() does the right thing.
NumOperands = 0;
}
@@ -129,33 +130,13 @@ public:
/// Convenience iterator for directly iterating over the Values in the
/// OperandList
- class value_op_iterator : public std::iterator<std::forward_iterator_tag,
- Value*> {
- op_iterator OI;
- public:
- explicit value_op_iterator(Use *U) : OI(U) {}
-
- bool operator==(const value_op_iterator &x) const {
- return OI == x.OI;
- }
- bool operator!=(const value_op_iterator &x) const {
- return !operator==(x);
- }
-
- /// Iterator traversal: forward iteration only
- value_op_iterator &operator++() { // Preincrement
- ++OI;
- return *this;
- }
- value_op_iterator operator++(int) { // Postincrement
- value_op_iterator tmp = *this; ++*this; return tmp;
- }
-
- /// Retrieve a pointer to the current Value.
- Value *operator*() const {
- return *OI;
- }
+ struct value_op_iterator
+ : iterator_adaptor_base<value_op_iterator, op_iterator,
+ std::random_access_iterator_tag, Value *,
+ ptrdiff_t, Value *, Value *> {
+ explicit value_op_iterator(Use *U = nullptr) : iterator_adaptor_base(U) {}
+ Value *operator*() const { return *I; }
Value *operator->() const { return operator*(); }
};
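The rewritten iterator keeps the same client-facing pattern; assuming the existing value_op_begin()/value_op_end() accessors on User, walking a user's operands as Values still reads as below (countUsesOf is illustrative), with random access now available for free:

    #include "llvm/IR/User.h"

    // Count how many operand slots of U currently hold the value V.
    unsigned countUsesOf(llvm::User *U, llvm::Value *V) {
      unsigned N = 0;
      for (llvm::User::value_op_iterator I = U->value_op_begin(),
                                         E = U->value_op_end();
           I != E; ++I)
        if (*I == V)
          ++N;
      return N;
    }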
@@ -179,7 +160,7 @@ public:
//
void dropAllReferences() {
for (Use &U : operands())
- U.set(0);
+ U.set(nullptr);
}
/// replaceUsesOfWith - Replaces all references to the "From" definition with
diff --git a/include/llvm/IR/Value.h b/include/llvm/IR/Value.h
index d5b9f11..0158683 100644
--- a/include/llvm/IR/Value.h
+++ b/include/llvm/IR/Value.h
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file declares the Value class.
+// This file declares the Value class.
//
//===----------------------------------------------------------------------===//
@@ -31,6 +31,7 @@ class Constant;
class DataLayout;
class Function;
class GlobalAlias;
+class GlobalObject;
class GlobalValue;
class GlobalVariable;
class InlineAsm;
@@ -52,7 +53,7 @@ typedef StringMapEntry<Value*> ValueName;
// Value Class
//===----------------------------------------------------------------------===//
-/// This is a very important LLVM class. It is the base class of all values
+/// This is a very important LLVM class. It is the base class of all values
/// computed by a program that may be used as operands to other values. Value is
/// the super class of other important classes such as Instruction and Function.
/// All Values have a Type. Type is not a subclass of Value. Some values can
@@ -182,10 +183,6 @@ private:
Value(const Value &) LLVM_DELETED_FUNCTION;
protected:
- /// printCustom - Value subclasses can override this to implement custom
- /// printing behavior.
- virtual void printCustom(raw_ostream &O) const;
-
Value(Type *Ty, unsigned scid);
public:
virtual ~Value();
@@ -196,14 +193,15 @@ public:
/// print - Implement operator<< on Value.
///
- void print(raw_ostream &O, AssemblyAnnotationWriter *AAW = 0) const;
+ void print(raw_ostream &O) const;
/// \brief Print the name of this Value out to the specified raw_ostream.
/// This is useful when you just want to print 'int %reg126', not the
/// instruction that generated it. If you specify a Module for context, then
/// even constants get pretty-printed; for example, the type of a null
/// pointer is printed symbolically.
- void printAsOperand(raw_ostream &O, bool PrintType = true, const Module *M = 0) const;
+ void printAsOperand(raw_ostream &O, bool PrintType = true,
+ const Module *M = nullptr) const;
/// All values are typed, get the type of this value.
///
@@ -213,10 +211,10 @@ public:
LLVMContext &getContext() const;
// All values can potentially be named.
- bool hasName() const { return Name != 0 && SubclassID != MDStringVal; }
+ bool hasName() const { return Name != nullptr && SubclassID != MDStringVal; }
ValueName *getValueName() const { return Name; }
void setValueName(ValueName *VN) { Name = VN; }
-
+
/// getName() - Return a constant reference to the value's name. This is cheap
/// and guaranteed to return the same reference as long as the value is not
/// modified.
@@ -228,9 +226,9 @@ public:
/// \param Name The new name; or "" if the value's name should be removed.
void setName(const Twine &Name);
-
+
/// takeName - transfer the name from V to this value, setting V's name to
- /// empty. It is an error to call V->takeName(V).
+ /// empty. It is an error to call V->takeName(V).
void takeName(Value *V);
/// replaceAllUsesWith - Go through the uses list for this definition and make
@@ -242,7 +240,7 @@ public:
//----------------------------------------------------------------------
// Methods for handling the chain of uses of this Value.
//
- bool use_empty() const { return UseList == 0; }
+ bool use_empty() const { return UseList == nullptr; }
typedef use_iterator_impl<Use> use_iterator;
typedef use_iterator_impl<const Use> const_use_iterator;
@@ -303,7 +301,7 @@ public:
void addUse(Use &U) { U.addToList(&UseList); }
/// An enumeration for keeping track of the concrete subclass of Value that
- /// is actually instantiated. Values of this enumeration are kept in the
+ /// is actually instantiated. Values of this enumeration are kept in the
/// Value classes SubclassID field. They are used for concrete type
/// identification.
enum ValueTy {
@@ -327,9 +325,6 @@ public:
MDNodeVal, // This is an instance of MDNode
MDStringVal, // This is an instance of MDString
InlineAsmVal, // This is an instance of InlineAsm
- PseudoSourceValueVal, // This is an instance of PseudoSourceValue
- FixedStackPseudoSourceValueVal, // This is an instance of
- // FixedStackPseudoSourceValue
InstructionVal, // This is an instance of Instruction
// Enum values starting at InstructionVal are used for Instructions;
// don't add new values here!
@@ -436,7 +431,7 @@ public:
/// isDereferenceablePointer - Test if this value is always a pointer to
/// allocated and suitably aligned memory for a simple load or store.
bool isDereferenceablePointer() const;
-
+
/// DoPHITranslation - If this value is a PHI node with CurBB as its parent,
/// return the value in the PHI node corresponding to PredBB. If not, return
/// ourself. This is useful if you want to know the value something has in a
@@ -447,11 +442,11 @@ public:
const BasicBlock *PredBB) const{
return const_cast<Value*>(this)->DoPHITranslation(CurBB, PredBB);
}
-
+
/// MaximumAlignment - This is the greatest alignment value supported by
/// load, store, and alloca instructions, and global values.
static const unsigned MaximumAlignment = 1u << 29;
-
+
/// mutateType - Mutate the type of this Value to be of the specified type.
/// Note that this is an extremely dangerous operation which can create
/// completely invalid IR very easily. It is strongly recommended that you
@@ -460,7 +455,7 @@ public:
void mutateType(Type *Ty) {
VTy = Ty;
}
-
+
protected:
unsigned short getSubclassDataFromValue() const { return SubclassData; }
void setValueSubclassData(unsigned short D) { SubclassData = D; }
@@ -470,7 +465,7 @@ inline raw_ostream &operator<<(raw_ostream &OS, const Value &V) {
V.print(OS);
return OS;
}
-
+
void Use::set(Value *V) {
if (Val) removeFromList();
Val = V;
@@ -494,55 +489,60 @@ template <> struct isa_impl<Argument, Value> {
}
};
-template <> struct isa_impl<InlineAsm, Value> {
+template <> struct isa_impl<InlineAsm, Value> {
static inline bool doit(const Value &Val) {
return Val.getValueID() == Value::InlineAsmVal;
}
};
-template <> struct isa_impl<Instruction, Value> {
+template <> struct isa_impl<Instruction, Value> {
static inline bool doit(const Value &Val) {
return Val.getValueID() >= Value::InstructionVal;
}
};
-template <> struct isa_impl<BasicBlock, Value> {
+template <> struct isa_impl<BasicBlock, Value> {
static inline bool doit(const Value &Val) {
return Val.getValueID() == Value::BasicBlockVal;
}
};
-template <> struct isa_impl<Function, Value> {
+template <> struct isa_impl<Function, Value> {
static inline bool doit(const Value &Val) {
return Val.getValueID() == Value::FunctionVal;
}
};
-template <> struct isa_impl<GlobalVariable, Value> {
+template <> struct isa_impl<GlobalVariable, Value> {
static inline bool doit(const Value &Val) {
return Val.getValueID() == Value::GlobalVariableVal;
}
};
-template <> struct isa_impl<GlobalAlias, Value> {
+template <> struct isa_impl<GlobalAlias, Value> {
static inline bool doit(const Value &Val) {
return Val.getValueID() == Value::GlobalAliasVal;
}
};
-template <> struct isa_impl<GlobalValue, Value> {
+template <> struct isa_impl<GlobalValue, Value> {
+ static inline bool doit(const Value &Val) {
+ return isa<GlobalObject>(Val) || isa<GlobalAlias>(Val);
+ }
+};
+
+template <> struct isa_impl<GlobalObject, Value> {
static inline bool doit(const Value &Val) {
- return isa<GlobalVariable>(Val) || isa<Function>(Val) ||
- isa<GlobalAlias>(Val);
+ return isa<GlobalVariable>(Val) || isa<Function>(Val);
}
};
-template <> struct isa_impl<MDNode, Value> {
+template <> struct isa_impl<MDNode, Value> {
static inline bool doit(const Value &Val) {
return Val.getValueID() == Value::MDNodeVal;
}
};
-
+
// Value* is only 4-byte aligned.
template<>
class PointerLikeTypeTraits<Value*> {
@@ -559,7 +559,7 @@ public:
DEFINE_ISA_CONVERSION_FUNCTIONS(Value, LLVMValueRef)
/* Specialized opaque value conversions.
- */
+ */
inline Value **unwrap(LLVMValueRef *Vals) {
return reinterpret_cast<Value**>(Vals);
}
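With print() losing its annotation-writer parameter, the two common printing idioms reduce to the calls sketched here (dumpValue is an illustrative wrapper; passing the Module is what lets types print symbolically in the operand form):

    #include "llvm/IR/Value.h"
    #include "llvm/Support/raw_ostream.h"

    void dumpValue(const llvm::Value &V, const llvm::Module *M) {
      V.print(llvm::errs());                  // full form, e.g. a whole instruction
      llvm::errs() << "\n";
      V.printAsOperand(llvm::errs(), /*PrintType=*/true, M);  // e.g. "i32 %reg126"
      llvm::errs() << "\n";
    }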
diff --git a/include/llvm/IR/ValueHandle.h b/include/llvm/IR/ValueHandle.h
index 9b5e11a..aa29b2e 100644
--- a/include/llvm/IR/ValueHandle.h
+++ b/include/llvm/IR/ValueHandle.h
@@ -64,14 +64,14 @@ private:
ValueHandleBase(const ValueHandleBase&) LLVM_DELETED_FUNCTION;
public:
explicit ValueHandleBase(HandleBaseKind Kind)
- : PrevPair(0, Kind), Next(0), VP(0, 0) {}
+ : PrevPair(nullptr, Kind), Next(nullptr), VP(nullptr, 0) {}
ValueHandleBase(HandleBaseKind Kind, Value *V)
- : PrevPair(0, Kind), Next(0), VP(V, 0) {
+ : PrevPair(nullptr, Kind), Next(nullptr), VP(V, 0) {
if (isValid(VP.getPointer()))
AddToUseList();
}
ValueHandleBase(HandleBaseKind Kind, const ValueHandleBase &RHS)
- : PrevPair(0, Kind), Next(0), VP(RHS.VP) {
+ : PrevPair(nullptr, Kind), Next(nullptr), VP(RHS.VP) {
if (isValid(VP.getPointer()))
AddToExistingUseList(RHS.getPrevPtr());
}
@@ -214,7 +214,7 @@ public:
AssertingVH(ValueTy *P) : ValueHandleBase(Assert, GetAsValue(P)) {}
AssertingVH(const AssertingVH &RHS) : ValueHandleBase(Assert, RHS) {}
#else
- AssertingVH() : ThePtr(0) {}
+ AssertingVH() : ThePtr(nullptr) {}
AssertingVH(ValueTy *P) : ThePtr(P) {}
#endif
@@ -366,7 +366,7 @@ public:
///
/// All implementations must remove the reference from this object to the
/// Value that's being destroyed.
- virtual void deleted() { setValPtr(NULL); }
+ virtual void deleted() { setValPtr(nullptr); }
/// Called when this->getValPtr()->replaceAllUsesWith(new_value) is called,
/// _before_ any of the uses have actually been replaced. If WeakVH were
diff --git a/include/llvm/IR/ValueMap.h b/include/llvm/IR/ValueMap.h
index 42da529..1503aed 100644
--- a/include/llvm/IR/ValueMap.h
+++ b/include/llvm/IR/ValueMap.h
@@ -67,7 +67,7 @@ struct ValueMapConfig {
/// and onDelete) and not inside other ValueMap methods. NULL means that no
/// mutex is necessary.
template<typename ExtraDataT>
- static sys::Mutex *getMutex(const ExtraDataT &/*Data*/) { return NULL; }
+ static sys::Mutex *getMutex(const ExtraDataT &/*Data*/) { return nullptr; }
};
/// See the file comment.
@@ -253,10 +253,10 @@ struct DenseMapInfo<ValueMapCallbackVH<KeyT, ValueT, Config> > {
typedef DenseMapInfo<KeyT> PointerInfo;
static inline VH getEmptyKey() {
- return VH(PointerInfo::getEmptyKey(), NULL);
+ return VH(PointerInfo::getEmptyKey(), nullptr);
}
static inline VH getTombstoneKey() {
- return VH(PointerInfo::getTombstoneKey(), NULL);
+ return VH(PointerInfo::getTombstoneKey(), nullptr);
}
static unsigned getHashValue(const VH &Val) {
return PointerInfo::getHashValue(Val.Unwrap());
diff --git a/include/llvm/IR/Verifier.h b/include/llvm/IR/Verifier.h
index 9a2f402..0272e20 100644
--- a/include/llvm/IR/Verifier.h
+++ b/include/llvm/IR/Verifier.h
@@ -28,6 +28,7 @@ namespace llvm {
class Function;
class FunctionPass;
+class ModulePass;
class Module;
class PreservedAnalyses;
class raw_ostream;
@@ -38,14 +39,14 @@ class raw_ostream;
/// If there are no errors, the function returns false. If an error is found,
/// a message describing the error is written to OS (if non-null) and true is
/// returned.
-bool verifyFunction(const Function &F, raw_ostream *OS = 0);
+bool verifyFunction(const Function &F, raw_ostream *OS = nullptr);
/// \brief Check a module for errors.
///
/// If there are no errors, the function returns false. If an error is found,
/// a message describing the error is written to OS (if non-null) and true is
/// returned.
-bool verifyModule(const Module &M, raw_ostream *OS = 0);
+bool verifyModule(const Module &M, raw_ostream *OS = nullptr);
/// \brief Create a verifier pass.
///
@@ -58,6 +59,18 @@ bool verifyModule(const Module &M, raw_ostream *OS = 0);
/// Note that this creates a pass suitable for the legacy pass manager. It has nothing to do with \c VerifierPass.
FunctionPass *createVerifierPass(bool FatalErrors = true);
+/// \brief Create a debug-info verifier pass.
+///
+/// Check a module for validity of debug info. This is essentially a pass
+/// wrapped around the debug-info parts of \a verifyModule(). When the pass
+/// detects a verification error it is always printed to stderr, and by default
+/// they are fatal. You can override that by passing \c false to \p
+/// FatalErrors.
+///
+/// Note that this creates a pass suitable for the legacy pass manager. It has
+/// nothing to do with \c VerifierPass.
+ModulePass *createDebugInfoVerifierPass(bool FatalErrors = true);
+
class VerifierPass {
bool FatalErrors;
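The free-function verifiers declared above are typically invoked right after building or deserializing a module; a minimal sketch (checkModule is an illustrative wrapper; remember the functions return true on error):

    #include "llvm/IR/Module.h"
    #include "llvm/IR/Verifier.h"
    #include "llvm/Support/raw_ostream.h"

    bool checkModule(const llvm::Module &M) {
      // Diagnostics are written to the stream only when one is supplied.
      if (llvm::verifyModule(M, &llvm::errs())) {
        llvm::errs() << "module verification failed\n";
        return false;
      }
      return true;
    }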
diff --git a/include/llvm/InitializePasses.h b/include/llvm/InitializePasses.h
index 9b9f234..8e53615 100644
--- a/include/llvm/InitializePasses.h
+++ b/include/llvm/InitializePasses.h
@@ -71,6 +71,7 @@ void initializeAliasDebuggerPass(PassRegistry&);
void initializeAliasSetPrinterPass(PassRegistry&);
void initializeAlwaysInlinerPass(PassRegistry&);
void initializeArgPromotionPass(PassRegistry&);
+void initializeAtomicExpandLoadLinkedPass(PassRegistry&);
void initializeSampleProfileLoaderPass(PassRegistry&);
void initializeBarrierNoopPass(PassRegistry&);
void initializeBasicAliasAnalysisPass(PassRegistry&);
@@ -103,6 +104,7 @@ void initializeDAHPass(PassRegistry&);
void initializeDCEPass(PassRegistry&);
void initializeDSEPass(PassRegistry&);
void initializeDebugIRPass(PassRegistry&);
+void initializeDebugInfoVerifierLegacyPassPass(PassRegistry &);
void initializeDeadInstEliminationPass(PassRegistry&);
void initializeDeadMachineInstructionElimPass(PassRegistry&);
void initializeDelinearizationPass(PassRegistry &);
@@ -236,6 +238,7 @@ void initializeSimpleInlinerPass(PassRegistry&);
void initializeRegisterCoalescerPass(PassRegistry&);
void initializeSingleLoopExtractorPass(PassRegistry&);
void initializeSinkingPass(PassRegistry&);
+void initializeSeparateConstOffsetFromGEPPass(PassRegistry &);
void initializeSlotIndexesPass(PassRegistry&);
void initializeSpillPlacementPass(PassRegistry&);
void initializeStackProtectorPass(PassRegistry&);
diff --git a/include/llvm/LTO/LTOCodeGenerator.h b/include/llvm/LTO/LTOCodeGenerator.h
index 5433991..b19b232 100644
--- a/include/llvm/LTO/LTOCodeGenerator.h
+++ b/include/llvm/LTO/LTOCodeGenerator.h
@@ -53,11 +53,9 @@ namespace llvm {
class TargetLibraryInfo;
class TargetMachine;
class raw_ostream;
-}
//===----------------------------------------------------------------------===//
-/// LTOCodeGenerator - C++ class which implements the opaque lto_code_gen_t
-/// type.
+/// C++ class which implements the opaque lto_code_gen_t type.
///
struct LTOCodeGenerator {
static const char *getVersionString();
@@ -68,11 +66,12 @@ struct LTOCodeGenerator {
// Merge given module, return true on success.
bool addModule(struct LTOModule*, std::string &errMsg);
- void setTargetOptions(llvm::TargetOptions options);
+ void setTargetOptions(TargetOptions options);
void setDebugInfo(lto_debug_model);
void setCodePICModel(lto_codegen_model);
void setCpu(const char *mCpu) { MCpu = mCpu; }
+ void setAttr(const char *mAttr) { MAttr = mAttr; }
void addMustPreserveSymbol(const char *sym) { MustPreserveSymbols[sym] = 1; }
@@ -120,40 +119,37 @@ struct LTOCodeGenerator {
private:
void initializeLTOPasses();
- bool generateObjectFile(llvm::raw_ostream &out,
- bool disableOpt,
- bool disableInline,
- bool disableGVNLoadPRE,
- std::string &errMsg);
+ bool generateObjectFile(raw_ostream &out, bool disableOpt, bool disableInline,
+ bool disableGVNLoadPRE, std::string &errMsg);
void applyScopeRestrictions();
- void applyRestriction(llvm::GlobalValue &GV,
- const llvm::ArrayRef<llvm::StringRef> &Libcalls,
- std::vector<const char*> &MustPreserveList,
- llvm::SmallPtrSet<llvm::GlobalValue*, 8> &AsmUsed,
- llvm::Mangler &Mangler);
+ void applyRestriction(GlobalValue &GV, const ArrayRef<StringRef> &Libcalls,
+ std::vector<const char *> &MustPreserveList,
+ SmallPtrSet<GlobalValue *, 8> &AsmUsed,
+ Mangler &Mangler);
bool determineTarget(std::string &errMsg);
- static void DiagnosticHandler(const llvm::DiagnosticInfo &DI, void *Context);
+ static void DiagnosticHandler(const DiagnosticInfo &DI, void *Context);
- void DiagnosticHandler2(const llvm::DiagnosticInfo &DI);
+ void DiagnosticHandler2(const DiagnosticInfo &DI);
- typedef llvm::StringMap<uint8_t> StringSet;
+ typedef StringMap<uint8_t> StringSet;
- llvm::LLVMContext &Context;
- llvm::Linker Linker;
- llvm::TargetMachine *TargetMach;
+ LLVMContext &Context;
+ Linker IRLinker;
+ TargetMachine *TargetMach;
bool EmitDwarfDebugInfo;
bool ScopeRestrictionsDone;
lto_codegen_model CodeModel;
StringSet MustPreserveSymbols;
StringSet AsmUndefinedRefs;
- llvm::MemoryBuffer *NativeObjectFile;
+ MemoryBuffer *NativeObjectFile;
std::vector<char *> CodegenOptions;
std::string MCpu;
+ std::string MAttr;
std::string NativeObjectPath;
- llvm::TargetOptions Options;
+ TargetOptions Options;
lto_diagnostic_handler_t DiagHandler;
void *DiagContext;
};
-
+}
#endif // LTO_CODE_GENERATOR_H
diff --git a/include/llvm/LTO/LTOModule.h b/include/llvm/LTO/LTOModule.h
index 1e4fa1b..f1b1480 100644
--- a/include/llvm/LTO/LTOModule.h
+++ b/include/llvm/LTO/LTOModule.h
@@ -31,25 +31,24 @@ namespace llvm {
class MemoryBuffer;
class TargetOptions;
class Value;
-}
//===----------------------------------------------------------------------===//
-/// LTOModule - C++ class which implements the opaque lto_module_t type.
+/// C++ class which implements the opaque lto_module_t type.
///
struct LTOModule {
private:
- typedef llvm::StringMap<uint8_t> StringSet;
+ typedef StringMap<uint8_t> StringSet;
struct NameAndAttributes {
const char *name;
uint32_t attributes;
bool isFunction;
- const llvm::GlobalValue *symbol;
+ const GlobalValue *symbol;
};
- std::unique_ptr<llvm::Module> _module;
- std::unique_ptr<llvm::TargetMachine> _target;
- llvm::MCObjectFileInfo ObjFileInfo;
+ std::unique_ptr<Module> _module;
+ std::unique_ptr<TargetMachine> _target;
+ MCObjectFileInfo ObjFileInfo;
StringSet _linkeropt_strings;
std::vector<const char *> _deplibs;
std::vector<const char *> _linkeropts;
@@ -57,174 +56,161 @@ private:
// _defines and _undefines only needed to disambiguate tentative definitions
StringSet _defines;
- llvm::StringMap<NameAndAttributes> _undefines;
+ StringMap<NameAndAttributes> _undefines;
std::vector<const char*> _asm_undefines;
- llvm::MCContext _context;
+ MCContext _context;
// Use mangler to add GlobalPrefix to names to match linker names.
- llvm::Mangler _mangler;
+ Mangler _mangler;
+
+ LTOModule(Module *m, TargetMachine *t);
- LTOModule(llvm::Module *m, llvm::TargetMachine *t);
public:
- /// isBitcodeFile - Returns 'true' if the file or memory contents is LLVM
- /// bitcode.
+ /// Returns 'true' if the file or memory contents is LLVM bitcode.
static bool isBitcodeFile(const void *mem, size_t length);
static bool isBitcodeFile(const char *path);
- /// isBitcodeFileForTarget - Returns 'true' if the file or memory contents
- /// is LLVM bitcode for the specified triple.
+ /// Returns 'true' if the file or memory contents is LLVM bitcode for the
+ /// specified triple.
static bool isBitcodeFileForTarget(const void *mem,
size_t length,
const char *triplePrefix);
static bool isBitcodeFileForTarget(const char *path,
const char *triplePrefix);
- /// makeLTOModule - Create an LTOModule. N.B. These methods take ownership
- /// of the buffer. The caller must have initialized the Targets, the
- /// TargetMCs, the AsmPrinters, and the AsmParsers by calling:
+ /// Create an LTOModule. N.B. These methods take ownership of the buffer. The
+ /// caller must have initialized the Targets, the TargetMCs, the AsmPrinters,
+ /// and the AsmParsers by calling:
///
/// InitializeAllTargets();
/// InitializeAllTargetMCs();
/// InitializeAllAsmPrinters();
/// InitializeAllAsmParsers();
- static LTOModule *makeLTOModule(const char* path,
- llvm::TargetOptions options,
+ static LTOModule *makeLTOModule(const char *path, TargetOptions options,
std::string &errMsg);
- static LTOModule *makeLTOModule(int fd, const char *path,
- size_t size, llvm::TargetOptions options,
+ static LTOModule *makeLTOModule(int fd, const char *path, size_t size,
+ TargetOptions options, std::string &errMsg);
+ static LTOModule *makeLTOModule(int fd, const char *path, size_t map_size,
+ off_t offset, TargetOptions options,
std::string &errMsg);
- static LTOModule *makeLTOModule(int fd, const char *path,
- size_t map_size,
- off_t offset, llvm::TargetOptions options,
- std::string& errMsg);
static LTOModule *makeLTOModule(const void *mem, size_t length,
- llvm::TargetOptions options,
- std::string &errMsg,
- llvm::StringRef path = "");
+ TargetOptions options, std::string &errMsg,
+ StringRef path = "");
- /// getTargetTriple - Return the Module's target triple.
+ /// Return the Module's target triple.
const char *getTargetTriple() {
return _module->getTargetTriple().c_str();
}
- /// setTargetTriple - Set the Module's target triple.
+ /// Set the Module's target triple.
void setTargetTriple(const char *triple) {
_module->setTargetTriple(triple);
}
- /// getSymbolCount - Get the number of symbols
+ /// Get the number of symbols.
uint32_t getSymbolCount() {
return _symbols.size();
}
- /// getSymbolAttributes - Get the attributes for a symbol at the specified
- /// index.
+ /// Get the attributes for a symbol at the specified index.
lto_symbol_attributes getSymbolAttributes(uint32_t index) {
if (index < _symbols.size())
return lto_symbol_attributes(_symbols[index].attributes);
return lto_symbol_attributes(0);
}
- /// getSymbolName - Get the name of the symbol at the specified index.
+ /// Get the name of the symbol at the specified index.
const char *getSymbolName(uint32_t index) {
if (index < _symbols.size())
return _symbols[index].name;
- return NULL;
+ return nullptr;
}
- /// getDependentLibraryCount - Get the number of dependent libraries
+ /// Get the number of dependent libraries.
uint32_t getDependentLibraryCount() {
return _deplibs.size();
}
- /// getDependentLibrary - Get the dependent library at the specified index.
+ /// Get the dependent library at the specified index.
const char *getDependentLibrary(uint32_t index) {
if (index < _deplibs.size())
return _deplibs[index];
- return NULL;
+ return nullptr;
}
- /// getLinkerOptCount - Get the number of linker options
+ /// Get the number of linker options.
uint32_t getLinkerOptCount() {
return _linkeropts.size();
}
- /// getLinkerOpt - Get the linker option at the specified index.
+ /// Get the linker option at the specified index.
const char *getLinkerOpt(uint32_t index) {
if (index < _linkeropts.size())
return _linkeropts[index];
- return NULL;
+ return nullptr;
}
- /// getLLVVMModule - Return the Module.
- llvm::Module *getLLVVMModule() { return _module.get(); }
+ /// Return the Module.
+ Module *getLLVVMModule() { return _module.get(); }
- /// getAsmUndefinedRefs -
const std::vector<const char*> &getAsmUndefinedRefs() {
return _asm_undefines;
}
private:
- /// parseMetadata - Parse metadata from the module
+ /// Parse metadata from the module.
// FIXME: it only parses "Linker Options" metadata at the moment
void parseMetadata();
- /// parseSymbols - Parse the symbols from the module and model-level ASM and
- /// add them to either the defined or undefined lists.
+ /// Parse the symbols from the module and module-level ASM and add them to
+ /// either the defined or undefined lists.
bool parseSymbols(std::string &errMsg);
- /// addPotentialUndefinedSymbol - Add a symbol which isn't defined just yet
- /// to a list to be resolved later.
- void addPotentialUndefinedSymbol(const llvm::GlobalValue *dcl, bool isFunc);
+ /// Add a symbol which isn't defined just yet to a list to be resolved later.
+ void addPotentialUndefinedSymbol(const GlobalValue *dcl, bool isFunc);
- /// addDefinedSymbol - Add a defined symbol to the list.
- void addDefinedSymbol(const llvm::GlobalValue *def, bool isFunction);
+ /// Add a defined symbol to the list.
+ void addDefinedSymbol(const GlobalValue *def, bool isFunction);
- /// addDefinedFunctionSymbol - Add a function symbol as defined to the list.
- void addDefinedFunctionSymbol(const llvm::Function *f);
+ /// Add a function symbol as defined to the list.
+ void addDefinedFunctionSymbol(const Function *f);
- /// addDefinedDataSymbol - Add a data symbol as defined to the list.
- void addDefinedDataSymbol(const llvm::GlobalValue *v);
+ /// Add a data symbol as defined to the list.
+ void addDefinedDataSymbol(const GlobalValue *v);
- /// addAsmGlobalSymbols - Add global symbols from module-level ASM to the
- /// defined or undefined lists.
+ /// Add global symbols from module-level ASM to the defined or undefined
+ /// lists.
bool addAsmGlobalSymbols(std::string &errMsg);
- /// addAsmGlobalSymbol - Add a global symbol from module-level ASM to the
- /// defined list.
+ /// Add a global symbol from module-level ASM to the defined list.
void addAsmGlobalSymbol(const char *, lto_symbol_attributes scope);
- /// addAsmGlobalSymbolUndef - Add a global symbol from module-level ASM to
- /// the undefined list.
+ /// Add a global symbol from module-level ASM to the undefined list.
void addAsmGlobalSymbolUndef(const char *);
- /// addObjCClass - Parse i386/ppc ObjC class data structure.
- void addObjCClass(const llvm::GlobalVariable *clgv);
+ /// Parse i386/ppc ObjC class data structure.
+ void addObjCClass(const GlobalVariable *clgv);
- /// addObjCCategory - Parse i386/ppc ObjC category data structure.
- void addObjCCategory(const llvm::GlobalVariable *clgv);
+ /// Parse i386/ppc ObjC category data structure.
+ void addObjCCategory(const GlobalVariable *clgv);
- /// addObjCClassRef - Parse i386/ppc ObjC class list data structure.
- void addObjCClassRef(const llvm::GlobalVariable *clgv);
+ /// Parse i386/ppc ObjC class list data structure.
+ void addObjCClassRef(const GlobalVariable *clgv);
- /// objcClassNameFromExpression - Get string that the data pointer points
- /// to.
- bool objcClassNameFromExpression(const llvm::Constant* c, std::string &name);
+ /// Get string that the data pointer points to.
+ bool objcClassNameFromExpression(const Constant *c, std::string &name);
- /// isTargetMatch - Returns 'true' if the memory buffer is for the specified
- /// target triple.
- static bool isTargetMatch(llvm::MemoryBuffer *memBuffer,
- const char *triplePrefix);
+ /// Returns 'true' if the memory buffer is for the specified target triple.
+ static bool isTargetMatch(MemoryBuffer *memBuffer, const char *triplePrefix);
- /// makeLTOModule - Create an LTOModule (private version). N.B. This
- /// method takes ownership of the buffer.
- static LTOModule *makeLTOModule(llvm::MemoryBuffer *buffer,
- llvm::TargetOptions options,
+ /// Create an LTOModule (private version). N.B. This method takes ownership of
+ /// the buffer.
+ static LTOModule *makeLTOModule(MemoryBuffer *buffer, TargetOptions options,
std::string &errMsg);
/// Create a MemoryBuffer from a memory range with an optional name.
- static llvm::MemoryBuffer *makeBuffer(const void *mem, size_t length,
- llvm::StringRef name = "");
+ static MemoryBuffer *makeBuffer(const void *mem, size_t length,
+ StringRef name = "");
};
-
+}
#endif // LTO_MODULE_H
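
A minimal usage sketch for the interface in the hunk above (not part of the patch): it follows the documented requirement that the targets, target MCs, asm printers, and asm parsers be registered before LTOModule::makeLTOModule is called, and it assumes the post-patch placement of LTOModule inside namespace llvm. The helper name is made up for illustration.

    #include "llvm/LTO/LTOModule.h"
    #include "llvm/Support/TargetSelect.h"
    #include "llvm/Target/TargetOptions.h"
    #include <cstdint>
    #include <string>

    static llvm::LTOModule *loadModuleForLTO(const char *Path) {
      // The header above requires the targets to be registered first.
      llvm::InitializeAllTargets();
      llvm::InitializeAllTargetMCs();
      llvm::InitializeAllAsmPrinters();
      llvm::InitializeAllAsmParsers();

      std::string ErrMsg;
      llvm::TargetOptions Options;
      llvm::LTOModule *M = llvm::LTOModule::makeLTOModule(Path, Options, ErrMsg);
      if (!M)
        return nullptr; // ErrMsg holds the reason for the failure

      // Enumerate the symbols the module exposes to the linker.
      for (uint32_t I = 0, E = M->getSymbolCount(); I != E; ++I)
        (void)M->getSymbolName(I);
      return M;
    }
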
diff --git a/include/llvm/LineEditor/LineEditor.h b/include/llvm/LineEditor/LineEditor.h
index 42839ed..1a9a691 100644
--- a/include/llvm/LineEditor/LineEditor.h
+++ b/include/llvm/LineEditor/LineEditor.h
@@ -11,9 +11,9 @@
#define LLVM_LINEEDITOR_LINEEDITOR_H
#include "llvm/ADT/Optional.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/StringRef.h"
-#include <stdio.h>
+#include <cstdio>
+#include <memory>
#include <string>
#include <vector>
diff --git a/include/llvm/LinkAllPasses.h b/include/llvm/LinkAllPasses.h
index 1603250..2616ebd 100644
--- a/include/llvm/LinkAllPasses.h
+++ b/include/llvm/LinkAllPasses.h
@@ -53,7 +53,7 @@ namespace {
(void) llvm::createAliasDebugger();
(void) llvm::createArgumentPromotionPass();
(void) llvm::createBasicAliasAnalysisPass();
- (void) llvm::createLibCallAliasAnalysisPass(0);
+ (void) llvm::createLibCallAliasAnalysisPass(nullptr);
(void) llvm::createScalarEvolutionAliasAnalysisPass();
(void) llvm::createTypeBasedAliasAnalysisPass();
(void) llvm::createBoundsCheckingPass();
@@ -140,9 +140,9 @@ namespace {
(void) llvm::createMetaRenamerPass();
(void) llvm::createFunctionAttrsPass();
(void) llvm::createMergeFunctionsPass();
- (void) llvm::createPrintModulePass(*(llvm::raw_ostream*)0);
- (void) llvm::createPrintFunctionPass(*(llvm::raw_ostream*)0);
- (void) llvm::createPrintBasicBlockPass(*(llvm::raw_ostream*)0);
+ (void) llvm::createPrintModulePass(*(llvm::raw_ostream*)nullptr);
+ (void) llvm::createPrintFunctionPass(*(llvm::raw_ostream*)nullptr);
+ (void) llvm::createPrintBasicBlockPass(*(llvm::raw_ostream*)nullptr);
(void) llvm::createModuleDebugInfoPrinterPass();
(void) llvm::createPartialInliningPass();
(void) llvm::createLintPass();
@@ -156,15 +156,16 @@ namespace {
(void) llvm::createBBVectorizePass();
(void) llvm::createPartiallyInlineLibCallsPass();
(void) llvm::createScalarizerPass();
+ (void) llvm::createSeparateConstOffsetFromGEPPass();
(void)new llvm::IntervalPartition();
(void)new llvm::FindUsedTypes();
(void)new llvm::ScalarEvolution();
- ((llvm::Function*)0)->viewCFGOnly();
+ ((llvm::Function*)nullptr)->viewCFGOnly();
llvm::RGPassManager RGM;
- ((llvm::RegionPass*)0)->runOnRegion((llvm::Region*)0, RGM);
- llvm::AliasSetTracker X(*(llvm::AliasAnalysis*)0);
- X.add((llvm::Value*)0, 0, 0); // for -print-alias-sets
+ ((llvm::RegionPass*)nullptr)->runOnRegion((llvm::Region*)nullptr, RGM);
+ llvm::AliasSetTracker X(*(llvm::AliasAnalysis*)nullptr);
+ X.add((llvm::Value*)nullptr, 0, nullptr); // for -print-alias-sets
}
} ForcePassLinking; // Force link by creating a global definition.
}
diff --git a/include/llvm/MC/MCAsmInfo.h b/include/llvm/MC/MCAsmInfo.h
index 037a24f..f7d3be2 100644
--- a/include/llvm/MC/MCAsmInfo.h
+++ b/include/llvm/MC/MCAsmInfo.h
@@ -365,7 +365,7 @@ namespace llvm {
/// specify a section to switch to if the translation unit doesn't have any
/// trampolines that require an executable stack.
virtual const MCSection *getNonexecutableStackSection(MCContext &Ctx) const{
- return 0;
+ return nullptr;
}
virtual const MCExpr *
diff --git a/include/llvm/MC/MCAsmLayout.h b/include/llvm/MC/MCAsmLayout.h
index 3058b7b..f048e34 100644
--- a/include/llvm/MC/MCAsmLayout.h
+++ b/include/llvm/MC/MCAsmLayout.h
@@ -17,6 +17,7 @@ namespace llvm {
class MCAssembler;
class MCFragment;
class MCSectionData;
+class MCSymbol;
class MCSymbolData;
/// Encapsulates the layout of an assembly file at a particular point in time.
@@ -102,8 +103,15 @@ public:
/// \brief Get the offset of the given symbol, as computed in the current
/// layout.
+ /// \result True on success.
+ bool getSymbolOffset(const MCSymbolData *SD, uint64_t &Val) const;
+
+ /// \brief Variant that reports a fatal error if the offset is not computable.
uint64_t getSymbolOffset(const MCSymbolData *SD) const;
+ /// \brief If this symbol is equivalent to A + Constant, return A.
+ const MCSymbol *getBaseSymbol(const MCSymbol &Symbol) const;
+
/// @}
};
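
A short sketch of how a caller might prefer the new non-fatal getSymbolOffset overload added above (not part of the patch); the wrapper name is invented for illustration.

    #include "llvm/MC/MCAsmLayout.h"
    #include <cstdint>

    // Returns true and fills Offset only when the offset is computable in the
    // current layout; the uint64_t-returning overload would instead report a
    // fatal error in that case.
    static bool tryGetSymbolOffset(const llvm::MCAsmLayout &Layout,
                                   const llvm::MCSymbolData &SD,
                                   uint64_t &Offset) {
      return Layout.getSymbolOffset(&SD, Offset);
    }
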
diff --git a/include/llvm/MC/MCAssembler.h b/include/llvm/MC/MCAssembler.h
index 34b760c..be13b36 100644
--- a/include/llvm/MC/MCAssembler.h
+++ b/include/llvm/MC/MCAssembler.h
@@ -52,7 +52,6 @@ public:
enum FragmentType {
FT_Align,
FT_Data,
- FT_Compressed,
FT_CompactEncodedInst,
FT_Fill,
FT_Relaxable,
@@ -87,7 +86,7 @@ private:
/// @}
protected:
- MCFragment(FragmentType _Kind, MCSectionData *_Parent = 0);
+ MCFragment(FragmentType _Kind, MCSectionData *_Parent = nullptr);
public:
// Only for sentinel.
@@ -138,7 +137,7 @@ class MCEncodedFragment : public MCFragment {
uint8_t BundlePadding;
public:
- MCEncodedFragment(MCFragment::FragmentType FType, MCSectionData *SD = 0)
+ MCEncodedFragment(MCFragment::FragmentType FType, MCSectionData *SD = nullptr)
: MCFragment(FType, SD), BundlePadding(0)
{
}
@@ -162,7 +161,6 @@ public:
return false;
case MCFragment::FT_Relaxable:
case MCFragment::FT_CompactEncodedInst:
- case MCFragment::FT_Compressed:
case MCFragment::FT_Data:
return true;
}
@@ -177,7 +175,7 @@ class MCEncodedFragmentWithFixups : public MCEncodedFragment {
public:
MCEncodedFragmentWithFixups(MCFragment::FragmentType FType,
- MCSectionData *SD = 0)
+ MCSectionData *SD = nullptr)
: MCEncodedFragment(FType, SD)
{
}
@@ -197,8 +195,7 @@ public:
static bool classof(const MCFragment *F) {
MCFragment::FragmentType Kind = F->getKind();
- return Kind == MCFragment::FT_Relaxable || Kind == MCFragment::FT_Data ||
- Kind == MCFragment::FT_Compressed;
+ return Kind == MCFragment::FT_Relaxable || Kind == MCFragment::FT_Data;
}
};
@@ -217,13 +214,8 @@ class MCDataFragment : public MCEncodedFragmentWithFixups {
/// Fixups - The list of fixups in this fragment.
SmallVector<MCFixup, 4> Fixups;
-protected:
- MCDataFragment(MCFragment::FragmentType FType, MCSectionData *SD = 0)
- : MCEncodedFragmentWithFixups(FType, SD), HasInstructions(false),
- AlignToBundleEnd(false) {}
-
public:
- MCDataFragment(MCSectionData *SD = 0)
+ MCDataFragment(MCSectionData *SD = nullptr)
: MCEncodedFragmentWithFixups(FT_Data, SD),
HasInstructions(false), AlignToBundleEnd(false)
{
@@ -255,21 +247,10 @@ public:
const_fixup_iterator fixup_end() const override {return Fixups.end();}
static bool classof(const MCFragment *F) {
- return F->getKind() == MCFragment::FT_Data ||
- F->getKind() == MCFragment::FT_Compressed;
+ return F->getKind() == MCFragment::FT_Data;
}
};
-class MCCompressedFragment: public MCDataFragment {
- mutable SmallVector<char, 32> CompressedContents;
-public:
- MCCompressedFragment(MCSectionData *SD = nullptr)
- : MCDataFragment(FT_Compressed, SD) {}
- const SmallVectorImpl<char> &getCompressedContents() const;
- using MCDataFragment::getContents;
- SmallVectorImpl<char> &getContents() override;
-};
-
/// This is a compact (memory-size-wise) fragment for holding an encoded
/// instruction (non-relaxable) that has no fixups registered. When applicable,
/// it can be used instead of MCDataFragment and lead to lower memory
@@ -283,7 +264,7 @@ class MCCompactEncodedInstFragment : public MCEncodedFragment {
SmallVector<char, 4> Contents;
public:
- MCCompactEncodedInstFragment(MCSectionData *SD = 0)
+ MCCompactEncodedInstFragment(MCSectionData *SD = nullptr)
: MCEncodedFragment(FT_CompactEncodedInst, SD), AlignToBundleEnd(false)
{
}
@@ -326,7 +307,7 @@ class MCRelaxableFragment : public MCEncodedFragmentWithFixups {
public:
MCRelaxableFragment(const MCInst &_Inst,
const MCSubtargetInfo &_STI,
- MCSectionData *SD = 0)
+ MCSectionData *SD = nullptr)
: MCEncodedFragmentWithFixups(FT_Relaxable, SD), Inst(_Inst), STI(_STI) {
}
@@ -382,7 +363,7 @@ class MCAlignFragment : public MCFragment {
public:
MCAlignFragment(unsigned _Alignment, int64_t _Value, unsigned _ValueSize,
- unsigned _MaxBytesToEmit, MCSectionData *SD = 0)
+ unsigned _MaxBytesToEmit, MCSectionData *SD = nullptr)
: MCFragment(FT_Align, SD), Alignment(_Alignment),
Value(_Value),ValueSize(_ValueSize),
MaxBytesToEmit(_MaxBytesToEmit), EmitNops(false) {}
@@ -423,7 +404,7 @@ class MCFillFragment : public MCFragment {
public:
MCFillFragment(int64_t _Value, unsigned _ValueSize, uint64_t _Size,
- MCSectionData *SD = 0)
+ MCSectionData *SD = nullptr)
: MCFragment(FT_Fill, SD),
Value(_Value), ValueSize(_ValueSize), Size(_Size) {
assert((!ValueSize || (Size % ValueSize) == 0) &&
@@ -456,7 +437,8 @@ class MCOrgFragment : public MCFragment {
int8_t Value;
public:
- MCOrgFragment(const MCExpr &_Offset, int8_t _Value, MCSectionData *SD = 0)
+ MCOrgFragment(const MCExpr &_Offset, int8_t _Value,
+ MCSectionData *SD = nullptr)
: MCFragment(FT_Org, SD),
Offset(&_Offset), Value(_Value) {}
@@ -485,7 +467,8 @@ class MCLEBFragment : public MCFragment {
SmallString<8> Contents;
public:
- MCLEBFragment(const MCExpr &Value_, bool IsSigned_, MCSectionData *SD = 0)
+ MCLEBFragment(const MCExpr &Value_, bool IsSigned_,
+ MCSectionData *SD = nullptr)
: MCFragment(FT_LEB, SD),
Value(&Value_), IsSigned(IsSigned_) { Contents.push_back(0); }
@@ -521,7 +504,7 @@ class MCDwarfLineAddrFragment : public MCFragment {
public:
MCDwarfLineAddrFragment(int64_t _LineDelta, const MCExpr &_AddrDelta,
- MCSectionData *SD = 0)
+ MCSectionData *SD = nullptr)
: MCFragment(FT_Dwarf, SD),
LineDelta(_LineDelta), AddrDelta(&_AddrDelta) { Contents.push_back(0); }
@@ -552,7 +535,8 @@ class MCDwarfCallFrameFragment : public MCFragment {
SmallString<8> Contents;
public:
- MCDwarfCallFrameFragment(const MCExpr &_AddrDelta, MCSectionData *SD = 0)
+ MCDwarfCallFrameFragment(const MCExpr &_AddrDelta,
+ MCSectionData *SD = nullptr)
: MCFragment(FT_DwarfFrame, SD),
AddrDelta(&_AddrDelta) { Contents.push_back(0); }
@@ -633,7 +617,7 @@ private:
public:
// Only for use as sentinel.
MCSectionData();
- MCSectionData(const MCSection &Section, MCAssembler *A = 0);
+ MCSectionData(const MCSection &Section, MCAssembler *A = nullptr);
const MCSection &getSection() const { return *Section; }
@@ -743,7 +727,7 @@ public:
// Only for use as sentinel.
MCSymbolData();
MCSymbolData(const MCSymbol &_Symbol, MCFragment *_Fragment, uint64_t _Offset,
- MCAssembler *A = 0);
+ MCAssembler *A = nullptr);
/// @name Accessors
/// @{
@@ -850,6 +834,9 @@ public:
typedef SymbolDataListType::const_iterator const_symbol_iterator;
typedef SymbolDataListType::iterator symbol_iterator;
+ typedef iterator_range<symbol_iterator> symbol_range;
+ typedef iterator_range<const_symbol_iterator> const_symbol_range;
+
typedef std::vector<std::string> FileNameVectorType;
typedef FileNameVectorType::const_iterator const_file_name_iterator;
@@ -915,7 +902,7 @@ private:
// here. Maybe when the relocation stuff moves to target specific,
// this can go with it? The streamer would need some target specific
// refactoring too.
- SmallPtrSet<const MCSymbol*, 64> ThumbFuncs;
+ mutable SmallPtrSet<const MCSymbol*, 64> ThumbFuncs;
/// \brief The bundle alignment size currently set in the assembler.
///
@@ -1008,9 +995,7 @@ public:
const MCAsmLayout &Layout) const;
/// Check whether a given symbol has been flagged with .thumb_func.
- bool isThumbFunc(const MCSymbol *Func) const {
- return ThumbFuncs.count(Func);
- }
+ bool isThumbFunc(const MCSymbol *Func) const;
/// Flag a function symbol as the target of a .thumb_func directive.
void setIsThumbFunc(const MCSymbol *Func) { ThumbFuncs.insert(Func); }
@@ -1115,6 +1100,9 @@ public:
symbol_iterator symbol_end() { return Symbols.end(); }
const_symbol_iterator symbol_end() const { return Symbols.end(); }
+ symbol_range symbols() { return make_range(symbol_begin(), symbol_end()); }
+ const_symbol_range symbols() const { return make_range(symbol_begin(), symbol_end()); }
+
size_t symbol_size() const { return Symbols.size(); }
/// @}
@@ -1203,7 +1191,7 @@ public:
}
MCSectionData &getOrCreateSectionData(const MCSection &Section,
- bool *Created = 0) {
+ bool *Created = nullptr) {
MCSectionData *&Entry = SectionMap[&Section];
if (Created) *Created = !Entry;
@@ -1214,22 +1202,27 @@ public:
}
bool hasSymbolData(const MCSymbol &Symbol) const {
- return SymbolMap.lookup(&Symbol) != 0;
+ return SymbolMap.lookup(&Symbol) != nullptr;
+ }
+
+ MCSymbolData &getSymbolData(const MCSymbol &Symbol) {
+ return const_cast<MCSymbolData &>(
+ static_cast<const MCAssembler &>(*this).getSymbolData(Symbol));
}
- MCSymbolData &getSymbolData(const MCSymbol &Symbol) const {
+ const MCSymbolData &getSymbolData(const MCSymbol &Symbol) const {
MCSymbolData *Entry = SymbolMap.lookup(&Symbol);
assert(Entry && "Missing symbol data!");
return *Entry;
}
MCSymbolData &getOrCreateSymbolData(const MCSymbol &Symbol,
- bool *Created = 0) {
+ bool *Created = nullptr) {
MCSymbolData *&Entry = SymbolMap[&Symbol];
if (Created) *Created = !Entry;
if (!Entry)
- Entry = new MCSymbolData(Symbol, 0, 0, this);
+ Entry = new MCSymbolData(Symbol, nullptr, 0, this);
return *Entry;
}
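
A small sketch exercising the MCAssembler additions above (not part of the patch): the new symbols() iterator_range and the Created flag of getOrCreateSymbolData. The function is hypothetical.

    #include "llvm/MC/MCAssembler.h"

    static unsigned countSymbols(llvm::MCAssembler &Asm,
                                 const llvm::MCSymbol &S) {
      bool Created = false;
      (void)Asm.getOrCreateSymbolData(S, &Created); // Created reports a fresh entry

      unsigned N = 0;
      for (llvm::MCSymbolData &SD : Asm.symbols()) { // range added in this patch
        (void)SD;
        ++N;
      }
      return N;
    }
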
diff --git a/include/llvm/MC/MCContext.h b/include/llvm/MC/MCContext.h
index 9091ed9..7557e76 100644
--- a/include/llvm/MC/MCContext.h
+++ b/include/llvm/MC/MCContext.h
@@ -137,7 +137,7 @@ namespace llvm {
/// The information gathered from labels that will have dwarf label
/// entries when generating dwarf assembly source files.
- std::vector<const MCGenDwarfLabelEntry *> MCGenDwarfLabelEntries;
+ std::vector<MCGenDwarfLabelEntry> MCGenDwarfLabelEntries;
/// The string to embed in the debug information for the compile unit, if
/// non-empty.
@@ -147,6 +147,9 @@ namespace llvm {
/// non-empty.
StringRef DwarfDebugProducer;
+ /// The maximum version of dwarf that we should emit.
+ uint16_t DwarfVersion;
+
/// Honor temporary labels, this is useful for debugging semantic
/// differences between temporary and non-temporary labels (primarily on
/// Darwin).
@@ -155,7 +158,11 @@ namespace llvm {
/// The Compile Unit ID that we are currently processing.
unsigned DwarfCompileUnitID;
- void *MachOUniquingMap, *ELFUniquingMap, *COFFUniquingMap;
+ typedef std::pair<std::string, std::string> SectionGroupPair;
+
+ StringMap<const MCSectionMachO*> MachOUniquingMap;
+ std::map<SectionGroupPair, const MCSectionELF *> ELFUniquingMap;
+ std::map<SectionGroupPair, const MCSectionCOFF *> COFFUniquingMap;
/// Do automatic reset in destructor
bool AutoReset;
@@ -167,8 +174,8 @@ namespace llvm {
public:
explicit MCContext(const MCAsmInfo *MAI, const MCRegisterInfo *MRI,
- const MCObjectFileInfo *MOFI, const SourceMgr *Mgr = 0,
- bool DoAutoReset = true);
+ const MCObjectFileInfo *MOFI,
+ const SourceMgr *Mgr = nullptr, bool DoAutoReset = true);
~MCContext();
const SourceMgr *getSourceManager() const { return SrcMgr; }
@@ -259,6 +266,8 @@ namespace llvm {
unsigned Flags, SectionKind Kind,
unsigned EntrySize, StringRef Group);
+ void renameELFSection(const MCSectionELF *Section, StringRef Name);
+
const MCSectionELF *CreateELFGroupSection();
const MCSectionCOFF *getCOFFSection(StringRef Section,
@@ -266,7 +275,7 @@ namespace llvm {
SectionKind Kind,
StringRef COMDATSymName,
int Selection,
- const MCSectionCOFF *Assoc = 0);
+ const MCSectionCOFF *Assoc = nullptr);
const MCSectionCOFF *getCOFFSection(StringRef Section,
unsigned Characteristics,
@@ -304,14 +313,6 @@ namespace llvm {
bool isValidDwarfFileNumber(unsigned FileNumber, unsigned CUID = 0);
- bool hasDwarfFiles() const {
- // Traverse MCDwarfFilesCUMap and check whether each entry is empty.
- for (const auto &FileTable : MCDwarfLineTablesCUMap)
- if (!FileTable.second.getMCDwarfFiles().empty())
- return true;
- return false;
- }
-
const std::map<unsigned, MCDwarfLineTable> &getMCDwarfLineTables() const {
return MCDwarfLineTablesCUMap;
}
@@ -385,11 +386,10 @@ namespace llvm {
void setGenDwarfSectionEndSym(MCSymbol *Sym) {
GenDwarfSectionEndSym = Sym;
}
- const std::vector<const MCGenDwarfLabelEntry *>
- &getMCGenDwarfLabelEntries() const {
+ const std::vector<MCGenDwarfLabelEntry> &getMCGenDwarfLabelEntries() const {
return MCGenDwarfLabelEntries;
}
- void addMCGenDwarfLabelEntry(const MCGenDwarfLabelEntry *E) {
+ void addMCGenDwarfLabelEntry(const MCGenDwarfLabelEntry &E) {
MCGenDwarfLabelEntries.push_back(E);
}
@@ -399,6 +399,9 @@ namespace llvm {
void setDwarfDebugProducer(StringRef S) { DwarfDebugProducer = S; }
StringRef getDwarfDebugProducer() { return DwarfDebugProducer; }
+ void setDwarfVersion(uint16_t v) { DwarfVersion = v; }
+ uint16_t getDwarfVersion() const { return DwarfVersion; }
+
/// @}
char *getSecureLogFile() { return SecureLogFile; }
@@ -420,7 +423,7 @@ namespace llvm {
// Unrecoverable error has occurred. Display the best diagnostic we can
// and bail via exit(1). For now, most MC backend errors are unrecoverable.
// FIXME: We should really do something about that.
- LLVM_ATTRIBUTE_NORETURN void FatalError(SMLoc L, const Twine &Msg);
+ LLVM_ATTRIBUTE_NORETURN void FatalError(SMLoc L, const Twine &Msg) const;
};
} // end namespace llvm
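
A tiny sketch of the new DWARF version plumbing on MCContext shown above (not part of the patch); the helper is hypothetical.

    #include "llvm/MC/MCContext.h"

    static void requestDwarf4(llvm::MCContext &Ctx) {
      Ctx.setDwarfVersion(4);          // maximum DWARF version to emit
      (void)Ctx.getDwarfVersion();     // later read back by the emitters
    }
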
diff --git a/include/llvm/MC/MCDisassembler.h b/include/llvm/MC/MCDisassembler.h
index d545fc7..9d441bb 100644
--- a/include/llvm/MC/MCDisassembler.h
+++ b/include/llvm/MC/MCDisassembler.h
@@ -10,7 +10,6 @@
#define LLVM_MC_MCDISASSEMBLER_H
#include "llvm-c/Disassembler.h"
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/MC/MCRelocationInfo.h"
#include "llvm/MC/MCSymbolizer.h"
#include "llvm/Support/DataTypes.h"
@@ -56,9 +55,8 @@ public:
};
/// Constructor - Performs initial setup for the disassembler.
- MCDisassembler(const MCSubtargetInfo &STI)
- : GetOpInfo(0), SymbolLookUp(0), DisInfo(0), Ctx(0), STI(STI),
- Symbolizer(), CommentStream(0) {}
+ MCDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx)
+ : Ctx(Ctx), STI(STI), Symbolizer(), CommentStream(nullptr) {}
virtual ~MCDisassembler();
@@ -85,18 +83,7 @@ public:
raw_ostream &vStream,
raw_ostream &cStream) const = 0;
private:
- //
- // Hooks for symbolic disassembly via the public 'C' interface.
- //
- // The function to get the symbolic information for operands.
- LLVMOpInfoCallback GetOpInfo;
- // The function to lookup a symbol name.
- LLVMSymbolLookupCallback SymbolLookUp;
- // The pointer to the block of symbolic information for above call back.
- void *DisInfo;
- // The assembly context for creating symbols and MCExprs in place of
- // immediate operands when there is symbolic information.
- MCContext *Ctx;
+ MCContext &Ctx;
protected:
// Subtarget information, for instruction decoding predicates if required.
@@ -116,19 +103,7 @@ public:
/// This takes ownership of \p Symzer, and deletes the previously set one.
void setSymbolizer(std::unique_ptr<MCSymbolizer> Symzer);
- /// Sets up an external symbolizer that uses the C API callbacks.
- void setupForSymbolicDisassembly(LLVMOpInfoCallback GetOpInfo,
- LLVMSymbolLookupCallback SymbolLookUp,
- void *DisInfo,
- MCContext *Ctx,
- std::unique_ptr<MCRelocationInfo> &RelInfo);
-
- LLVMOpInfoCallback getLLVMOpInfoCallback() const { return GetOpInfo; }
- LLVMSymbolLookupCallback getLLVMSymbolLookupCallback() const {
- return SymbolLookUp;
- }
- void *getDisInfoBlock() const { return DisInfo; }
- MCContext *getMCContext() const { return Ctx; }
+ MCContext& getContext() const { return Ctx; }
const MCSubtargetInfo& getSubtargetInfo() const { return STI; }
diff --git a/include/llvm/MC/MCDwarf.h b/include/llvm/MC/MCDwarf.h
index 6e77c6c..6df8a19 100644
--- a/include/llvm/MC/MCDwarf.h
+++ b/include/llvm/MC/MCDwarf.h
@@ -30,6 +30,7 @@
namespace llvm {
class MCAsmBackend;
class MCContext;
+class MCObjectStreamer;
class MCSection;
class MCStreamer;
class MCSymbol;
@@ -147,7 +148,7 @@ public:
// This is called when an instruction is assembled into the specified
// section and if there is information from the last .loc directive that
// has yet to have a line entry made for it is made.
- static void Make(MCStreamer *MCOS, const MCSection *Section);
+ static void Make(MCObjectStreamer *MCOS, const MCSection *Section);
};
/// MCLineSection - Instances of this class represent the line information
@@ -210,10 +211,10 @@ class MCDwarfLineTable {
public:
// This emits the Dwarf file and the line tables for all Compile Units.
- static void Emit(MCStreamer *MCOS);
+ static void Emit(MCObjectStreamer *MCOS);
// This emits the Dwarf file and the line tables for a given Compile Unit.
- void EmitCU(MCStreamer *MCOS) const;
+ void EmitCU(MCObjectStreamer *MCOS) const;
unsigned getFile(StringRef &Directory, StringRef &FileName,
unsigned FileNumber = 0);
@@ -464,9 +465,9 @@ public:
struct MCDwarfFrameInfo {
MCDwarfFrameInfo()
- : Begin(0), End(0), Personality(0), Lsda(0), Function(0), Instructions(),
- PersonalityEncoding(), LsdaEncoding(0), CompactUnwindEncoding(0),
- IsSignalFrame(false), IsSimple(false) {}
+ : Begin(nullptr), End(nullptr), Personality(nullptr), Lsda(nullptr),
+ Function(nullptr), Instructions(), PersonalityEncoding(), LsdaEncoding(0),
+ CompactUnwindEncoding(0), IsSignalFrame(false), IsSimple(false) {}
MCSymbol *Begin;
MCSymbol *End;
const MCSymbol *Personality;
@@ -485,9 +486,8 @@ public:
//
// This emits the frame info section.
//
- static void Emit(MCStreamer &streamer, MCAsmBackend *MAB,
- bool usingCFI, bool isEH);
- static void EmitAdvanceLoc(MCStreamer &Streamer, uint64_t AddrDelta);
+ static void Emit(MCObjectStreamer &streamer, MCAsmBackend *MAB, bool isEH);
+ static void EmitAdvanceLoc(MCObjectStreamer &Streamer, uint64_t AddrDelta);
static void EncodeAdvanceLoc(MCContext &Context, uint64_t AddrDelta,
raw_ostream &OS);
};
diff --git a/include/llvm/MC/MCELFStreamer.h b/include/llvm/MC/MCELFStreamer.h
index ebd5d57..be39128 100644
--- a/include/llvm/MC/MCELFStreamer.h
+++ b/include/llvm/MC/MCELFStreamer.h
@@ -61,18 +61,17 @@ public:
void EmitCOFFSymbolType(int Type) override;
void EndCOFFSymbolDef() override;
- MCSymbolData &getOrCreateSymbolData(const MCSymbol *Symbol) override;
-
void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) override;
void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
unsigned ByteAlignment) override;
- void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
+ void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = nullptr,
uint64_t Size = 0, unsigned ByteAlignment = 0) override;
void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
uint64_t Size, unsigned ByteAlignment = 0) override;
- void EmitValueImpl(const MCExpr *Value, unsigned Size) override;
+ void EmitValueImpl(const MCExpr *Value, unsigned Size,
+ const SMLoc &Loc = SMLoc()) override;
void EmitFileDirective(StringRef Filename) override;
diff --git a/include/llvm/MC/MCELFSymbolFlags.h b/include/llvm/MC/MCELFSymbolFlags.h
index 5b82a58..2f1f561 100644
--- a/include/llvm/MC/MCELFSymbolFlags.h
+++ b/include/llvm/MC/MCELFSymbolFlags.h
@@ -24,9 +24,7 @@ namespace llvm {
ELF_STT_Shift = 0, // Shift value for STT_* flags.
ELF_STB_Shift = 4, // Shift value for STB_* flags.
ELF_STV_Shift = 8, // Shift value for STV_* flags.
- ELF_STO_Shift = 10, // Shift value for STO_* flags.
- ELF_Other_Shift = 16 // Shift value for llvm local flags,
- // not part of the final object file
+ ELF_STO_Shift = 10 // Shift value for STO_* flags.
};
enum ELFSymbolFlags {
@@ -49,9 +47,7 @@ namespace llvm {
ELF_STV_Default = (ELF::STV_DEFAULT << ELF_STV_Shift),
ELF_STV_Internal = (ELF::STV_INTERNAL << ELF_STV_Shift),
ELF_STV_Hidden = (ELF::STV_HIDDEN << ELF_STV_Shift),
- ELF_STV_Protected = (ELF::STV_PROTECTED << ELF_STV_Shift),
-
- ELF_Other_ThumbFunc = (1 << ELF_Other_Shift)
+ ELF_STV_Protected = (ELF::STV_PROTECTED << ELF_STV_Shift)
};
} // end namespace llvm
diff --git a/include/llvm/MC/MCExpr.h b/include/llvm/MC/MCExpr.h
index 0033a54..ca5cecb 100644
--- a/include/llvm/MC/MCExpr.h
+++ b/include/llvm/MC/MCExpr.h
@@ -53,8 +53,9 @@ protected:
bool EvaluateAsRelocatableImpl(MCValue &Res, const MCAssembler *Asm,
const MCAsmLayout *Layout,
- const SectionAddrMap *Addrs,
- bool InSet) const;
+ const SectionAddrMap *Addrs, bool InSet,
+ bool ForceVarExpansion) const;
+
public:
/// @name Accessors
/// @{
@@ -93,6 +94,14 @@ public:
/// @result - True on success.
bool EvaluateAsRelocatable(MCValue &Res, const MCAsmLayout *Layout) const;
+ /// \brief Try to evaluate the expression to the form (a - b + constant) where
+ /// neither a nor b are variables.
+ ///
+ /// This is a more aggressive variant of EvaluateAsRelocatable. The intended
+ /// use is for when relocations are not available, like the symbol value in
+ /// the symbol table.
+ bool EvaluateAsValue(MCValue &Res, const MCAsmLayout *Layout) const;
+
/// FindAssociatedSection - Find the "associated section" for this expression,
/// which is currently defined as the absolute section for constants, or
/// otherwise the section associated with the first defined symbol in the
@@ -253,6 +262,8 @@ public:
VK_Mips_GOT_LO16,
VK_Mips_CALL_HI16,
VK_Mips_CALL_LO16,
+ VK_Mips_PCREL_HI16,
+ VK_Mips_PCREL_LO16,
VK_COFF_IMGREL32 // symbol@imgrel (image-relative)
};
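
A sketch contrasting the existing EvaluateAsRelocatable with the new, more aggressive EvaluateAsValue documented above, which is intended for places where no relocation can be emitted, such as the value recorded in the symbol table (not part of the patch; the fold helper is invented).

    #include "llvm/MC/MCExpr.h"
    #include "llvm/MC/MCValue.h"

    static bool foldForSymbolTable(const llvm::MCExpr &E,
                                   const llvm::MCAsmLayout &Layout,
                                   llvm::MCValue &Res) {
      if (E.EvaluateAsValue(Res, &Layout))
        return true;                    // Res is (a - b + constant), no variables
      return E.EvaluateAsRelocatable(Res, &Layout); // weaker fallback
    }
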
diff --git a/include/llvm/MC/MCExternalSymbolizer.h b/include/llvm/MC/MCExternalSymbolizer.h
index cab9152..2c7d237 100644
--- a/include/llvm/MC/MCExternalSymbolizer.h
+++ b/include/llvm/MC/MCExternalSymbolizer.h
@@ -26,7 +26,7 @@ namespace llvm {
///
/// See llvm-c/Disassembler.h.
class MCExternalSymbolizer : public MCSymbolizer {
-
+protected:
/// \name Hooks for symbolic disassembly via the public 'C' interface.
/// @{
/// The function to get the symbolic information for operands.
diff --git a/include/llvm/MC/MCFixup.h b/include/llvm/MC/MCFixup.h
index e6d675f..98a1419 100644
--- a/include/llvm/MC/MCFixup.h
+++ b/include/llvm/MC/MCFixup.h
@@ -88,8 +88,6 @@ public:
MCFixupKind getKind() const { return MCFixupKind(Kind); }
- MCSymbolRefExpr::VariantKind getAccessVariant() const;
-
uint32_t getOffset() const { return Offset; }
void setOffset(uint32_t Value) { Offset = Value; }
diff --git a/include/llvm/MC/MCFunction.h b/include/llvm/MC/MCFunction.h
index 22c9192..bfa470b 100644
--- a/include/llvm/MC/MCFunction.h
+++ b/include/llvm/MC/MCFunction.h
@@ -17,6 +17,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCInst.h"
+#include <memory>
#include <string>
#include <vector>
@@ -88,13 +89,12 @@ class MCFunction {
std::string Name;
MCModule *ParentModule;
- typedef std::vector<MCBasicBlock*> BasicBlockListTy;
+ typedef std::vector<std::unique_ptr<MCBasicBlock>> BasicBlockListTy;
BasicBlockListTy Blocks;
// MCModule owns the function.
friend class MCModule;
MCFunction(StringRef Name, MCModule *Parent);
- ~MCFunction();
public:
/// \brief Create an MCBasicBlock backed by Insts and add it to this function.
@@ -126,10 +126,10 @@ public:
const_iterator end() const { return Blocks.end(); }
iterator end() { return Blocks.end(); }
- const MCBasicBlock* front() const { return Blocks.front(); }
- MCBasicBlock* front() { return Blocks.front(); }
- const MCBasicBlock* back() const { return Blocks.back(); }
- MCBasicBlock* back() { return Blocks.back(); }
+ const MCBasicBlock* front() const { return Blocks.front().get(); }
+ MCBasicBlock* front() { return Blocks.front().get(); }
+ const MCBasicBlock* back() const { return Blocks.back().get(); }
+ MCBasicBlock* back() { return Blocks.back().get(); }
/// \brief Find the basic block, if any, that starts at \p StartAddr.
const MCBasicBlock *find(uint64_t StartAddr) const;
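
With the block list switched to std::unique_ptr ownership above, iteration now yields owning pointers while front()/back() unwrap with .get(). A sketch (not part of the patch):

    #include "llvm/MC/MCFunction.h"
    #include <cstddef>
    #include <memory>

    static size_t countBlocks(const llvm::MCFunction &Fn) {
      size_t N = 0;
      // Elements are owning pointers now, so the loop variable is a
      // unique_ptr reference rather than a raw MCBasicBlock*.
      for (const std::unique_ptr<llvm::MCBasicBlock> &BB : Fn) {
        (void)BB;
        ++N;
      }
      return N;
    }
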
diff --git a/include/llvm/MC/MCInst.h b/include/llvm/MC/MCInst.h
index 4766815..6918280 100644
--- a/include/llvm/MC/MCInst.h
+++ b/include/llvm/MC/MCInst.h
@@ -184,18 +184,18 @@ public:
/// \brief Dump the MCInst as prettily as possible using the additional MC
/// structures, if given. Operators are separated by the \p Separator
/// string.
- void dump_pretty(raw_ostream &OS, const MCAsmInfo *MAI = 0,
- const MCInstPrinter *Printer = 0,
+ void dump_pretty(raw_ostream &OS, const MCAsmInfo *MAI = nullptr,
+ const MCInstPrinter *Printer = nullptr,
StringRef Separator = " ") const;
};
inline raw_ostream& operator<<(raw_ostream &OS, const MCOperand &MO) {
- MO.print(OS, 0);
+ MO.print(OS, nullptr);
return OS;
}
inline raw_ostream& operator<<(raw_ostream &OS, const MCInst &MI) {
- MI.print(OS, 0);
+ MI.print(OS, nullptr);
return OS;
}
diff --git a/include/llvm/MC/MCInstPrinter.h b/include/llvm/MC/MCInstPrinter.h
index b4258be..7f55b29 100644
--- a/include/llvm/MC/MCInstPrinter.h
+++ b/include/llvm/MC/MCInstPrinter.h
@@ -57,8 +57,9 @@ protected:
public:
MCInstPrinter(const MCAsmInfo &mai, const MCInstrInfo &mii,
const MCRegisterInfo &mri)
- : CommentStream(0), MAI(mai), MII(mii), MRI(mri), AvailableFeatures(0),
- UseMarkup(0), PrintImmHex(0), PrintHexStyle(HexStyle::C) {}
+ : CommentStream(nullptr), MAI(mai), MII(mii), MRI(mri),
+ AvailableFeatures(0), UseMarkup(0), PrintImmHex(0),
+ PrintHexStyle(HexStyle::C) {}
virtual ~MCInstPrinter();
diff --git a/include/llvm/MC/MCInstrDesc.h b/include/llvm/MC/MCInstrDesc.h
index 214b593..5896de7 100644
--- a/include/llvm/MC/MCInstrDesc.h
+++ b/include/llvm/MC/MCInstrDesc.h
@@ -504,7 +504,7 @@ public:
/// \brief Return the number of implicit uses this instruction has.
unsigned getNumImplicitUses() const {
- if (ImplicitUses == 0) return 0;
+ if (!ImplicitUses) return 0;
unsigned i = 0;
for (; ImplicitUses[i]; ++i) /*empty*/;
return i;
@@ -526,7 +526,7 @@ public:
/// \brief Return the number of implicit defs this instruct has.
unsigned getNumImplicitDefs() const {
- if (ImplicitDefs == 0) return 0;
+ if (!ImplicitDefs) return 0;
unsigned i = 0;
for (; ImplicitDefs[i]; ++i) /*empty*/;
return i;
@@ -544,7 +544,7 @@ public:
/// \brief Return true if this instruction implicitly
/// defines the specified physical register.
bool hasImplicitDefOfPhysReg(unsigned Reg,
- const MCRegisterInfo *MRI = 0) const {
+ const MCRegisterInfo *MRI = nullptr) const {
if (const uint16_t *ImpDefs = ImplicitDefs)
for (; *ImpDefs; ++ImpDefs)
if (*ImpDefs == Reg || (MRI && MRI->isSubRegister(Reg, *ImpDefs)))
diff --git a/include/llvm/MC/MCInstrItineraries.h b/include/llvm/MC/MCInstrItineraries.h
index c4f9e1c..5104345 100644
--- a/include/llvm/MC/MCInstrItineraries.h
+++ b/include/llvm/MC/MCInstrItineraries.h
@@ -119,8 +119,8 @@ public:
/// Ctors.
///
InstrItineraryData() : SchedModel(&MCSchedModel::DefaultSchedModel),
- Stages(0), OperandCycles(0),
- Forwardings(0), Itineraries(0) {}
+ Stages(nullptr), OperandCycles(nullptr),
+ Forwardings(nullptr), Itineraries(nullptr) {}
InstrItineraryData(const MCSchedModel *SM, const InstrStage *S,
const unsigned *OS, const unsigned *F)
@@ -129,7 +129,7 @@ public:
/// isEmpty - Returns true if there are no itineraries.
///
- bool isEmpty() const { return Itineraries == 0; }
+ bool isEmpty() const { return Itineraries == nullptr; }
/// isEndMarker - Returns true if the index is for the end marker
/// itinerary.
diff --git a/include/llvm/MC/MCModule.h b/include/llvm/MC/MCModule.h
index 63635c7..aa389cb 100644
--- a/include/llvm/MC/MCModule.h
+++ b/include/llvm/MC/MCModule.h
@@ -18,6 +18,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
+#include <memory>
#include <vector>
namespace llvm {
@@ -73,7 +74,7 @@ class MCModule {
/// \name Function tracking
/// @{
- typedef std::vector<MCFunction*> FunctionListTy;
+ typedef std::vector<std::unique_ptr<MCFunction>> FunctionListTy;
FunctionListTy Functions;
/// @}
@@ -87,7 +88,7 @@ class MCModule {
friend class MCObjectDisassembler;
public:
- MCModule() : Entrypoint(0) { }
+ MCModule();
~MCModule();
/// \name Create a new MCAtom covering the specified offset range.
diff --git a/include/llvm/MC/MCObjectFileInfo.h b/include/llvm/MC/MCObjectFileInfo.h
index 1c5c19e..1a56040 100644
--- a/include/llvm/MC/MCObjectFileInfo.h
+++ b/include/llvm/MC/MCObjectFileInfo.h
@@ -44,11 +44,10 @@ protected:
/// section.
bool SupportsCompactUnwindWithoutEHFrame;
- /// PersonalityEncoding, LSDAEncoding, FDEEncoding, TTypeEncoding - Some
- /// encoding values for EH.
+ /// PersonalityEncoding, LSDAEncoding, TTypeEncoding - Some encoding values
+ /// for EH.
unsigned PersonalityEncoding;
unsigned LSDAEncoding;
- unsigned FDEEncoding;
unsigned FDECFIEncoding;
unsigned TTypeEncoding;
@@ -217,9 +216,7 @@ public:
unsigned getPersonalityEncoding() const { return PersonalityEncoding; }
unsigned getLSDAEncoding() const { return LSDAEncoding; }
- unsigned getFDEEncoding(bool CFI) const {
- return CFI ? FDECFIEncoding : FDEEncoding;
- }
+ unsigned getFDEEncoding() const { return FDECFIEncoding; }
unsigned getTTypeEncoding() const { return TTypeEncoding; }
unsigned getCompactUnwindDwarfEHFrameOnly() const {
diff --git a/include/llvm/MC/MCObjectStreamer.h b/include/llvm/MC/MCObjectStreamer.h
index a42b7a05..e41a8ba 100644
--- a/include/llvm/MC/MCObjectStreamer.h
+++ b/include/llvm/MC/MCObjectStreamer.h
@@ -35,6 +35,8 @@ class MCObjectStreamer : public MCStreamer {
MCAssembler *Assembler;
MCSectionData *CurSectionData;
MCSectionData::iterator CurInsertionPoint;
+ bool EmitEHFrame;
+ bool EmitDebugFrame;
virtual void EmitInstToData(const MCInst &Inst, const MCSubtargetInfo&) = 0;
void EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame) override;
@@ -54,6 +56,12 @@ public:
/// Object streamers require the integrated assembler.
bool isIntegratedAssemblerRequired() const override { return true; }
+ MCSymbolData &getOrCreateSymbolData(const MCSymbol *Symbol) {
+ return getAssembler().getOrCreateSymbolData(*Symbol);
+ }
+ void EmitFrames(MCAsmBackend *MAB);
+ void EmitCFISections(bool EH, bool Debug) override;
+
protected:
MCSectionData *getCurrentSectionData() const {
return CurSectionData;
@@ -81,7 +89,8 @@ public:
void EmitLabel(MCSymbol *Symbol) override;
void EmitDebugLabel(MCSymbol *Symbol) override;
void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) override;
- void EmitValueImpl(const MCExpr *Value, unsigned Size) override;
+ void EmitValueImpl(const MCExpr *Value, unsigned Size,
+ const SMLoc &Loc = SMLoc()) override;
void EmitULEB128Value(const MCExpr *Value) override;
void EmitSLEB128Value(const MCExpr *Value) override;
void EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) override;
@@ -109,9 +118,9 @@ public:
StringRef FileName) override;
void EmitDwarfAdvanceLineAddr(int64_t LineDelta, const MCSymbol *LastLabel,
const MCSymbol *Label,
- unsigned PointerSize) override;
+ unsigned PointerSize);
void EmitDwarfAdvanceFrameAddr(const MCSymbol *LastLabel,
- const MCSymbol *Label) override;
+ const MCSymbol *Label);
void EmitGPRel32Value(const MCExpr *Value) override;
void EmitGPRel64Value(const MCExpr *Value) override;
void EmitFill(uint64_t NumBytes, uint8_t FillValue) override;
diff --git a/include/llvm/MC/MCParser/AsmLexer.h b/include/llvm/MC/MCParser/AsmLexer.h
index f36011c..59b5c09 100644
--- a/include/llvm/MC/MCParser/AsmLexer.h
+++ b/include/llvm/MC/MCParser/AsmLexer.h
@@ -42,7 +42,7 @@ public:
AsmLexer(const MCAsmInfo &MAI);
~AsmLexer();
- void setBuffer(const MemoryBuffer *buf, const char *ptr = NULL);
+ void setBuffer(const MemoryBuffer *buf, const char *ptr = nullptr);
StringRef LexUntilEndOfStatement() override;
StringRef LexUntilEndOfLine();
diff --git a/include/llvm/MC/MCParser/MCAsmParser.h b/include/llvm/MC/MCParser/MCAsmParser.h
index 0389caa..f751786 100644
--- a/include/llvm/MC/MCParser/MCAsmParser.h
+++ b/include/llvm/MC/MCParser/MCAsmParser.h
@@ -39,7 +39,7 @@ public:
unsigned Length, Size, Type;
void clear() {
- OpDecl = 0;
+ OpDecl = nullptr;
IsVarDecl = false;
Length = 1;
Size = 0;
diff --git a/include/llvm/MC/MCParser/MCParsedAsmOperand.h b/include/llvm/MC/MCParser/MCParsedAsmOperand.h
index 818fbbd..e8740aa 100644
--- a/include/llvm/MC/MCParser/MCParsedAsmOperand.h
+++ b/include/llvm/MC/MCParser/MCParsedAsmOperand.h
@@ -38,7 +38,7 @@ public:
unsigned getMCOperandNum() { return MCOperandNum; }
virtual StringRef getSymName() { return StringRef(); }
- virtual void *getOpDecl() { return 0; }
+ virtual void *getOpDecl() { return nullptr; }
/// isToken - Is this a token operand?
virtual bool isToken() const = 0;
diff --git a/include/llvm/MC/MCRegisterInfo.h b/include/llvm/MC/MCRegisterInfo.h
index 3fa89c1..766f631 100644
--- a/include/llvm/MC/MCRegisterInfo.h
+++ b/include/llvm/MC/MCRegisterInfo.h
@@ -159,7 +159,7 @@ private:
const MCRegisterClass *Classes; // Pointer to the regclass array
unsigned NumClasses; // Number of entries in the array
unsigned NumRegUnits; // Number of regunits.
- const uint16_t (*RegUnitRoots)[2]; // Pointer to regunit root table.
+ const MCPhysReg (*RegUnitRoots)[2]; // Pointer to regunit root table.
const MCPhysReg *DiffLists; // Pointer to the difflists array
const char *RegStrings; // Pointer to the string table.
const uint16_t *SubRegIndices; // Pointer to the subreg lookup
@@ -191,7 +191,7 @@ public:
protected:
/// Create an invalid iterator. Call init() to point to something useful.
- DiffListIterator() : Val(0), List(0) {}
+ DiffListIterator() : Val(0), List(nullptr) {}
/// init - Point the iterator to InitVal, decoding subsequent values from
/// DiffList. The iterator will initially point to InitVal, sub-classes are
@@ -223,7 +223,7 @@ public:
void operator++() {
// The end of the list is encoded as a 0 differential.
if (!advance())
- List = 0;
+ List = nullptr;
}
};
@@ -239,7 +239,7 @@ public:
void InitMCRegisterInfo(const MCRegisterDesc *D, unsigned NR, unsigned RA,
unsigned PC,
const MCRegisterClass *C, unsigned NC,
- const uint16_t (*RURoots)[2],
+ const MCPhysReg (*RURoots)[2],
unsigned NRU,
const MCPhysReg *DL,
const char *Strings,
diff --git a/include/llvm/MC/MCSchedule.h b/include/llvm/MC/MCSchedule.h
index d1ab411..862a0fd 100644
--- a/include/llvm/MC/MCSchedule.h
+++ b/include/llvm/MC/MCSchedule.h
@@ -159,6 +159,14 @@ public:
unsigned MicroOpBufferSize;
static const unsigned DefaultMicroOpBufferSize = 0;
+ // LoopMicroOpBufferSize is the number of micro-ops that the processor may
+ // buffer for optimized loop execution. More generally, this represents the
+ // optimal number of micro-ops in a loop body. A loop may be partially
+ // unrolled to bring the count of micro-ops in the loop body closer to this
+ // number.
+ unsigned LoopMicroOpBufferSize;
+ static const unsigned DefaultLoopMicroOpBufferSize = 0;
+
// LoadLatency is the expected latency of load instructions.
//
// If MinLatency >= 0, this may be overriden for individual load opcodes by
@@ -198,23 +206,24 @@ public:
// MCSchedModel instead of using a generated itinerary.
MCSchedModel(): IssueWidth(DefaultIssueWidth),
MicroOpBufferSize(DefaultMicroOpBufferSize),
+ LoopMicroOpBufferSize(DefaultLoopMicroOpBufferSize),
LoadLatency(DefaultLoadLatency),
HighLatency(DefaultHighLatency),
MispredictPenalty(DefaultMispredictPenalty),
- CompleteModel(true),
- ProcID(0), ProcResourceTable(0), SchedClassTable(0),
- NumProcResourceKinds(0), NumSchedClasses(0),
- InstrItineraries(0) {
+ CompleteModel(true), ProcID(0), ProcResourceTable(nullptr),
+ SchedClassTable(nullptr), NumProcResourceKinds(0),
+ NumSchedClasses(0), InstrItineraries(nullptr) {
(void)NumProcResourceKinds;
(void)NumSchedClasses;
}
// Table-gen driven ctor.
- MCSchedModel(unsigned iw, int mbs, unsigned ll, unsigned hl,
+ MCSchedModel(unsigned iw, int mbs, int lmbs, unsigned ll, unsigned hl,
unsigned mp, bool cm, unsigned pi, const MCProcResourceDesc *pr,
const MCSchedClassDesc *sc, unsigned npr, unsigned nsc,
const InstrItinerary *ii):
- IssueWidth(iw), MicroOpBufferSize(mbs), LoadLatency(ll), HighLatency(hl),
+ IssueWidth(iw), MicroOpBufferSize(mbs), LoopMicroOpBufferSize(lmbs),
+ LoadLatency(ll), HighLatency(hl),
MispredictPenalty(mp), CompleteModel(cm), ProcID(pi),
ProcResourceTable(pr), SchedClassTable(sc), NumProcResourceKinds(npr),
NumSchedClasses(nsc), InstrItineraries(ii) {}
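
One way a loop transform could consume the new LoopMicroOpBufferSize hint described above: partially unroll so the unrolled body approaches the buffered micro-op count. This is an illustrative heuristic only, not the in-tree unroller (not part of the patch).

    #include "llvm/MC/MCSchedule.h"
    #include <algorithm>

    static unsigned pickUnrollFactor(const llvm::MCSchedModel &SM,
                                     unsigned BodyMicroOps) {
      // DefaultLoopMicroOpBufferSize (0) means "no information"; leave the
      // loop alone in that case.
      if (SM.LoopMicroOpBufferSize == 0 || BodyMicroOps == 0)
        return 1;
      return std::max(1u, SM.LoopMicroOpBufferSize / BodyMicroOps);
    }
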
diff --git a/include/llvm/MC/MCSectionCOFF.h b/include/llvm/MC/MCSectionCOFF.h
index aa02d9a..a428f9e 100644
--- a/include/llvm/MC/MCSectionCOFF.h
+++ b/include/llvm/MC/MCSectionCOFF.h
@@ -58,7 +58,7 @@ class MCSymbol;
assert ((Characteristics & 0x00F00000) == 0 &&
"alignment must not be set upon section creation");
assert ((Selection == COFF::IMAGE_COMDAT_SELECT_ASSOCIATIVE) ==
- (Assoc != 0) &&
+ (Assoc != nullptr) &&
"associative COMDAT section must have an associated section");
}
~MCSectionCOFF();
@@ -79,7 +79,8 @@ class MCSymbol;
int getSelection() const { return Selection; }
const MCSectionCOFF *getAssocSection() const { return Assoc; }
- void setSelection(int Selection, const MCSectionCOFF *Assoc = 0) const;
+ void setSelection(int Selection,
+ const MCSectionCOFF *Assoc = nullptr) const;
void PrintSwitchToSection(const MCAsmInfo &MAI, raw_ostream &OS,
const MCExpr *Subsection) const override;
diff --git a/include/llvm/MC/MCSectionELF.h b/include/llvm/MC/MCSectionELF.h
index 89c02cc..5ec23f1 100644
--- a/include/llvm/MC/MCSectionELF.h
+++ b/include/llvm/MC/MCSectionELF.h
@@ -14,7 +14,7 @@
#ifndef LLVM_MC_MCSECTIONELF_H
#define LLVM_MC_MCSECTIONELF_H
-#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Debug.h"
@@ -53,6 +53,9 @@ private:
: MCSection(SV_ELF, K), SectionName(Section), Type(type), Flags(flags),
EntrySize(entrySize), Group(group) {}
~MCSectionELF();
+
+ void setSectionName(StringRef Name) { SectionName = Name; }
+
public:
/// ShouldOmitSectionDirective - Decides whether a '.section' directive
diff --git a/include/llvm/MC/MCStreamer.h b/include/llvm/MC/MCStreamer.h
index 8ee60c1..2a8367a 100644
--- a/include/llvm/MC/MCStreamer.h
+++ b/include/llvm/MC/MCStreamer.h
@@ -121,6 +121,8 @@ public:
virtual void AnnotateTLSDescriptorSequence(const MCSymbolRefExpr *SRE);
+ virtual void emitThumbSet(MCSymbol *Symbol, const MCExpr *Value);
+
void finish() override;
/// Callback used to implement the ldr= pseudo.
@@ -152,9 +154,6 @@ class MCStreamer {
MCStreamer(const MCStreamer &) LLVM_DELETED_FUNCTION;
MCStreamer &operator=(const MCStreamer &) LLVM_DELETED_FUNCTION;
- bool EmitEHFrame;
- bool EmitDebugFrame;
-
std::vector<MCDwarfFrameInfo> FrameInfos;
MCDwarfFrameInfo *getCurrentFrameInfo();
MCSymbol *EmitCFICommon();
@@ -187,7 +186,6 @@ protected:
virtual void EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame);
void RecordProcEnd(MCDwarfFrameInfo &Frame);
virtual void EmitCFIEndProcImpl(MCDwarfFrameInfo &CurFrame);
- void EmitFrames(MCAsmBackend *MAB, bool usingCFI);
MCWin64EHUnwindInfo *getCurrentW64UnwindInfo() {
return CurrentW64UnwindInfo;
@@ -332,7 +330,8 @@ public:
/// @p Section. This is required to update CurSection.
///
/// This corresponds to assembler directives like .section, .text, etc.
- void SwitchSection(const MCSection *Section, const MCExpr *Subsection = 0) {
+ void SwitchSection(const MCSection *Section,
+ const MCExpr *Subsection = nullptr) {
assert(Section && "Cannot switch to a null section!");
MCSectionSubPair curSection = SectionStack.back().first;
SectionStack.back().second = curSection;
@@ -346,7 +345,7 @@ public:
/// emitted to @p Section. This is required to update CurSection. This
/// version does not call ChangeSection.
void SwitchSectionNoChange(const MCSection *Section,
- const MCExpr *Subsection = 0) {
+ const MCExpr *Subsection = nullptr) {
assert(Section && "Cannot switch to a null section!");
MCSectionSubPair curSection = SectionStack.back().first;
SectionStack.back().second = curSection;
@@ -397,9 +396,6 @@ public:
/// a Thumb mode function (ARM target only).
virtual void EmitThumbFunc(MCSymbol *Func) = 0;
- /// getOrCreateSymbolData - Get symbol data for given symbol.
- virtual MCSymbolData &getOrCreateSymbolData(const MCSymbol *Symbol);
-
/// EmitAssignment - Emit an assignment of @p Value to @p Symbol.
///
/// This corresponds to an assembler statement such as:
@@ -495,8 +491,9 @@ public:
/// @param Size - The size of the zerofill symbol.
/// @param ByteAlignment - The alignment of the zerofill symbol if
/// non-zero. This must be a power of 2 on some targets.
- virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
- uint64_t Size = 0, unsigned ByteAlignment = 0) = 0;
+ virtual void EmitZerofill(const MCSection *Section,
+ MCSymbol *Symbol = nullptr, uint64_t Size = 0,
+ unsigned ByteAlignment = 0) = 0;
/// EmitTBSSSymbol - Emit a thread local bss (.tbss) symbol.
///
@@ -527,9 +524,12 @@ public:
/// @param Value - The value to emit.
/// @param Size - The size of the integer (in bytes) to emit. This must
/// match a native machine width.
- virtual void EmitValueImpl(const MCExpr *Value, unsigned Size) = 0;
+ /// @param Loc - The location of the expression for error reporting.
+ virtual void EmitValueImpl(const MCExpr *Value, unsigned Size,
+ const SMLoc &Loc = SMLoc()) = 0;
- void EmitValue(const MCExpr *Value, unsigned Size);
+ void EmitValue(const MCExpr *Value, unsigned Size,
+ const SMLoc &Loc = SMLoc());
/// EmitIntValue - Special case of EmitValue that avoids the client having
/// to pass in a MCExpr for constant integers.
@@ -650,14 +650,6 @@ public:
unsigned Isa, unsigned Discriminator,
StringRef FileName);
- virtual void EmitDwarfAdvanceLineAddr(int64_t LineDelta,
- const MCSymbol *LastLabel,
- const MCSymbol *Label,
- unsigned PointerSize) = 0;
-
- virtual void EmitDwarfAdvanceFrameAddr(const MCSymbol *LastLabel,
- const MCSymbol *Label) {}
-
virtual MCSymbol *getDwarfLineTableSymbol(unsigned CUID);
void EmitDwarfSetLineAddr(int64_t LineDelta, const MCSymbol *Label,
@@ -754,10 +746,9 @@ MCStreamer *createNullStreamer(MCContext &Ctx);
/// \param ShowInst - Whether to show the MCInst representation inline with
/// the assembly.
MCStreamer *createAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
- bool isVerboseAsm, bool useCFI,
- bool useDwarfDirectory, MCInstPrinter *InstPrint,
- MCCodeEmitter *CE, MCAsmBackend *TAB,
- bool ShowInst);
+ bool isVerboseAsm, bool useDwarfDirectory,
+ MCInstPrinter *InstPrint, MCCodeEmitter *CE,
+ MCAsmBackend *TAB, bool ShowInst);
/// createMachOStreamer - Create a machine code streamer which will generate
/// Mach-O format object files.
@@ -768,14 +759,6 @@ MCStreamer *createMachOStreamer(MCContext &Ctx, MCAsmBackend &TAB,
bool RelaxAll = false,
bool LabelSections = false);
-/// createWinCOFFStreamer - Create a machine code streamer which will
-/// generate Microsoft COFF format object files.
-///
-/// Takes ownership of \p TAB and \p CE.
-MCStreamer *createWinCOFFStreamer(MCContext &Ctx, MCAsmBackend &TAB,
- MCCodeEmitter &CE, raw_ostream &OS,
- bool RelaxAll = false);
-
/// createELFStreamer - Create a machine code streamer which will generate
/// ELF format object files.
MCStreamer *createELFStreamer(MCContext &Ctx, MCAsmBackend &TAB,
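
A sketch of the updated createAsmStreamer call after the hunk above removed the useCFI flag; the remaining arguments keep their order (not part of the patch; the wrapper is hypothetical).

    #include "llvm/MC/MCStreamer.h"

    static llvm::MCStreamer *makeAsmStreamer(llvm::MCContext &Ctx,
                                             llvm::formatted_raw_ostream &OS,
                                             llvm::MCInstPrinter *IP,
                                             llvm::MCCodeEmitter *CE,
                                             llvm::MCAsmBackend *MAB) {
      // The old 'useCFI' flag between isVerboseAsm and useDwarfDirectory is gone.
      return llvm::createAsmStreamer(Ctx, OS, /*isVerboseAsm=*/true,
                                     /*useDwarfDirectory=*/true, IP, CE, MAB,
                                     /*ShowInst=*/false);
    }
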
diff --git a/include/llvm/MC/MCSubtargetInfo.h b/include/llvm/MC/MCSubtargetInfo.h
index 01e8236..088c5e7 100644
--- a/include/llvm/MC/MCSubtargetInfo.h
+++ b/include/llvm/MC/MCSubtargetInfo.h
@@ -28,8 +28,8 @@ class StringRef;
///
class MCSubtargetInfo {
std::string TargetTriple; // Target triple
- const SubtargetFeatureKV *ProcFeatures; // Processor feature list
- const SubtargetFeatureKV *ProcDesc; // Processor descriptions
+ ArrayRef<SubtargetFeatureKV> ProcFeatures; // Processor feature list
+ ArrayRef<SubtargetFeatureKV> ProcDesc; // Processor descriptions
// Scheduler machine model
const SubtargetInfoKV *ProcSchedModels;
@@ -41,21 +41,18 @@ class MCSubtargetInfo {
const InstrStage *Stages; // Instruction itinerary stages
const unsigned *OperandCycles; // Itinerary operand cycles
const unsigned *ForwardingPaths; // Forwarding paths
- unsigned NumFeatures; // Number of processor features
- unsigned NumProcs; // Number of processors
uint64_t FeatureBits; // Feature bits for current CPU + FS
public:
void InitMCSubtargetInfo(StringRef TT, StringRef CPU, StringRef FS,
- const SubtargetFeatureKV *PF,
- const SubtargetFeatureKV *PD,
+ ArrayRef<SubtargetFeatureKV> PF,
+ ArrayRef<SubtargetFeatureKV> PD,
const SubtargetInfoKV *ProcSched,
const MCWriteProcResEntry *WPR,
const MCWriteLatencyEntry *WL,
const MCReadAdvanceEntry *RA,
const InstrStage *IS,
- const unsigned *OC, const unsigned *FP,
- unsigned NF, unsigned NP);
+ const unsigned *OC, const unsigned *FP);
/// getTargetTriple - Return the target triple string.
StringRef getTargetTriple() const {
diff --git a/include/llvm/MC/MCSymbol.h b/include/llvm/MC/MCSymbol.h
index ea14da1..0b3c3ce 100644
--- a/include/llvm/MC/MCSymbol.h
+++ b/include/llvm/MC/MCSymbol.h
@@ -60,7 +60,7 @@ namespace llvm {
friend class MCExpr;
friend class MCContext;
MCSymbol(StringRef name, bool isTemporary)
- : Name(name), Section(0), Value(0),
+ : Name(name), Section(nullptr), Value(nullptr),
IsTemporary(isTemporary), IsUsed(false) {}
MCSymbol(const MCSymbol&) LLVM_DELETED_FUNCTION;
@@ -87,7 +87,7 @@ namespace llvm {
///
/// Defined symbols are either absolute or in some section.
bool isDefined() const {
- return Section != 0;
+ return Section != nullptr;
}
/// isInSection - Check if this symbol is defined in some section (i.e., it
@@ -118,7 +118,7 @@ namespace llvm {
/// setUndefined - Mark the symbol as undefined.
void setUndefined() {
- Section = 0;
+ Section = nullptr;
}
/// setAbsolute - Mark the symbol as absolute.
@@ -130,7 +130,7 @@ namespace llvm {
/// isVariable - Check if this is a variable symbol.
bool isVariable() const {
- return Value != 0;
+ return Value != nullptr;
}
/// getVariableValue() - Get the value for variable symbols.
diff --git a/include/llvm/MC/MCTargetAsmParser.h b/include/llvm/MC/MCTargetAsmParser.h
index 0073136..18ef6c2 100644
--- a/include/llvm/MC/MCTargetAsmParser.h
+++ b/include/llvm/MC/MCTargetAsmParser.h
@@ -12,14 +12,15 @@
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCParser/MCAsmParserExtension.h"
+#include "llvm/MC/MCTargetOptions.h"
namespace llvm {
-class MCStreamer;
-class StringRef;
-class SMLoc;
class AsmToken;
-class MCParsedAsmOperand;
class MCInst;
+class MCParsedAsmOperand;
+class MCStreamer;
+class SMLoc;
+class StringRef;
template <typename T> class SmallVectorImpl;
enum AsmRewriteKind {
@@ -63,7 +64,7 @@ struct ParseInstructionInfo {
SmallVectorImpl<AsmRewrite> *AsmRewrites;
- ParseInstructionInfo() : AsmRewrites(0) {}
+ ParseInstructionInfo() : AsmRewrites(nullptr) {}
ParseInstructionInfo(SmallVectorImpl<AsmRewrite> *rewrites)
: AsmRewrites(rewrites) {}
@@ -97,6 +98,9 @@ protected: // Can only create subclasses.
/// ms-style inline assembly.
MCAsmParserSemaCallback *SemaCallback;
+ /// Set of options which affects instrumentation of inline assembly.
+ MCTargetOptions MCOptions;
+
public:
virtual ~MCTargetAsmParser();
@@ -179,7 +183,7 @@ public:
virtual const MCExpr *applyModifierToExpr(const MCExpr *E,
MCSymbolRefExpr::VariantKind,
MCContext &Ctx) {
- return 0;
+ return nullptr;
}
virtual void onLabelParsed(MCSymbol *Symbol) { };
diff --git a/include/llvm/MC/MCTargetOptions.h b/include/llvm/MC/MCTargetOptions.h
new file mode 100644
index 0000000..80cc8be
--- /dev/null
+++ b/include/llvm/MC/MCTargetOptions.h
@@ -0,0 +1,54 @@
+//===- MCTargetOptions.h - MC Target Options -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCTARGETOPTIONS_H
+#define LLVM_MC_MCTARGETOPTIONS_H
+
+namespace llvm {
+
+class MCTargetOptions {
+public:
+ enum AsmInstrumentation {
+ AsmInstrumentationNone,
+ AsmInstrumentationAddress
+ };
+
+ /// Enables AddressSanitizer instrumentation at machine level.
+ bool SanitizeAddress : 1;
+
+ bool MCRelaxAll : 1;
+ bool MCNoExecStack : 1;
+ bool MCSaveTempLabels : 1;
+ bool MCUseDwarfDirectory : 1;
+ bool ShowMCEncoding : 1;
+ bool ShowMCInst : 1;
+ bool AsmVerbose : 1;
+ MCTargetOptions();
+};
+
+inline bool operator==(const MCTargetOptions &LHS, const MCTargetOptions &RHS) {
+#define ARE_EQUAL(X) LHS.X == RHS.X
+ return (ARE_EQUAL(SanitizeAddress) &&
+ ARE_EQUAL(MCRelaxAll) &&
+ ARE_EQUAL(MCNoExecStack) &&
+ ARE_EQUAL(MCSaveTempLabels) &&
+ ARE_EQUAL(MCUseDwarfDirectory) &&
+ ARE_EQUAL(ShowMCEncoding) &&
+ ARE_EQUAL(ShowMCInst) &&
+ ARE_EQUAL(AsmVerbose));
+#undef ARE_EQUAL
+}
+
+inline bool operator!=(const MCTargetOptions &LHS, const MCTargetOptions &RHS) {
+ return !(LHS == RHS);
+}
+
+} // end namespace llvm
+
+#endif
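
The new class above is a plain value type: each option is a one-bit field and the two operators compare every flag, so two option sets are equal only when all flags match. A minimal usage sketch follows (illustrative only, not part of the patch; it assumes the out-of-line default constructor clears all flags):

#include "llvm/MC/MCTargetOptions.h"
using namespace llvm;

// Build two option sets and compare them with the operator== above,
// which checks every flag bit.
static bool sameMCConfig() {
  MCTargetOptions A, B;        // assume the default ctor clears every flag
  A.SanitizeAddress = true;    // request ASan instrumentation of assembly
  B.SanitizeAddress = true;
  B.MCRelaxAll = true;         // differs in one bit...
  return A == B;               // ...so this returns false
}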
diff --git a/include/llvm/MC/MCTargetOptionsCommandFlags.h b/include/llvm/MC/MCTargetOptionsCommandFlags.h
new file mode 100644
index 0000000..17a117a
--- /dev/null
+++ b/include/llvm/MC/MCTargetOptionsCommandFlags.h
@@ -0,0 +1,44 @@
+//===-- MCTargetOptionsCommandFlags.h --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains machine code-specific flags that are shared between
+// different command line tools.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCTARGETOPTIONSCOMMANDFLAGS_H
+#define LLVM_MC_MCTARGETOPTIONSCOMMANDFLAGS_H
+
+#include "llvm/Support/CommandLine.h"
+#include "llvm/MC/MCTargetOptions.h"
+using namespace llvm;
+
+cl::opt<MCTargetOptions::AsmInstrumentation> AsmInstrumentation(
+ "asm-instrumentation", cl::desc("Instrumentation of inline assembly and "
+ "assembly source files"),
+ cl::init(MCTargetOptions::AsmInstrumentationNone),
+ cl::values(clEnumValN(MCTargetOptions::AsmInstrumentationNone, "none",
+ "no instrumentation at all"),
+ clEnumValN(MCTargetOptions::AsmInstrumentationAddress, "address",
+ "instrument instructions with memory arguments"),
+ clEnumValEnd));
+
+cl::opt<bool> RelaxAll("mc-relax-all",
+ cl::desc("When used with filetype=obj, "
+ "relax all fixups in the emitted object file"));
+
+static inline MCTargetOptions InitMCTargetOptionsFromFlags() {
+ MCTargetOptions Options;
+ Options.SanitizeAddress =
+ (AsmInstrumentation == MCTargetOptions::AsmInstrumentationAddress);
+ Options.MCRelaxAll = RelaxAll;
+ return Options;
+}
+
+#endif
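
A sketch of how a command-line tool might consume this header: parse the flags, then snapshot them into an MCTargetOptions value via InitMCTargetOptionsFromFlags(). The main() wiring and overview string are illustrative, not part of the patch:

// Hypothetical tool: include the header once, parse argv, then read back
// the parsed flags as an MCTargetOptions value.
#include "llvm/MC/MCTargetOptionsCommandFlags.h"

int main(int argc, char **argv) {
  cl::ParseCommandLineOptions(argc, argv, "mc options demo\n");
  MCTargetOptions Options = InitMCTargetOptionsFromFlags();
  // Options.SanitizeAddress reflects -asm-instrumentation=address,
  // Options.MCRelaxAll reflects -mc-relax-all.
  return Options.SanitizeAddress ? 1 : 0;
}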
diff --git a/include/llvm/MC/MCValue.h b/include/llvm/MC/MCValue.h
index f4ea511..dd86979 100644
--- a/include/llvm/MC/MCValue.h
+++ b/include/llvm/MC/MCValue.h
@@ -14,14 +14,13 @@
#ifndef LLVM_MC_MCVALUE_H
#define LLVM_MC_MCVALUE_H
+#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/DataTypes.h"
#include <cassert>
namespace llvm {
class MCAsmInfo;
-class MCSymbol;
-class MCSymbolRefExpr;
class raw_ostream;
/// MCValue - This represents an "assembler immediate". In its most
@@ -61,7 +60,10 @@ public:
/// dump - Print the value to stderr.
void dump() const;
- static MCValue get(const MCSymbolRefExpr *SymA, const MCSymbolRefExpr *SymB=0,
+ MCSymbolRefExpr::VariantKind getAccessVariant() const;
+
+ static MCValue get(const MCSymbolRefExpr *SymA,
+ const MCSymbolRefExpr *SymB = nullptr,
int64_t Val = 0, uint32_t RefKind = 0) {
MCValue R;
assert((!SymB || SymA) && "Invalid relocatable MCValue!");
@@ -75,8 +77,8 @@ public:
static MCValue get(int64_t Val) {
MCValue R;
R.Cst = Val;
- R.SymA = 0;
- R.SymB = 0;
+ R.SymA = nullptr;
+ R.SymB = nullptr;
R.RefKind = 0;
return R;
}
diff --git a/include/llvm/MC/MCWin64EH.h b/include/llvm/MC/MCWin64EH.h
index eb4665a..d21e762 100644
--- a/include/llvm/MC/MCWin64EH.h
+++ b/include/llvm/MC/MCWin64EH.h
@@ -61,11 +61,11 @@ namespace llvm {
};
struct MCWin64EHUnwindInfo {
- MCWin64EHUnwindInfo() : Begin(0), End(0), ExceptionHandler(0),
- Function(0), PrologEnd(0), Symbol(0),
- HandlesUnwind(false), HandlesExceptions(false),
- LastFrameInst(-1), ChainedParent(0),
- Instructions() {}
+ MCWin64EHUnwindInfo()
+ : Begin(nullptr), End(nullptr), ExceptionHandler(nullptr),
+ Function(nullptr), PrologEnd(nullptr), Symbol(nullptr),
+ HandlesUnwind(false), HandlesExceptions(false), LastFrameInst(-1),
+ ChainedParent(nullptr), Instructions() {}
MCSymbol *Begin;
MCSymbol *End;
const MCSymbol *ExceptionHandler;
diff --git a/include/llvm/MC/MCWinCOFFObjectWriter.h b/include/llvm/MC/MCWinCOFFObjectWriter.h
index 213481c..dad7bb5 100644
--- a/include/llvm/MC/MCWinCOFFObjectWriter.h
+++ b/include/llvm/MC/MCWinCOFFObjectWriter.h
@@ -30,6 +30,7 @@ namespace llvm {
virtual unsigned getRelocType(const MCValue &Target,
const MCFixup &Fixup,
bool IsCrossSection) const = 0;
+ virtual bool recordRelocation(const MCFixup &) const { return true; }
};
/// \brief Construct a new Win COFF writer instance.
diff --git a/include/llvm/MC/MCWinCOFFStreamer.h b/include/llvm/MC/MCWinCOFFStreamer.h
new file mode 100644
index 0000000..34e39bb
--- /dev/null
+++ b/include/llvm/MC/MCWinCOFFStreamer.h
@@ -0,0 +1,75 @@
+//===- MCWinCOFFStreamer.h - COFF Object File Interface ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCWINCOFFSTREAMER_H
+#define LLVM_MC_MCWINCOFFSTREAMER_H
+
+#include "llvm/MC/MCDirectives.h"
+#include "llvm/MC/MCObjectStreamer.h"
+
+namespace llvm {
+class MCAsmBackend;
+class MCContext;
+class MCCodeEmitter;
+class MCExpr;
+class MCInst;
+class MCSection;
+class MCSubtargetInfo;
+class MCSymbol;
+class StringRef;
+class raw_ostream;
+
+class MCWinCOFFStreamer : public MCObjectStreamer {
+public:
+ MCWinCOFFStreamer(MCContext &Context, MCAsmBackend &MAB, MCCodeEmitter &CE,
+ raw_ostream &OS);
+
+ /// \name MCStreamer interface
+ /// \{
+
+ void InitSections() override;
+ void EmitLabel(MCSymbol *Symbol) override;
+ void EmitDebugLabel(MCSymbol *Symbol) override;
+ void EmitAssemblerFlag(MCAssemblerFlag Flag) override;
+ void EmitThumbFunc(MCSymbol *Func) override;
+ bool EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute) override;
+ void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) override;
+ void BeginCOFFSymbolDef(MCSymbol const *Symbol) override;
+ void EmitCOFFSymbolStorageClass(int StorageClass) override;
+ void EmitCOFFSymbolType(int Type) override;
+ void EndCOFFSymbolDef() override;
+ void EmitCOFFSectionIndex(MCSymbol const *Symbol) override;
+ void EmitCOFFSecRel32(MCSymbol const *Symbol) override;
+ void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) override;
+ void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) override;
+ void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) override;
+ void EmitZerofill(const MCSection *Section, MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) override;
+ void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) override;
+ void EmitFileDirective(StringRef Filename) override;
+ void EmitIdent(StringRef IdentString) override;
+ void EmitWin64EHHandlerData() override;
+ void FinishImpl() override;
+
+ /// \}
+
+protected:
+ const MCSymbol *CurSymbol;
+ void EmitInstToData(const MCInst &Inst, const MCSubtargetInfo &STI) override;
+
+private:
+ LLVM_ATTRIBUTE_NORETURN void FatalError(const Twine &Msg) const;
+};
+}
+
+#endif
+
diff --git a/include/llvm/MC/SubtargetFeature.h b/include/llvm/MC/SubtargetFeature.h
index d0735cc..c5d62a6 100644
--- a/include/llvm/MC/SubtargetFeature.h
+++ b/include/llvm/MC/SubtargetFeature.h
@@ -18,9 +18,9 @@
#ifndef LLVM_MC_SUBTARGETFEATURE_H
#define LLVM_MC_SUBTARGETFEATURE_H
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/DataTypes.h"
-#include <vector>
namespace llvm {
class raw_ostream;
@@ -78,20 +78,17 @@ public:
std::string getString() const;
/// Adding Features.
- void AddFeature(const StringRef String, bool IsEnabled = true);
+ void AddFeature(const StringRef String);
/// ToggleFeature - Toggle a feature and returns the newly updated feature
/// bits.
uint64_t ToggleFeature(uint64_t Bits, const StringRef String,
- const SubtargetFeatureKV *FeatureTable,
- size_t FeatureTableSize);
+ ArrayRef<SubtargetFeatureKV> FeatureTable);
/// Get feature bits of a CPU.
uint64_t getFeatureBits(const StringRef CPU,
- const SubtargetFeatureKV *CPUTable,
- size_t CPUTableSize,
- const SubtargetFeatureKV *FeatureTable,
- size_t FeatureTableSize);
+ ArrayRef<SubtargetFeatureKV> CPUTable,
+ ArrayRef<SubtargetFeatureKV> FeatureTable);
/// Print feature string.
void print(raw_ostream &OS) const;
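
The pointer-plus-length pairs become single ArrayRef parameters, so a caller can pass a fixed-size table directly and the callee recovers the length from ArrayRef::size(). A hedged sketch of the new calling convention (the table contents are placeholders, and the SubtargetFeatureKV field order of Key, Desc, Value, Implies is assumed from the TableGen-emitted tables of this era):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/MC/SubtargetFeature.h"
using namespace llvm;

// Illustrative feature table; real tables are generated by TableGen.
static const SubtargetFeatureKV DemoFeatures[] = {
  { "demo-feat", "A placeholder feature", 1ULL << 0, 0 },
};

static uint64_t toggleDemo(SubtargetFeatures &SF, uint64_t Bits) {
  // Before the patch this took (DemoFeatures, array_lengthof(DemoFeatures));
  // now the braced table converts to ArrayRef implicitly.
  return SF.ToggleFeature(Bits, "demo-feat", DemoFeatures);
}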
diff --git a/include/llvm/Object/Archive.h b/include/llvm/Object/Archive.h
index 4fae76f..652b659 100644
--- a/include/llvm/Object/Archive.h
+++ b/include/llvm/Object/Archive.h
@@ -89,21 +89,17 @@ public:
return StringRef(Data.data() + StartOfFile, getSize());
}
- error_code getMemoryBuffer(OwningPtr<MemoryBuffer> &Result,
- bool FullPath = false) const;
error_code getMemoryBuffer(std::unique_ptr<MemoryBuffer> &Result,
bool FullPath = false) const;
- error_code getAsBinary(OwningPtr<Binary> &Result,
- LLVMContext *Context = 0) const;
error_code getAsBinary(std::unique_ptr<Binary> &Result,
- LLVMContext *Context = 0) const;
+ LLVMContext *Context = nullptr) const;
};
class child_iterator {
Child child;
public:
- child_iterator() : child(Child(0, 0)) {}
+ child_iterator() : child(Child(nullptr, nullptr)) {}
child_iterator(const Child &c) : child(c) {}
const Child* operator->() const {
return &child;
diff --git a/include/llvm/Object/Binary.h b/include/llvm/Object/Binary.h
index b10e40a..8ac84e7 100644
--- a/include/llvm/Object/Binary.h
+++ b/include/llvm/Object/Binary.h
@@ -128,7 +128,8 @@ public:
/// @param Source The data to create the Binary from. Ownership is transferred
/// to the Binary if successful. If an error is returned,
/// Source is destroyed by createBinary before returning.
-ErrorOr<Binary *> createBinary(MemoryBuffer *Source, LLVMContext *Context = 0);
+ErrorOr<Binary *> createBinary(MemoryBuffer *Source,
+ LLVMContext *Context = nullptr);
ErrorOr<Binary *> createBinary(StringRef Path);
}
diff --git a/include/llvm/Object/COFF.h b/include/llvm/Object/COFF.h
index 6e05c2d..bd9c677 100644
--- a/include/llvm/Object/COFF.h
+++ b/include/llvm/Object/COFF.h
@@ -287,6 +287,10 @@ struct coff_aux_weak_external {
char Unused[10];
};
+struct coff_aux_file {
+ char FileName[18];
+};
+
struct coff_aux_section_definition {
support::ulittle32_t Length;
support::ulittle16_t NumberOfRelocations;
@@ -387,7 +391,6 @@ protected:
bool &Result) const override;
relocation_iterator section_rel_begin(DataRefImpl Sec) const override;
relocation_iterator section_rel_end(DataRefImpl Sec) const override;
- bool section_rel_empty(DataRefImpl Sec) const override;
void moveRelocationNext(DataRefImpl &Rel) const override;
error_code getRelocationAddress(DataRefImpl Rel,
@@ -461,7 +464,7 @@ public:
// The iterator for the import directory table.
class ImportDirectoryEntryRef {
public:
- ImportDirectoryEntryRef() : OwningObject(0) {}
+ ImportDirectoryEntryRef() : OwningObject(nullptr) {}
ImportDirectoryEntryRef(const import_directory_table_entry *Table, uint32_t I,
const COFFObjectFile *Owner)
: ImportTable(Table), Index(I), OwningObject(Owner) {}
@@ -485,7 +488,7 @@ private:
// The iterator for the export directory table entry.
class ExportDirectoryEntryRef {
public:
- ExportDirectoryEntryRef() : OwningObject(0) {}
+ ExportDirectoryEntryRef() : OwningObject(nullptr) {}
ExportDirectoryEntryRef(const export_directory_table_entry *Table, uint32_t I,
const COFFObjectFile *Owner)
: ExportTable(Table), Index(I), OwningObject(Owner) {}
diff --git a/include/llvm/Object/COFFYAML.h b/include/llvm/Object/COFFYAML.h
index b5f9ccc..3f48e07 100644
--- a/include/llvm/Object/COFFYAML.h
+++ b/include/llvm/Object/COFFYAML.h
@@ -121,8 +121,13 @@ struct ScalarEnumerationTraits<COFF::SymbolComplexType> {
};
template <>
-struct ScalarEnumerationTraits<COFF::RelocationTypeX86> {
- static void enumeration(IO &IO, COFF::RelocationTypeX86 &Value);
+struct ScalarEnumerationTraits<COFF::RelocationTypeI386> {
+ static void enumeration(IO &IO, COFF::RelocationTypeI386 &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<COFF::RelocationTypeAMD64> {
+ static void enumeration(IO &IO, COFF::RelocationTypeAMD64 &Value);
};
template <>
diff --git a/include/llvm/Object/ELF.h b/include/llvm/Object/ELF.h
index 824e06e..ee97d4e 100644
--- a/include/llvm/Object/ELF.h
+++ b/include/llvm/Object/ELF.h
@@ -60,12 +60,12 @@ public:
public:
typedef ptrdiff_t difference_type;
typedef EntT value_type;
- typedef std::random_access_iterator_tag iterator_category;
+ typedef std::forward_iterator_tag iterator_category;
typedef value_type &reference;
typedef value_type *pointer;
/// \brief Default construct iterator.
- ELFEntityIterator() : EntitySize(0), Current(0) {}
+ ELFEntityIterator() : EntitySize(0), Current(nullptr) {}
ELFEntityIterator(uintX_t EntSize, const char *Start)
: EntitySize(EntSize), Current(Start) {}
@@ -136,6 +136,7 @@ public:
typedef ELFEntityIterator<const Elf_Rela> Elf_Rela_Iter;
typedef ELFEntityIterator<const Elf_Rel> Elf_Rel_Iter;
typedef ELFEntityIterator<const Elf_Shdr> Elf_Shdr_Iter;
+ typedef iterator_range<Elf_Shdr_Iter> Elf_Shdr_Range;
/// \brief Archive files are 2 byte aligned, so we need this for
/// PointerIntPair to work.
@@ -249,7 +250,7 @@ private:
/// \brief Represents a region described by entries in the .dynamic table.
struct DynRegionInfo {
- DynRegionInfo() : Addr(0), Size(0), EntSize(0) {}
+ DynRegionInfo() : Addr(nullptr), Size(0), EntSize(0) {}
/// \brief Address in current address space.
const void *Addr;
/// \brief Size in bytes of the region.
@@ -273,19 +274,19 @@ private:
public:
// If the integer is 0, this is an Elf_Verdef*.
// If the integer is 1, this is an Elf_Vernaux*.
- VersionMapEntry() : PointerIntPair<const void*, 1>(NULL, 0) { }
+ VersionMapEntry() : PointerIntPair<const void*, 1>(nullptr, 0) { }
VersionMapEntry(const Elf_Verdef *verdef)
: PointerIntPair<const void*, 1>(verdef, 0) { }
VersionMapEntry(const Elf_Vernaux *vernaux)
: PointerIntPair<const void*, 1>(vernaux, 1) { }
- bool isNull() const { return getPointer() == NULL; }
+ bool isNull() const { return getPointer() == nullptr; }
bool isVerdef() const { return !isNull() && getInt() == 0; }
bool isVernaux() const { return !isNull() && getInt() == 1; }
const Elf_Verdef *getVerdef() const {
- return isVerdef() ? (const Elf_Verdef*)getPointer() : NULL;
+ return isVerdef() ? (const Elf_Verdef*)getPointer() : nullptr;
}
const Elf_Vernaux *getVernaux() const {
- return isVernaux() ? (const Elf_Vernaux*)getPointer() : NULL;
+ return isVernaux() ? (const Elf_Vernaux*)getPointer() : nullptr;
}
};
mutable SmallVector<VersionMapEntry, 16> VersionMap;
@@ -317,6 +318,11 @@ public:
ELFFile(MemoryBuffer *Object, error_code &ec);
+ bool isMipsELF64() const {
+ return Header->e_machine == ELF::EM_MIPS &&
+ Header->getFileClass() == ELF::ELFCLASS64;
+ }
+
bool isMips64EL() const {
return Header->e_machine == ELF::EM_MIPS &&
Header->getFileClass() == ELF::ELFCLASS64 &&
@@ -325,6 +331,9 @@ public:
Elf_Shdr_Iter begin_sections() const;
Elf_Shdr_Iter end_sections() const;
+ Elf_Shdr_Range sections() const {
+ return make_range(begin_sections(), end_sections());
+ }
Elf_Sym_Iter begin_symbols() const;
Elf_Sym_Iter end_symbols() const;
@@ -338,7 +347,7 @@ public:
if (DynSymRegion.Addr)
return Elf_Sym_Iter(DynSymRegion.EntSize, (const char *)DynSymRegion.Addr,
true);
- return Elf_Sym_Iter(0, 0, true);
+ return Elf_Sym_Iter(0, nullptr, true);
}
Elf_Sym_Iter end_dynamic_symbols() const {
@@ -346,7 +355,7 @@ public:
return Elf_Sym_Iter(DynSymRegion.EntSize,
(const char *)DynSymRegion.Addr + DynSymRegion.Size,
true);
- return Elf_Sym_Iter(0, 0, true);
+ return Elf_Sym_Iter(0, nullptr, true);
}
Elf_Rela_Iter begin_rela(const Elf_Shdr *sec) const {
@@ -478,7 +487,7 @@ void ELFFile<ELFT>::LoadVersionNeeds(const Elf_Shdr *sec) const {
template <class ELFT>
void ELFFile<ELFT>::LoadVersionMap() const {
// If there is no dynamic symtab or version table, there is nothing to do.
- if (DynSymRegion.Addr == NULL || dot_gnu_version_sec == NULL)
+ if (!DynSymRegion.Addr || !dot_gnu_version_sec)
return;
// Has the VersionMap already been loaded?
@@ -510,7 +519,7 @@ ELFFile<ELFT>::getSection(const Elf_Sym *symb) const {
if (symb->st_shndx == ELF::SHN_XINDEX)
return getSection(ExtendedSymbolTable.lookup(symb));
if (symb->st_shndx >= ELF::SHN_LORESERVE)
- return 0;
+ return nullptr;
return getSection(symb->st_shndx);
}
@@ -537,10 +546,16 @@ StringRef ELFFile<ELFT>::getRelocationTypeName(uint32_t Type) const {
template <class ELFT>
void ELFFile<ELFT>::getRelocationTypeName(uint32_t Type,
SmallVectorImpl<char> &Result) const {
- if (!isMips64EL()) {
+ if (!isMipsELF64()) {
StringRef Name = getRelocationTypeName(Type);
Result.append(Name.begin(), Name.end());
} else {
+ // The Mips N64 ABI allows up to three operations to be specified per
+ // relocation record. Unfortunately there's no easy way to test for the
+ // presence of N64 ELFs as they have no special flag that identifies them
+ // as being N64. We can safely assume at the moment that all Mips
+ // ELFCLASS64 ELFs are N64. New Mips64 ABIs should provide enough
+ // information to disambiguate between the old and new ABIs.
uint8_t Type1 = (Type >> 0) & 0xFF;
uint8_t Type2 = (Type >> 8) & 0xFF;
uint8_t Type3 = (Type >> 16) & 0xFF;
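
Under the N64 scheme a single relocation field packs up to three 8-bit relocation codes, which the shifts above unpack. A small standalone illustration of the packing and unpacking (the example values are arbitrary):

#include <cassert>
#include <cstdint>

// Pack three 8-bit N64 relocation codes into one field and unpack them the
// same way the reader above does.
static void demoN64TypePacking() {
  const uint8_t R1 = 0x12, R2 = 0x34, R3 = 0x56;
  const uint32_t Packed =
      uint32_t(R1) | (uint32_t(R2) << 8) | (uint32_t(R3) << 16);
  assert(((Packed >> 0) & 0xFF) == R1);
  assert(((Packed >> 8) & 0xFF) == R2);
  assert(((Packed >> 16) & 0xFF) == R3);
}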
@@ -565,7 +580,7 @@ std::pair<const typename ELFFile<ELFT>::Elf_Shdr *,
const typename ELFFile<ELFT>::Elf_Sym *>
ELFFile<ELFT>::getRelocationSymbol(const Elf_Shdr *Sec, const RelT *Rel) const {
if (!Sec->sh_link)
- return std::make_pair((const Elf_Shdr *)0, (const Elf_Sym *)0);
+ return std::make_pair(nullptr, nullptr);
const Elf_Shdr *SymTable = getSection(Sec->sh_link);
return std::make_pair(
SymTable, getEntry<Elf_Sym>(SymTable, Rel->getSymbol(isMips64EL())));
@@ -604,15 +619,15 @@ typename ELFFile<ELFT>::uintX_t ELFFile<ELFT>::getStringTableIndex() const {
template <class ELFT>
ELFFile<ELFT>::ELFFile(MemoryBuffer *Object, error_code &ec)
: Buf(Object),
- SectionHeaderTable(0),
- dot_shstrtab_sec(0),
- dot_strtab_sec(0),
- dot_symtab_sec(0),
- SymbolTableSectionHeaderIndex(0),
- dot_gnu_version_sec(0),
- dot_gnu_version_r_sec(0),
- dot_gnu_version_d_sec(0),
- dt_soname(0) {
+ SectionHeaderTable(nullptr),
+ dot_shstrtab_sec(nullptr),
+ dot_strtab_sec(nullptr),
+ dot_symtab_sec(nullptr),
+ SymbolTableSectionHeaderIndex(nullptr),
+ dot_gnu_version_sec(nullptr),
+ dot_gnu_version_r_sec(nullptr),
+ dot_gnu_version_d_sec(nullptr),
+ dt_soname(nullptr) {
const uint64_t FileSize = Buf->getBufferSize();
if (sizeof(Elf_Ehdr) > FileSize)
@@ -641,30 +656,29 @@ ELFFile<ELFT>::ELFFile(MemoryBuffer *Object, error_code &ec)
// Scan sections for special sections.
- for (Elf_Shdr_Iter SecI = begin_sections(), SecE = end_sections();
- SecI != SecE; ++SecI) {
- switch (SecI->sh_type) {
+ for (const Elf_Shdr &Sec : sections()) {
+ switch (Sec.sh_type) {
case ELF::SHT_SYMTAB_SHNDX:
if (SymbolTableSectionHeaderIndex)
// FIXME: Proper error handling.
report_fatal_error("More than one .symtab_shndx!");
- SymbolTableSectionHeaderIndex = &*SecI;
+ SymbolTableSectionHeaderIndex = &Sec;
break;
case ELF::SHT_SYMTAB:
if (dot_symtab_sec)
// FIXME: Proper error handling.
report_fatal_error("More than one .symtab!");
- dot_symtab_sec = &*SecI;
- dot_strtab_sec = getSection(SecI->sh_link);
+ dot_symtab_sec = &Sec;
+ dot_strtab_sec = getSection(Sec.sh_link);
break;
case ELF::SHT_DYNSYM: {
if (DynSymRegion.Addr)
// FIXME: Proper error handling.
report_fatal_error("More than one .dynsym!");
- DynSymRegion.Addr = base() + SecI->sh_offset;
- DynSymRegion.Size = SecI->sh_size;
- DynSymRegion.EntSize = SecI->sh_entsize;
- const Elf_Shdr *DynStr = getSection(SecI->sh_link);
+ DynSymRegion.Addr = base() + Sec.sh_offset;
+ DynSymRegion.Size = Sec.sh_size;
+ DynSymRegion.EntSize = Sec.sh_entsize;
+ const Elf_Shdr *DynStr = getSection(Sec.sh_link);
DynStrRegion.Addr = base() + DynStr->sh_offset;
DynStrRegion.Size = DynStr->sh_size;
DynStrRegion.EntSize = DynStr->sh_entsize;
@@ -674,27 +688,27 @@ ELFFile<ELFT>::ELFFile(MemoryBuffer *Object, error_code &ec)
if (DynamicRegion.Addr)
// FIXME: Proper error handling.
report_fatal_error("More than one .dynamic!");
- DynamicRegion.Addr = base() + SecI->sh_offset;
- DynamicRegion.Size = SecI->sh_size;
- DynamicRegion.EntSize = SecI->sh_entsize;
+ DynamicRegion.Addr = base() + Sec.sh_offset;
+ DynamicRegion.Size = Sec.sh_size;
+ DynamicRegion.EntSize = Sec.sh_entsize;
break;
case ELF::SHT_GNU_versym:
- if (dot_gnu_version_sec != NULL)
+ if (dot_gnu_version_sec != nullptr)
// FIXME: Proper error handling.
report_fatal_error("More than one .gnu.version section!");
- dot_gnu_version_sec = &*SecI;
+ dot_gnu_version_sec = &Sec;
break;
case ELF::SHT_GNU_verdef:
- if (dot_gnu_version_d_sec != NULL)
+ if (dot_gnu_version_d_sec != nullptr)
// FIXME: Proper error handling.
report_fatal_error("More than one .gnu.version_d section!");
- dot_gnu_version_d_sec = &*SecI;
+ dot_gnu_version_d_sec = &Sec;
break;
case ELF::SHT_GNU_verneed:
- if (dot_gnu_version_r_sec != NULL)
+ if (dot_gnu_version_r_sec != nullptr)
// FIXME: Proper error handling.
report_fatal_error("More than one .gnu.version_r section!");
- dot_gnu_version_r_sec = &*SecI;
+ dot_gnu_version_r_sec = &Sec;
break;
}
}
@@ -761,7 +775,7 @@ typename ELFFile<ELFT>::Elf_Shdr_Iter ELFFile<ELFT>::end_sections() const {
template <class ELFT>
typename ELFFile<ELFT>::Elf_Sym_Iter ELFFile<ELFT>::begin_symbols() const {
if (!dot_symtab_sec)
- return Elf_Sym_Iter(0, 0, false);
+ return Elf_Sym_Iter(0, nullptr, false);
return Elf_Sym_Iter(dot_symtab_sec->sh_entsize,
(const char *)base() + dot_symtab_sec->sh_offset, false);
}
@@ -769,7 +783,7 @@ typename ELFFile<ELFT>::Elf_Sym_Iter ELFFile<ELFT>::begin_symbols() const {
template <class ELFT>
typename ELFFile<ELFT>::Elf_Sym_Iter ELFFile<ELFT>::end_symbols() const {
if (!dot_symtab_sec)
- return Elf_Sym_Iter(0, 0, false);
+ return Elf_Sym_Iter(0, nullptr, false);
return Elf_Sym_Iter(dot_symtab_sec->sh_entsize,
(const char *)base() + dot_symtab_sec->sh_offset +
dot_symtab_sec->sh_size,
@@ -782,14 +796,14 @@ ELFFile<ELFT>::begin_dynamic_table() const {
if (DynamicRegion.Addr)
return Elf_Dyn_Iter(DynamicRegion.EntSize,
(const char *)DynamicRegion.Addr);
- return Elf_Dyn_Iter(0, 0);
+ return Elf_Dyn_Iter(0, nullptr);
}
template <class ELFT>
typename ELFFile<ELFT>::Elf_Dyn_Iter
ELFFile<ELFT>::end_dynamic_table(bool NULLEnd) const {
if (!DynamicRegion.Addr)
- return Elf_Dyn_Iter(0, 0);
+ return Elf_Dyn_Iter(0, nullptr);
Elf_Dyn_Iter Ret(DynamicRegion.EntSize,
(const char *)DynamicRegion.Addr + DynamicRegion.Size);
@@ -842,7 +856,7 @@ template <class ELFT>
const typename ELFFile<ELFT>::Elf_Shdr *
ELFFile<ELFT>::getSection(uint32_t index) const {
if (index == 0)
- return 0;
+ return nullptr;
if (!SectionHeaderTable || index >= getNumSections())
// FIXME: Proper error handling.
report_fatal_error("Invalid section index!");
@@ -871,7 +885,7 @@ const char *ELFFile<ELFT>::getString(const Elf_Shdr *section,
template <class ELFT>
const char *ELFFile<ELFT>::getDynamicString(uintX_t Offset) const {
if (!DynStrRegion.Addr || Offset >= DynStrRegion.Size)
- return 0;
+ return nullptr;
return (const char *)DynStrRegion.Addr + Offset;
}
@@ -913,7 +927,7 @@ ErrorOr<StringRef> ELFFile<ELFT>::getSymbolVersion(const Elf_Shdr *section,
const Elf_Sym *symb,
bool &IsDefault) const {
// Handle non-dynamic symbols.
- if (section != DynSymRegion.Addr && section != 0) {
+ if (section != DynSymRegion.Addr && section != nullptr) {
// Non-dynamic symbols can have versions in their names
// A name of the form 'foo@V1' indicates version 'V1', non-default.
// A name of the form 'foo@@V2' indicates version 'V2', default version.
@@ -937,7 +951,7 @@ ErrorOr<StringRef> ELFFile<ELFT>::getSymbolVersion(const Elf_Shdr *section,
}
// This is a dynamic symbol. Look in the GNU symbol version table.
- if (dot_gnu_version_sec == NULL) {
+ if (!dot_gnu_version_sec) {
// No version table.
IsDefault = false;
return StringRef("");
diff --git a/include/llvm/Object/ELFObjectFile.h b/include/llvm/Object/ELFObjectFile.h
index 2958067..302caba 100644
--- a/include/llvm/Object/ELFObjectFile.h
+++ b/include/llvm/Object/ELFObjectFile.h
@@ -89,7 +89,6 @@ protected:
bool &Result) const override;
relocation_iterator section_rel_begin(DataRefImpl Sec) const override;
relocation_iterator section_rel_end(DataRefImpl Sec) const override;
- bool section_rel_empty(DataRefImpl Sec) const override;
section_iterator getRelocatedSection(DataRefImpl Sec) const override;
void moveRelocationNext(DataRefImpl &Rel) const override;
@@ -256,8 +255,7 @@ error_code ELFObjectFile<ELFT>::getSymbolAddress(DataRefImpl Symb,
Result = ESym->st_value;
// Clear the ARM/Thumb indicator flag.
- if (EF.getHeader()->e_machine == ELF::EM_ARM &&
- ESym->getType() == ELF::STT_FUNC)
+ if (Header->e_machine == ELF::EM_ARM && ESym->getType() == ELF::STT_FUNC)
Result &= ~1;
if (Header->e_type == ELF::ET_REL)
@@ -497,12 +495,6 @@ ELFObjectFile<ELFT>::section_rel_end(DataRefImpl Sec) const {
}
template <class ELFT>
-bool ELFObjectFile<ELFT>::section_rel_empty(DataRefImpl Sec) const {
- const Elf_Shdr *S = reinterpret_cast<const Elf_Shdr *>(Sec.p);
- return S->sh_size == 0;
-}
-
-template <class ELFT>
section_iterator
ELFObjectFile<ELFT>::getRelocatedSection(DataRefImpl Sec) const {
if (EF.getHeader()->e_type != ELF::ET_REL)
@@ -563,10 +555,17 @@ ELFObjectFile<ELFT>::getRelocationSymbol(DataRefImpl Rel) const {
template <class ELFT>
error_code ELFObjectFile<ELFT>::getRelocationAddress(DataRefImpl Rel,
uint64_t &Result) const {
- assert((EF.getHeader()->e_type == ELF::ET_EXEC ||
- EF.getHeader()->e_type == ELF::ET_DYN) &&
- "Only executable and shared objects files have relocation addresses");
- Result = getROffset(Rel);
+ uint64_t ROffset = getROffset(Rel);
+ const Elf_Ehdr *Header = EF.getHeader();
+
+ if (Header->e_type == ELF::ET_REL) {
+ const Elf_Shdr *RelocationSec = getRelSection(Rel);
+ const Elf_Shdr *RelocatedSec = EF.getSection(RelocationSec->sh_info);
+ Result = ROffset + RelocatedSec->sh_addr;
+ } else {
+ Result = ROffset;
+ }
+
return object_error::success;
}
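
The new logic distinguishes relocatable objects (ET_REL), where the stored offset is relative to the section being patched and must be rebased onto that section's address, from linked objects, where it is already an absolute value. A short sketch of the same arithmetic outside the template (names are illustrative):

#include <cstdint>

// For ET_REL the relocation offset is section-relative; adding the relocated
// section's address yields an address comparable to the linked-object case.
static uint64_t relocationAddress(bool IsRelocatable, uint64_t ROffset,
                                  uint64_t RelocatedSectionAddr) {
  return IsRelocatable ? RelocatedSectionAddr + ROffset : ROffset;
}
// e.g. relocationAddress(true, 0x10, 0x400000) == 0x400010
//      relocationAddress(false, 0x401000, 0)   == 0x401000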
diff --git a/include/llvm/Object/ELFYAML.h b/include/llvm/Object/ELFYAML.h
index 1eba660..524e55b 100644
--- a/include/llvm/Object/ELFYAML.h
+++ b/include/llvm/Object/ELFYAML.h
@@ -40,6 +40,7 @@ LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_ELFOSABI)
// Just use 64, since it can hold 32-bit values too.
LLVM_YAML_STRONG_TYPEDEF(uint64_t, ELF_EF)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, ELF_SHT)
+LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_REL)
// Just use 64, since it can hold 32-bit values too.
LLVM_YAML_STRONG_TYPEDEF(uint64_t, ELF_SHF)
LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_STT)
@@ -68,17 +69,42 @@ struct LocalGlobalWeakSymbols {
std::vector<Symbol> Weak;
};
struct Section {
+ enum class SectionKind { RawContent, Relocation };
+ SectionKind Kind;
StringRef Name;
ELF_SHT Type;
ELF_SHF Flags;
llvm::yaml::Hex64 Address;
- object::yaml::BinaryRef Content;
StringRef Link;
+ StringRef Info;
llvm::yaml::Hex64 AddressAlign;
+ Section(SectionKind Kind) : Kind(Kind) {}
+ virtual ~Section();
+};
+struct RawContentSection : Section {
+ object::yaml::BinaryRef Content;
+ llvm::yaml::Hex64 Size;
+ RawContentSection() : Section(SectionKind::RawContent) {}
+ static bool classof(const Section *S) {
+ return S->Kind == SectionKind::RawContent;
+ }
+};
+struct Relocation {
+ llvm::yaml::Hex64 Offset;
+ int64_t Addend;
+ ELF_REL Type;
+ StringRef Symbol;
+};
+struct RelocationSection : Section {
+ std::vector<Relocation> Relocations;
+ RelocationSection() : Section(SectionKind::Relocation) {}
+ static bool classof(const Section *S) {
+ return S->Kind == SectionKind::Relocation;
+ }
};
struct Object {
FileHeader Header;
- std::vector<Section> Sections;
+ std::vector<std::unique_ptr<Section>> Sections;
// Although in reality the symbols reside in a section, it is a lot
// cleaner and nicer if we read them from the YAML as a separate
// top-level key, which automatically ensures that invariants like there
@@ -89,8 +115,9 @@ struct Object {
} // end namespace ELFYAML
} // end namespace llvm
-LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::Section)
+LLVM_YAML_IS_SEQUENCE_VECTOR(std::unique_ptr<llvm::ELFYAML::Section>)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::Symbol)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::Relocation)
namespace llvm {
namespace yaml {
@@ -141,6 +168,11 @@ struct ScalarEnumerationTraits<ELFYAML::ELF_STT> {
};
template <>
+struct ScalarEnumerationTraits<ELFYAML::ELF_REL> {
+ static void enumeration(IO &IO, ELFYAML::ELF_REL &Value);
+};
+
+template <>
struct MappingTraits<ELFYAML::FileHeader> {
static void mapping(IO &IO, ELFYAML::FileHeader &FileHdr);
};
@@ -155,9 +187,14 @@ struct MappingTraits<ELFYAML::LocalGlobalWeakSymbols> {
static void mapping(IO &IO, ELFYAML::LocalGlobalWeakSymbols &Symbols);
};
+template <> struct MappingTraits<ELFYAML::Relocation> {
+ static void mapping(IO &IO, ELFYAML::Relocation &Rel);
+};
+
template <>
-struct MappingTraits<ELFYAML::Section> {
- static void mapping(IO &IO, ELFYAML::Section &Section);
+struct MappingTraits<std::unique_ptr<ELFYAML::Section>> {
+ static void mapping(IO &IO, std::unique_ptr<ELFYAML::Section> &Section);
+ static StringRef validate(IO &io, std::unique_ptr<ELFYAML::Section> &Section);
};
template <>
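
The Kind tag plus the classof() hooks are what let LLVM's isa/dyn_cast machinery work on the new Section hierarchy, so YAML consumers can branch on the concrete section type. A hedged sketch of that dispatch (the helper function is hypothetical):

#include "llvm/Object/ELFYAML.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

// Hypothetical helper: inspect a parsed section and branch on its kind
// using the classof() hooks declared above.
static size_t countRelocations(const ELFYAML::Section &Sec) {
  if (const auto *RS = dyn_cast<ELFYAML::RelocationSection>(&Sec))
    return RS->Relocations.size();
  // RawContentSection (and any future kinds) carry no relocation entries.
  return 0;
}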
diff --git a/include/llvm/Object/MachO.h b/include/llvm/Object/MachO.h
index f242611..710ad7e 100644
--- a/include/llvm/Object/MachO.h
+++ b/include/llvm/Object/MachO.h
@@ -31,7 +31,7 @@ class DiceRef {
const ObjectFile *OwningObject;
public:
- DiceRef() : OwningObject(NULL) { }
+ DiceRef() : OwningObject(nullptr) { }
DiceRef(DataRefImpl DiceP, const ObjectFile *Owner);
@@ -88,7 +88,6 @@ public:
bool &Result) const override;
relocation_iterator section_rel_begin(DataRefImpl Sec) const override;
relocation_iterator section_rel_end(DataRefImpl Sec) const override;
- bool section_rel_empty(DataRefImpl Sec) const override;
void moveRelocationNext(DataRefImpl &Rel) const override;
error_code getRelocationAddress(DataRefImpl Rel,
@@ -112,6 +111,9 @@ public:
basic_symbol_iterator symbol_begin_impl() const override;
basic_symbol_iterator symbol_end_impl() const override;
+ // MachO specific.
+ basic_symbol_iterator getSymbolByIndex(unsigned Index) const;
+
section_iterator section_begin() const override;
section_iterator section_end() const override;
diff --git a/include/llvm/Object/MachOUniversal.h b/include/llvm/Object/MachOUniversal.h
index 9b1afd2..d27c824 100644
--- a/include/llvm/Object/MachOUniversal.h
+++ b/include/llvm/Object/MachOUniversal.h
@@ -17,6 +17,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Object/Binary.h"
+#include "llvm/Object/Archive.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/MachO.h"
@@ -41,7 +42,7 @@ public:
ObjectForArch(const MachOUniversalBinary *Parent, uint32_t Index);
void clear() {
- Parent = 0;
+ Parent = nullptr;
Index = 0;
}
@@ -53,6 +54,8 @@ public:
uint32_t getCPUType() const { return Header.cputype; }
error_code getAsObjectFile(std::unique_ptr<ObjectFile> &Result) const;
+
+ error_code getAsArchive(std::unique_ptr<Archive> &Result) const;
};
class object_iterator {
@@ -83,7 +86,7 @@ public:
return ObjectForArch(this, 0);
}
object_iterator end_objects() const {
- return ObjectForArch(0, 0);
+ return ObjectForArch(nullptr, 0);
}
uint32_t getNumberOfObjects() const { return NumberOfObjects; }
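
With getAsArchive() each slice of a fat Mach-O can now be materialized either as an object file or as a static archive. A sketch of walking the slices and trying both, under stated assumptions: error handling is abbreviated, and ObjectForArch is assumed to be the nested class spelled MachOUniversalBinary::ObjectForArch as in the constructor shown above.

#include "llvm/Object/Archive.h"
#include "llvm/Object/MachOUniversal.h"
#include "llvm/Object/ObjectFile.h"
#include <memory>
using namespace llvm;
using namespace llvm::object;

// Illustrative: for each architecture slice, prefer an ObjectFile and fall
// back to treating the slice as an embedded static archive.
static void visitSlices(const MachOUniversalBinary &Fat) {
  for (uint32_t I = 0, E = Fat.getNumberOfObjects(); I != E; ++I) {
    MachOUniversalBinary::ObjectForArch Slice(&Fat, I);
    std::unique_ptr<ObjectFile> Obj;
    std::unique_ptr<Archive> Ar;
    if (!Slice.getAsObjectFile(Obj)) {
      // ... inspect Obj, the thin Mach-O for this architecture
    } else if (!Slice.getAsArchive(Ar)) {
      // ... inspect Ar, a static archive embedded in the fat file
    }
  }
}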
diff --git a/include/llvm/Object/ObjectFile.h b/include/llvm/Object/ObjectFile.h
index 8298b63..10209b9 100644
--- a/include/llvm/Object/ObjectFile.h
+++ b/include/llvm/Object/ObjectFile.h
@@ -38,7 +38,7 @@ class RelocationRef {
const ObjectFile *OwningObject;
public:
- RelocationRef() : OwningObject(NULL) { }
+ RelocationRef() : OwningObject(nullptr) { }
RelocationRef(DataRefImpl RelocationP, const ObjectFile *Owner);
@@ -82,7 +82,7 @@ class SectionRef {
const ObjectFile *OwningObject;
public:
- SectionRef() : OwningObject(NULL) { }
+ SectionRef() : OwningObject(nullptr) { }
SectionRef(DataRefImpl SectionP, const ObjectFile *Owner);
@@ -113,11 +113,10 @@ public:
relocation_iterator relocation_begin() const;
relocation_iterator relocation_end() const;
- typedef iterator_range<relocation_iterator> relocation_iterator_range;
- relocation_iterator_range relocations() const {
- return relocation_iterator_range(relocation_begin(), relocation_end());
+ iterator_range<relocation_iterator> relocations() const {
+ return iterator_range<relocation_iterator>(relocation_begin(),
+ relocation_end());
}
- bool relocation_empty() const;
section_iterator getRelocatedSection() const;
DataRefImpl getRawDataRefImpl() const;
@@ -146,7 +145,6 @@ public:
/// Returns the symbol virtual address (i.e. address at which it will be
/// mapped).
error_code getAddress(uint64_t &Result) const;
- error_code getFileOffset(uint64_t &Result) const;
/// @brief Get the alignment of this symbol as the actual value (not log 2).
error_code getAlignment(uint32_t &Result) const;
error_code getSize(uint64_t &Result) const;
@@ -185,7 +183,7 @@ class LibraryRef {
const ObjectFile *OwningObject;
public:
- LibraryRef() : OwningObject(NULL) { }
+ LibraryRef() : OwningObject(nullptr) { }
LibraryRef(DataRefImpl LibraryP, const ObjectFile *Owner);
@@ -256,7 +254,6 @@ protected:
bool &Result) const = 0;
virtual relocation_iterator section_rel_begin(DataRefImpl Sec) const = 0;
virtual relocation_iterator section_rel_end(DataRefImpl Sec) const = 0;
- virtual bool section_rel_empty(DataRefImpl Sec) const = 0;
virtual section_iterator getRelocatedSection(DataRefImpl Sec) const;
// Same as above for RelocationRef.
@@ -350,42 +347,6 @@ inline error_code SymbolRef::getAddress(uint64_t &Result) const {
return getObject()->getSymbolAddress(getRawDataRefImpl(), Result);
}
-inline error_code SymbolRef::getFileOffset(uint64_t &Result) const {
- uint64_t Address;
- if (error_code EC = getAddress(Address))
- return EC;
- if (Address == UnknownAddressOrSize) {
- Result = UnknownAddressOrSize;
- return object_error::success;
- }
-
- const ObjectFile *Obj = getObject();
- section_iterator SecI(Obj->section_begin());
- if (error_code EC = getSection(SecI))
- return EC;
-
- if (SecI == Obj->section_end()) {
- Result = UnknownAddressOrSize;
- return object_error::success;
- }
-
- uint64_t SectionAddress;
- if (error_code EC = SecI->getAddress(SectionAddress))
- return EC;
-
- uint64_t OffsetInSection = Address - SectionAddress;
-
- StringRef SecContents;
- if (error_code EC = SecI->getContents(SecContents))
- return EC;
-
- // FIXME: this is a hack.
- uint64_t SectionOffset = (uint64_t)SecContents.data() - (uint64_t)Obj->base();
-
- Result = SectionOffset + OffsetInSection;
- return object_error::success;
-}
-
inline error_code SymbolRef::getAlignment(uint32_t &Result) const {
return getObject()->getSymbolAlignment(getRawDataRefImpl(), Result);
}
@@ -491,10 +452,6 @@ inline relocation_iterator SectionRef::relocation_end() const {
return OwningObject->section_rel_end(SectionPimpl);
}
-inline bool SectionRef::relocation_empty() const {
- return OwningObject->section_rel_empty(SectionPimpl);
-}
-
inline section_iterator SectionRef::getRelocatedSection() const {
return OwningObject->getRelocatedSection(SectionPimpl);
}
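
relocations() now returns an iterator_range directly, so callers can range-for over a section's relocations instead of pairing relocation_begin()/relocation_end(), and the removed relocation_empty() is simply "begin == end". A minimal usage sketch:

#include "llvm/Object/ObjectFile.h"
using namespace llvm;
using namespace llvm::object;

// Count the relocations in one section via the new range accessor.
static unsigned countRelocs(const SectionRef &Sec) {
  unsigned N = 0;
  for (const RelocationRef &R : Sec.relocations()) {
    (void)R;   // a real caller would query R here
    ++N;
  }
  return N;
}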
diff --git a/include/llvm/Object/StringTableBuilder.h b/include/llvm/Object/StringTableBuilder.h
new file mode 100644
index 0000000..c61e216
--- /dev/null
+++ b/include/llvm/Object/StringTableBuilder.h
@@ -0,0 +1,59 @@
+//===-- StringTableBuilder.h - String table building utility ------*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_STRINGTABLE_BUILDER_H
+#define LLVM_OBJECT_STRINGTABLE_BUILDER_H
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringMap.h"
+#include <cassert>
+
+namespace llvm {
+
+/// \brief Utility for building string tables with deduplicated suffixes.
+class StringTableBuilder {
+ SmallString<256> StringTable;
+ StringMap<size_t> StringIndexMap;
+
+public:
+ /// \brief Add a string to the builder. Returns a StringRef to the internal
+ /// copy of s. Can only be used before the table is finalized.
+ StringRef add(StringRef s) {
+ assert(!isFinalized());
+ return StringIndexMap.GetOrCreateValue(s, 0).getKey();
+ }
+
+ /// \brief Analyze the strings and build the final table. No more strings can
+ /// be added after this point.
+ void finalize();
+
+ /// \brief Retrieve the string table data. Can only be used after the table
+ /// is finalized.
+ StringRef data() {
+ assert(isFinalized());
+ return StringTable;
+ }
+
+ /// \brief Get the offset of a string in the string table. Can only be used
+ /// after the table is finalized.
+ size_t getOffset(StringRef s) {
+ assert(isFinalized());
+ assert(StringIndexMap.count(s) && "String is not in table!");
+ return StringIndexMap[s];
+ }
+
+private:
+ bool isFinalized() {
+ return !StringTable.empty();
+ }
+};
+
+} // end llvm namespace
+
+#endif
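
Typical use of the builder above: add every string first, finalize once, then query offsets and emit the blob; add() and getOffset() assert if called in the wrong phase. A small sketch (illustrative strings and output stream):

#include "llvm/Object/StringTableBuilder.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static void emitStrtab(raw_ostream &OS) {
  StringTableBuilder Builder;
  Builder.add("main");
  Builder.add("domain");       // "main" is a suffix of "domain" and may be merged
  Builder.finalize();          // no more add() calls after this point
  size_t MainOff = Builder.getOffset("main");
  (void)MainOff;               // a real caller would record this in a symbol table
  OS << Builder.data();        // raw string table bytes
}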
diff --git a/include/llvm/Object/SymbolicFile.h b/include/llvm/Object/SymbolicFile.h
index bead2c3..28400e1 100644
--- a/include/llvm/Object/SymbolicFile.h
+++ b/include/llvm/Object/SymbolicFile.h
@@ -90,7 +90,7 @@ public:
// (e.g. section symbols)
};
- BasicSymbolRef() : OwningObject(NULL) { }
+ BasicSymbolRef() : OwningObject(nullptr) { }
BasicSymbolRef(DataRefImpl SymbolP, const SymbolicFile *Owner);
bool operator==(const BasicSymbolRef &Other) const;
@@ -147,7 +147,8 @@ public:
LLVMContext *Context);
static ErrorOr<SymbolicFile *> createSymbolicFile(MemoryBuffer *Object) {
- return createSymbolicFile(Object, true, sys::fs::file_magic::unknown, 0);
+ return createSymbolicFile(Object, true, sys::fs::file_magic::unknown,
+ nullptr);
}
static ErrorOr<SymbolicFile *> createSymbolicFile(StringRef ObjectPath);
diff --git a/include/llvm/Object/YAML.h b/include/llvm/Object/YAML.h
index 89fe504..1792e8b 100644
--- a/include/llvm/Object/YAML.h
+++ b/include/llvm/Object/YAML.h
@@ -108,6 +108,7 @@ template <> struct ScalarTraits<object::yaml::BinaryRef> {
static void output(const object::yaml::BinaryRef &, void *,
llvm::raw_ostream &);
static StringRef input(StringRef, void *, object::yaml::BinaryRef &);
+ static bool mustQuote(StringRef S) { return needsQuotes(S); }
};
}
diff --git a/include/llvm/Option/Arg.h b/include/llvm/Option/Arg.h
index 6b8ed3f..dcaa540 100644
--- a/include/llvm/Option/Arg.h
+++ b/include/llvm/Option/Arg.h
@@ -27,10 +27,7 @@ class ArgList;
/// \brief A concrete instance of a particular driver option.
///
/// The Arg class encodes just enough information to be able to
-/// derive the argument values efficiently. In addition, Arg
-/// instances have an intrusive double linked list which is used by
-/// ArgList to provide efficient iteration over all instances of a
-/// particular option.
+/// derive the argument values efficiently.
class Arg {
Arg(const Arg &) LLVM_DELETED_FUNCTION;
void operator=(const Arg &) LLVM_DELETED_FUNCTION;
@@ -63,14 +60,14 @@ private:
public:
Arg(const Option Opt, StringRef Spelling, unsigned Index,
- const Arg *BaseArg = 0);
+ const Arg *BaseArg = nullptr);
Arg(const Option Opt, StringRef Spelling, unsigned Index,
- const char *Value0, const Arg *BaseArg = 0);
+ const char *Value0, const Arg *BaseArg = nullptr);
Arg(const Option Opt, StringRef Spelling, unsigned Index,
- const char *Value0, const char *Value1, const Arg *BaseArg = 0);
+ const char *Value0, const char *Value1, const Arg *BaseArg = nullptr);
~Arg();
- const Option getOption() const { return Opt; }
+ const Option &getOption() const { return Opt; }
StringRef getSpelling() const { return Spelling; }
unsigned getIndex() const { return Index; }
diff --git a/include/llvm/Option/ArgList.h b/include/llvm/Option/ArgList.h
index 98ba6ec..ab40a1a 100644
--- a/include/llvm/Option/ArgList.h
+++ b/include/llvm/Option/ArgList.h
@@ -15,6 +15,7 @@
#include "llvm/Option/OptSpecifier.h"
#include "llvm/Option/Option.h"
#include <list>
+#include <memory>
#include <string>
#include <vector>
@@ -105,10 +106,14 @@ private:
arglist_type Args;
protected:
- ArgList();
+ // Default ctor provided explicitly as it is not provided implicitly due to
+ // the presence of the (deleted) copy ctor above.
+ ArgList() { }
+ // Virtual to provide a vtable anchor and because -Wnon-virtual-dtor warns, not
+ // because this type is ever actually destroyed polymorphically.
+ virtual ~ArgList();
public:
- virtual ~ArgList();
/// @name Arg Access
/// @{
@@ -160,16 +165,16 @@ public:
///
/// \p Claim Whether the argument should be claimed, if it exists.
bool hasArgNoClaim(OptSpecifier Id) const {
- return getLastArgNoClaim(Id) != 0;
+ return getLastArgNoClaim(Id) != nullptr;
}
bool hasArg(OptSpecifier Id) const {
- return getLastArg(Id) != 0;
+ return getLastArg(Id) != nullptr;
}
bool hasArg(OptSpecifier Id0, OptSpecifier Id1) const {
- return getLastArg(Id0, Id1) != 0;
+ return getLastArg(Id0, Id1) != nullptr;
}
bool hasArg(OptSpecifier Id0, OptSpecifier Id1, OptSpecifier Id2) const {
- return getLastArg(Id0, Id1, Id2) != 0;
+ return getLastArg(Id0, Id1, Id2) != nullptr;
}
/// getLastArg - Return the last argument matching \p Id, or null.
@@ -334,7 +339,7 @@ class DerivedArgList : public ArgList {
const InputArgList &BaseArgs;
/// The list of arguments we synthesized.
- mutable arglist_type SynthesizedArgs;
+ mutable SmallVector<std::unique_ptr<Arg>, 16> SynthesizedArgs;
public:
/// Construct a new derived arg list from \p BaseArgs.
@@ -358,9 +363,7 @@ public:
/// AddSynthesizedArg - Add an argument to the list of synthesized arguments
/// (to be freed).
- void AddSynthesizedArg(Arg *A) {
- SynthesizedArgs.push_back(A);
- }
+ void AddSynthesizedArg(Arg *A);
const char *MakeArgString(StringRef Str) const override;
diff --git a/include/llvm/Option/OptSpecifier.h b/include/llvm/Option/OptSpecifier.h
index 02bc6b1..b7caa6e 100644
--- a/include/llvm/Option/OptSpecifier.h
+++ b/include/llvm/Option/OptSpecifier.h
@@ -10,6 +10,8 @@
#ifndef LLVM_OPTION_OPTSPECIFIER_H
#define LLVM_OPTION_OPTSPECIFIER_H
+#include "llvm/Support/Compiler.h"
+
namespace llvm {
namespace opt {
class Option;
diff --git a/include/llvm/Option/Option.h b/include/llvm/Option/Option.h
index 03d4774..b2cfacb 100644
--- a/include/llvm/Option/Option.h
+++ b/include/llvm/Option/Option.h
@@ -73,7 +73,7 @@ public:
~Option();
bool isValid() const {
- return Info != 0;
+ return Info != nullptr;
}
unsigned getID() const {
diff --git a/include/llvm/Pass.h b/include/llvm/Pass.h
index ff700cf..c2b9f95 100644
--- a/include/llvm/Pass.h
+++ b/include/llvm/Pass.h
@@ -87,7 +87,8 @@ class Pass {
Pass(const Pass &) LLVM_DELETED_FUNCTION;
public:
- explicit Pass(PassKind K, char &pid) : Resolver(0), PassID(&pid), Kind(K) { }
+ explicit Pass(PassKind K, char &pid)
+ : Resolver(nullptr), PassID(&pid), Kind(K) { }
virtual ~Pass();
diff --git a/include/llvm/PassAnalysisSupport.h b/include/llvm/PassAnalysisSupport.h
index a581802..9164305 100644
--- a/include/llvm/PassAnalysisSupport.h
+++ b/include/llvm/PassAnalysisSupport.h
@@ -129,7 +129,7 @@ public:
// Find pass that is implementing PI.
Pass *findImplPass(AnalysisID PI) {
- Pass *ResultPass = 0;
+ Pass *ResultPass = nullptr;
for (unsigned i = 0; i < AnalysisImpls.size() ; ++i) {
if (AnalysisImpls[i].first == PI) {
ResultPass = AnalysisImpls[i].second;
@@ -182,7 +182,7 @@ AnalysisType *Pass::getAnalysisIfAvailable() const {
const void *PI = &AnalysisType::ID;
Pass *ResultPass = Resolver->getAnalysisIfAvailable(PI, true);
- if (ResultPass == 0) return 0;
+ if (!ResultPass) return nullptr;
// Because the AnalysisType may not be a subclass of pass (for
// AnalysisGroups), we use getAdjustedAnalysisPointer here to potentially
diff --git a/include/llvm/PassRegistry.h b/include/llvm/PassRegistry.h
index 756b1b8..7f2a014 100644
--- a/include/llvm/PassRegistry.h
+++ b/include/llvm/PassRegistry.h
@@ -37,7 +37,7 @@ class PassRegistry {
void *getImpl() const;
public:
- PassRegistry() : pImpl(0) { }
+ PassRegistry() : pImpl(nullptr) { }
~PassRegistry();
/// getPassRegistry - Access the global registry object, which is
diff --git a/include/llvm/PassSupport.h b/include/llvm/PassSupport.h
index baee77f..8efb45f 100644
--- a/include/llvm/PassSupport.h
+++ b/include/llvm/PassSupport.h
@@ -59,7 +59,7 @@ public:
/// through RegisterPass.
PassInfo(const char *name, const char *arg, const void *pi,
NormalCtor_t normal, bool isCFGOnly, bool is_analysis,
- TargetMachineCtor_t machine = NULL)
+ TargetMachineCtor_t machine = nullptr)
: PassName(name), PassArgument(arg), PassID(pi),
IsCFGOnlyPass(isCFGOnly),
IsAnalysis(is_analysis), IsAnalysisGroup(false), NormalCtor(normal),
@@ -70,8 +70,8 @@ public:
PassInfo(const char *name, const void *pi)
: PassName(name), PassArgument(""), PassID(pi),
IsCFGOnlyPass(false),
- IsAnalysis(false), IsAnalysisGroup(true), NormalCtor(0),
- TargetMachineCtor(0) {}
+ IsAnalysis(false), IsAnalysisGroup(true), NormalCtor(nullptr),
+ TargetMachineCtor(nullptr) {}
/// getPassName - Return the friendly name for the pass, never returns null
///
@@ -256,7 +256,7 @@ class RegisterAGBase : public PassInfo {
public:
RegisterAGBase(const char *Name,
const void *InterfaceID,
- const void *PassID = 0,
+ const void *PassID = nullptr,
bool isDefault = false);
};
diff --git a/include/llvm/ProfileData/InstrProf.h b/include/llvm/ProfileData/InstrProf.h
index d8f3ca6..8457678 100644
--- a/include/llvm/ProfileData/InstrProf.h
+++ b/include/llvm/ProfileData/InstrProf.h
@@ -29,6 +29,7 @@ struct instrprof_error {
bad_magic,
bad_header,
unsupported_version,
+ unsupported_hash_type,
too_large,
truncated,
malformed,
diff --git a/include/llvm/ProfileData/InstrProfReader.h b/include/llvm/ProfileData/InstrProfReader.h
index 2c070b9..3e18c76 100644
--- a/include/llvm/ProfileData/InstrProfReader.h
+++ b/include/llvm/ProfileData/InstrProfReader.h
@@ -16,10 +16,12 @@
#define LLVM_PROFILEDATA_INSTRPROF_READER_H_
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/LineIterator.h"
#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/Endian.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/OnDiskHashTable.h"
#include <iterator>
@@ -29,6 +31,9 @@ class InstrProfReader;
/// Profiling information for a single function.
struct InstrProfRecord {
+ InstrProfRecord() {}
+ InstrProfRecord(StringRef Name, uint64_t Hash, ArrayRef<uint64_t> Counts)
+ : Name(Name), Hash(Hash), Counts(Counts) {}
StringRef Name;
uint64_t Hash;
ArrayRef<uint64_t> Counts;
@@ -160,6 +165,7 @@ private:
const ProfileData *DataEnd;
const uint64_t *CountersStart;
const char *NamesStart;
+ const char *ProfileEnd;
RawInstrProfReader(const TextInstrProfReader &) LLVM_DELETED_FUNCTION;
RawInstrProfReader &operator=(const TextInstrProfReader &)
@@ -173,6 +179,7 @@ public:
error_code readNextRecord(InstrProfRecord &Record) override;
private:
+ error_code readNextHeader(const char *CurrentPos);
error_code readHeader(const RawHeader &Header);
template <class IntT>
IntT swap(IntT Int) const {
@@ -191,6 +198,104 @@ private:
typedef RawInstrProfReader<uint32_t> RawInstrProfReader32;
typedef RawInstrProfReader<uint64_t> RawInstrProfReader64;
+namespace IndexedInstrProf {
+enum class HashT : uint32_t;
+}
+
+/// Trait for lookups into the on-disk hash table for the binary instrprof
+/// format.
+class InstrProfLookupTrait {
+ std::vector<uint64_t> CountBuffer;
+ IndexedInstrProf::HashT HashType;
+public:
+ InstrProfLookupTrait(IndexedInstrProf::HashT HashType) : HashType(HashType) {}
+
+ typedef InstrProfRecord data_type;
+ typedef StringRef internal_key_type;
+ typedef StringRef external_key_type;
+ typedef uint64_t hash_value_type;
+ typedef uint64_t offset_type;
+
+ static bool EqualKey(StringRef A, StringRef B) { return A == B; }
+ static StringRef GetInternalKey(StringRef K) { return K; }
+
+ hash_value_type ComputeHash(StringRef K);
+
+ static std::pair<offset_type, offset_type>
+ ReadKeyDataLength(const unsigned char *&D) {
+ using namespace support;
+ offset_type KeyLen = endian::readNext<offset_type, little, unaligned>(D);
+ offset_type DataLen = endian::readNext<offset_type, little, unaligned>(D);
+ return std::make_pair(KeyLen, DataLen);
+ }
+
+ StringRef ReadKey(const unsigned char *D, offset_type N) {
+ return StringRef((const char *)D, N);
+ }
+
+ InstrProfRecord ReadData(StringRef K, const unsigned char *D, offset_type N) {
+ if (N < 2 * sizeof(uint64_t) || N % sizeof(uint64_t)) {
+ // The data is corrupt, don't try to read it.
+ CountBuffer.clear();
+ return InstrProfRecord("", 0, CountBuffer);
+ }
+
+ using namespace support;
+
+ // The first stored value is the hash.
+ uint64_t Hash = endian::readNext<uint64_t, little, unaligned>(D);
+ // Each counter follows.
+ unsigned NumCounters = N / sizeof(uint64_t) - 1;
+ CountBuffer.clear();
+ CountBuffer.reserve(NumCounters - 1);
+ for (unsigned I = 0; I < NumCounters; ++I)
+ CountBuffer.push_back(endian::readNext<uint64_t, little, unaligned>(D));
+
+ return InstrProfRecord(K, Hash, CountBuffer);
+ }
+};
+typedef OnDiskIterableChainedHashTable<InstrProfLookupTrait>
+ InstrProfReaderIndex;
+
+/// Reader for the indexed binary instrprof format.
+class IndexedInstrProfReader : public InstrProfReader {
+private:
+ /// The profile data file contents.
+ std::unique_ptr<MemoryBuffer> DataBuffer;
+ /// The index into the profile data.
+ std::unique_ptr<InstrProfReaderIndex> Index;
+ /// Iterator over the profile data.
+ InstrProfReaderIndex::data_iterator RecordIterator;
+ /// The maximum execution count among all functions.
+ uint64_t MaxFunctionCount;
+
+ IndexedInstrProfReader(const IndexedInstrProfReader &) LLVM_DELETED_FUNCTION;
+ IndexedInstrProfReader &operator=(const IndexedInstrProfReader &)
+ LLVM_DELETED_FUNCTION;
+public:
+ IndexedInstrProfReader(std::unique_ptr<MemoryBuffer> DataBuffer)
+ : DataBuffer(std::move(DataBuffer)), Index(nullptr),
+ RecordIterator(InstrProfReaderIndex::data_iterator()) {}
+
+ /// Return true if the given buffer is in an indexed instrprof format.
+ static bool hasFormat(const MemoryBuffer &DataBuffer);
+
+ /// Read the file header.
+ error_code readHeader() override;
+ /// Read a single record.
+ error_code readNextRecord(InstrProfRecord &Record) override;
+
+ /// Fill Counts with the profile data for the given function name.
+ error_code getFunctionCounts(StringRef FuncName, uint64_t &FuncHash,
+ std::vector<uint64_t> &Counts);
+ /// Return the maximum of all known function counts.
+ uint64_t getMaximumFunctionCount() { return MaxFunctionCount; }
+
+ /// Factory method to create an indexed reader.
+ static error_code create(std::string Path,
+ std::unique_ptr<IndexedInstrProfReader> &Result);
+};
+
} // end namespace llvm
#endif // LLVM_PROFILEDATA_INSTRPROF_READER_H_
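
A sketch of driving the new indexed reader: create() opens the file (hasFormat() can pre-check the magic) and getFunctionCounts() pulls one function's counters by name. Error handling is abbreviated, the function name is a placeholder, and the sketch assumes create() leaves the reader ready for queries:

#include "llvm/ProfileData/InstrProfReader.h"
#include <memory>
#include <string>
#include <vector>
using namespace llvm;

// Illustrative: load an indexed profile and fetch counters for one function.
static bool dumpCounts(const std::string &Path) {
  std::unique_ptr<IndexedInstrProfReader> Reader;
  if (IndexedInstrProfReader::create(Path, Reader))
    return false;                        // couldn't open, or wrong format
  uint64_t Hash = 0;
  std::vector<uint64_t> Counts;
  if (Reader->getFunctionCounts("main", Hash, Counts))
    return false;                        // function not present in the profile
  return !Counts.empty();                // first counter is typically the entry count
}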
diff --git a/include/llvm/ProfileData/InstrProfWriter.h b/include/llvm/ProfileData/InstrProfWriter.h
index f818fa0..fa37bf1 100644
--- a/include/llvm/ProfileData/InstrProfWriter.h
+++ b/include/llvm/ProfileData/InstrProfWriter.h
@@ -41,7 +41,7 @@ public:
error_code addFunctionCounts(StringRef FunctionName, uint64_t FunctionHash,
ArrayRef<uint64_t> Counters);
/// Ensure that all data is written to disk.
- void write(raw_ostream &OS);
+ void write(raw_fd_ostream &OS);
};
} // end namespace llvm
diff --git a/include/llvm/Support/ARMBuildAttributes.h b/include/llvm/Support/ARMBuildAttributes.h
index 69732fc..1631200 100644
--- a/include/llvm/Support/ARMBuildAttributes.h
+++ b/include/llvm/Support/ARMBuildAttributes.h
@@ -146,6 +146,19 @@ enum {
AllowNeon2 = 2, // SIMDv2 was permitted (Half-precision FP, MAC operations)
AllowNeonARMv8 = 3, // ARM v8-A SIMD was permitted
+ // Tag_ABI_PCS_RW_data, (=15), uleb128
+ AddressRWPCRel = 1, // Address RW static data PC-relative
+ AddressRWSBRel = 2, // Address RW static data SB-relative
+ AddressRWNone = 3, // No RW static data permitted
+
+ // Tag_ABI_PCS_RO_data, (=14), uleb128
+ AddressROPCRel = 1, // Address RO static data PC-relative
+ AddressRONone = 2, // No RO static data permitted
+
+ // Tag_ABI_PCS_GOT_use, (=17), uleb128
+ AddressDirect = 1, // Address imported data directly
+ AddressGOT = 2, // Address imported data indirectly (via GOT)
+
// Tag_ABI_FP_denormal, (=20), uleb128
PreserveFPSign = 2, // sign when flushed-to-zero is preserved
diff --git a/include/llvm/Support/Allocator.h b/include/llvm/Support/Allocator.h
index 0641322..7a7e4c0 100644
--- a/include/llvm/Support/Allocator.h
+++ b/include/llvm/Support/Allocator.h
@@ -6,14 +6,22 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file defines the MallocAllocator and BumpPtrAllocator interfaces.
-//
+/// \file
+///
+/// This file defines the MallocAllocator and BumpPtrAllocator interfaces. Both
+/// of these conform to an LLVM "Allocator" concept which consists of an
+/// Allocate method accepting a size and alignment, and a Deallocate accepting
+/// a pointer and size. Further, the LLVM "Allocator" concept has overloads of
+/// Allocate and Deallocate for setting size and alignment based on the final
+/// type. These overloads are typically provided by a base class template \c
+/// AllocatorBase.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_ALLOCATOR_H
#define LLVM_SUPPORT_ALLOCATOR_H
+#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/MathExtras.h"
@@ -24,90 +32,86 @@
#include <cstdlib>
namespace llvm {
-template <typename T> struct ReferenceAdder {
- typedef T &result;
-};
-template <typename T> struct ReferenceAdder<T &> {
- typedef T result;
-};
-class MallocAllocator {
+/// \brief CRTP base class providing obvious overloads for the core \c
+/// Allocate() methods of LLVM-style allocators.
+///
+/// This base class both documents the full public interface exposed by all
+/// LLVM-style allocators, and redirects all of the overloads to a single core
+/// set of methods which the derived class must define.
+template <typename DerivedT> class AllocatorBase {
public:
- MallocAllocator() {}
- ~MallocAllocator() {}
-
- void Reset() {}
-
- void *Allocate(size_t Size, size_t /*Alignment*/) { return malloc(Size); }
-
- template <typename T> T *Allocate() {
- return static_cast<T *>(malloc(sizeof(T)));
+ /// \brief Allocate \a Size bytes of \a Alignment aligned memory. This method
+ /// must be implemented by \c DerivedT.
+ void *Allocate(size_t Size, size_t Alignment) {
+#ifdef __clang__
+ static_assert(static_cast<void *(AllocatorBase::*)(size_t, size_t)>(
+ &AllocatorBase::Allocate) !=
+ static_cast<void *(DerivedT::*)(size_t, size_t)>(
+ &DerivedT::Allocate),
+ "Class derives from AllocatorBase without implementing the "
+ "core Allocate(size_t, size_t) overload!");
+#endif
+ return static_cast<DerivedT *>(this)->Allocate(Size, Alignment);
}
- template <typename T> T *Allocate(size_t Num) {
- return static_cast<T *>(malloc(sizeof(T) * Num));
+ /// \brief Deallocate \a Ptr to \a Size bytes of memory allocated by this
+ /// allocator.
+ void Deallocate(const void *Ptr, size_t Size) {
+#ifdef __clang__
+ static_assert(static_cast<void (AllocatorBase::*)(const void *, size_t)>(
+ &AllocatorBase::Deallocate) !=
+ static_cast<void (DerivedT::*)(const void *, size_t)>(
+ &DerivedT::Deallocate),
+ "Class derives from AllocatorBase without implementing the "
+ "core Deallocate(void *) overload!");
+#endif
+ return static_cast<DerivedT *>(this)->Deallocate(Ptr, Size);
}
- void Deallocate(const void *Ptr) { free(const_cast<void *>(Ptr)); }
+ // The rest of these methods are helpers that redirect to one of the above
+ // core methods.
- void PrintStats() const {}
-};
+ /// \brief Allocate space for a sequence of objects without constructing them.
+ template <typename T> T *Allocate(size_t Num = 1) {
+ return static_cast<T *>(Allocate(Num * sizeof(T), AlignOf<T>::Alignment));
+ }
-/// MemSlab - This structure lives at the beginning of every slab allocated by
-/// the bump allocator.
-class MemSlab {
-public:
- size_t Size;
- MemSlab *NextPtr;
+ /// \brief Deallocate space for a sequence of objects without destroying them.
+ template <typename T>
+ typename std::enable_if<
+ !std::is_same<typename std::remove_cv<T>::type, void>::value, void>::type
+ Deallocate(T *Ptr, size_t Num = 1) {
+ Deallocate(static_cast<const void *>(Ptr), Num * sizeof(T));
+ }
};
-/// SlabAllocator - This class can be used to parameterize the underlying
-/// allocation strategy for the bump allocator. In particular, this is used
-/// by the JIT to allocate contiguous swathes of executable memory. The
-/// interface uses MemSlab's instead of void *'s so that the allocator
-/// doesn't have to remember the size of the pointer it allocated.
-class SlabAllocator {
+class MallocAllocator : public AllocatorBase<MallocAllocator> {
public:
- virtual ~SlabAllocator();
- virtual MemSlab *Allocate(size_t Size) = 0;
- virtual void Deallocate(MemSlab *Slab) = 0;
-};
+ void Reset() {}
-/// MallocSlabAllocator - The default slab allocator for the bump allocator
-/// is an adapter class for MallocAllocator that just forwards the method
-/// calls and translates the arguments.
-class MallocSlabAllocator : public SlabAllocator {
- /// Allocator - The underlying allocator that we forward to.
- ///
- MallocAllocator Allocator;
+ void *Allocate(size_t Size, size_t /*Alignment*/) { return malloc(Size); }
-public:
- MallocSlabAllocator() : Allocator() {}
- virtual ~MallocSlabAllocator();
- MemSlab *Allocate(size_t Size) override;
- void Deallocate(MemSlab *Slab) override;
-};
+ // Pull in base class overloads.
+ using AllocatorBase<MallocAllocator>::Allocate;
-/// \brief Non-templated base class for the \c BumpPtrAllocatorImpl template.
-class BumpPtrAllocatorBase {
-public:
- void Deallocate(const void * /*Ptr*/) {}
- void PrintStats() const;
+ void Deallocate(const void *Ptr, size_t /*Size*/) {
+ free(const_cast<void *>(Ptr));
+ }
- /// \brief Returns the total physical memory allocated by this allocator.
- size_t getTotalMemory() const;
+ // Pull in base class overloads.
+ using AllocatorBase<MallocAllocator>::Deallocate;
-protected:
- /// \brief The slab that we are currently allocating into.
- MemSlab *CurSlab;
+ void PrintStats() const {}
+};
- /// \brief How many bytes we've allocated.
- ///
- /// Used so that we can compute how much space was wasted.
- size_t BytesAllocated;
+namespace detail {
- BumpPtrAllocatorBase() : CurSlab(0), BytesAllocated(0) {}
-};
+// We call out to an external function to actually print the message as the
+// printing code uses Allocator.h in its implementation.
+void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
+ size_t TotalMemory);
+} // End namespace detail.
/// \brief Allocate memory in an ever growing pool, as if by bump-pointer.
///
@@ -119,11 +123,15 @@ protected:
///
/// Note that this also has a threshold for forcing allocations above a certain
/// size into their own slab.
-template <size_t SlabSize = 4096, size_t SizeThreshold = SlabSize>
-class BumpPtrAllocatorImpl : public BumpPtrAllocatorBase {
- BumpPtrAllocatorImpl(const BumpPtrAllocatorImpl &) LLVM_DELETED_FUNCTION;
- void operator=(const BumpPtrAllocatorImpl &) LLVM_DELETED_FUNCTION;
-
+///
+/// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator
+/// object, which wraps malloc, to allocate memory, but it can be changed to
+/// use a custom allocator.
+template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
+ size_t SizeThreshold = SlabSize>
+class BumpPtrAllocatorImpl
+ : public AllocatorBase<
+ BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold>> {
public:
static_assert(SizeThreshold <= SlabSize,
"The SizeThreshold must be at most the SlabSize to ensure "
@@ -131,26 +139,69 @@ public:
"allocation.");
BumpPtrAllocatorImpl()
- : Allocator(DefaultSlabAllocator), NumSlabs(0) {}
- BumpPtrAllocatorImpl(SlabAllocator &Allocator)
- : Allocator(Allocator), NumSlabs(0) {}
- ~BumpPtrAllocatorImpl() { DeallocateSlabs(CurSlab); }
+ : CurPtr(nullptr), End(nullptr), BytesAllocated(0), Allocator() {}
+ template <typename T>
+ BumpPtrAllocatorImpl(T &&Allocator)
+ : CurPtr(nullptr), End(nullptr), BytesAllocated(0),
+ Allocator(std::forward<T &&>(Allocator)) {}
+
+ // Manually implement a move constructor as we must clear the old allocators
+ // slabs as a matter of correctness.
+ BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
+ : CurPtr(Old.CurPtr), End(Old.End), Slabs(std::move(Old.Slabs)),
+ CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
+ BytesAllocated(Old.BytesAllocated),
+ Allocator(std::move(Old.Allocator)) {
+ Old.CurPtr = Old.End = nullptr;
+ Old.BytesAllocated = 0;
+ Old.Slabs.clear();
+ Old.CustomSizedSlabs.clear();
+ }
+
+ ~BumpPtrAllocatorImpl() {
+ DeallocateSlabs(Slabs.begin(), Slabs.end());
+ DeallocateCustomSizedSlabs();
+ }
+
+ BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
+ DeallocateSlabs(Slabs.begin(), Slabs.end());
+ DeallocateCustomSizedSlabs();
+
+ CurPtr = RHS.CurPtr;
+ End = RHS.End;
+ BytesAllocated = RHS.BytesAllocated;
+ Slabs = std::move(RHS.Slabs);
+ CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
+ Allocator = std::move(RHS.Allocator);
+
+ RHS.CurPtr = RHS.End = nullptr;
+ RHS.BytesAllocated = 0;
+ RHS.Slabs.clear();
+ RHS.CustomSizedSlabs.clear();
+ return *this;
+ }
/// \brief Deallocate all but the current slab and reset the current pointer
/// to the beginning of it, freeing all memory allocated so far.
void Reset() {
- if (!CurSlab)
+ if (Slabs.empty())
return;
- DeallocateSlabs(CurSlab->NextPtr);
- CurSlab->NextPtr = 0;
- CurPtr = (char *)(CurSlab + 1);
- End = ((char *)CurSlab) + CurSlab->Size;
+
+ // Reset the state.
BytesAllocated = 0;
+ CurPtr = (char *)Slabs.front();
+ End = CurPtr + SlabSize;
+
+ // Deallocate all but the first slab, and all custom-sized slabs.
+ DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
+ Slabs.erase(std::next(Slabs.begin()), Slabs.end());
+ DeallocateCustomSizedSlabs();
+ CustomSizedSlabs.clear();
}
/// \brief Allocate space at the specified alignment.
void *Allocate(size_t Size, size_t Alignment) {
- if (!CurSlab) // Start a new slab if we haven't allocated one already.
+ if (!CurPtr) // Start a new slab if we haven't allocated one already.
StartNewSlab();
// Keep track of how many bytes we've allocated.
@@ -174,18 +225,13 @@ public:
}
// If Size is really big, allocate a separate slab for it.
- size_t PaddedSize = Size + sizeof(MemSlab) + Alignment - 1;
+ size_t PaddedSize = Size + Alignment - 1;
if (PaddedSize > SizeThreshold) {
- ++NumSlabs;
- MemSlab *NewSlab = Allocator.Allocate(PaddedSize);
-
- // Put the new slab after the current slab, since we are not allocating
- // into it.
- NewSlab->NextPtr = CurSlab->NextPtr;
- CurSlab->NextPtr = NewSlab;
+ void *NewSlab = Allocator.Allocate(PaddedSize, 0);
+ CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));
- Ptr = alignPtr((char *)(NewSlab + 1), Alignment);
- assert((uintptr_t)Ptr + Size <= (uintptr_t)NewSlab + NewSlab->Size);
+ Ptr = alignPtr((char *)NewSlab, Alignment);
+ assert((uintptr_t)Ptr + Size <= (uintptr_t)NewSlab + PaddedSize);
__msan_allocated_memory(Ptr, Size);
return Ptr;
}
@@ -199,36 +245,31 @@ public:
return Ptr;
}
- /// \brief Allocate space for one object without constructing it.
- template <typename T> T *Allocate() {
- return static_cast<T *>(Allocate(sizeof(T), AlignOf<T>::Alignment));
- }
+ // Pull in base class overloads.
+ using AllocatorBase<BumpPtrAllocatorImpl>::Allocate;
- /// \brief Allocate space for an array of objects without constructing them.
- template <typename T> T *Allocate(size_t Num) {
- return static_cast<T *>(Allocate(Num * sizeof(T), AlignOf<T>::Alignment));
- }
+ void Deallocate(const void * /*Ptr*/, size_t /*Size*/) {}
- /// \brief Allocate space for an array of objects with the specified alignment
- /// and without constructing them.
- template <typename T> T *Allocate(size_t Num, size_t Alignment) {
- // Round EltSize up to the specified alignment.
- size_t EltSize = (sizeof(T) + Alignment - 1) & (-Alignment);
- return static_cast<T *>(Allocate(Num * EltSize, Alignment));
- }
+ // Pull in base class overloads.
+ using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate;
- size_t GetNumSlabs() const { return NumSlabs; }
+ size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }
-private:
- /// \brief The default allocator used if one is not provided.
- MallocSlabAllocator DefaultSlabAllocator;
+ size_t getTotalMemory() const {
+ size_t TotalMemory = 0;
+ for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
+ TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
+ for (auto &PtrAndSize : CustomSizedSlabs)
+ TotalMemory += PtrAndSize.second;
+ return TotalMemory;
+ }
- /// \brief The underlying allocator we use to get slabs of memory.
- ///
- /// This defaults to MallocSlabAllocator, which wraps malloc, but it could be
- /// changed to use a custom allocator.
- SlabAllocator &Allocator;
+ void PrintStats() const {
+ detail::printBumpPtrAllocatorStats(Slabs.size(), BytesAllocated,
+ getTotalMemory());
+ }
+private:
/// \brief The current pointer into the current slab.
///
/// This points to the next free byte in the slab.
@@ -237,46 +278,67 @@ private:
/// \brief The end of the current slab.
char *End;
- /// \brief How many slabs we've allocated.
+ /// \brief The slabs allocated so far.
+ SmallVector<void *, 4> Slabs;
+
+ /// \brief Custom-sized slabs allocated for too-large allocation requests.
+ SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs;
+
+ /// \brief How many bytes we've allocated.
///
- /// Used to scale the size of each slab and reduce the number of allocations
- /// for extremely heavy memory use scenarios.
- size_t NumSlabs;
+ /// Used so that we can compute how much space was wasted.
+ size_t BytesAllocated;
- /// \brief Allocate a new slab and move the bump pointers over into the new
- /// slab, modifying CurPtr and End.
- void StartNewSlab() {
- ++NumSlabs;
+ /// \brief The allocator instance we use to get slabs of memory.
+ AllocatorT Allocator;
+
+ static size_t computeSlabSize(unsigned SlabIdx) {
// Scale the actual allocated slab size based on the number of slabs
// allocated. Every 128 slabs allocated, we double the allocated size to
// reduce allocation frequency, but saturate at multiplying the slab size by
// 2^30.
- // FIXME: Currently, this count includes special slabs for objects above the
- // size threshold. That will be fixed in a subsequent commit to make the
- // growth even more predictable.
- size_t AllocatedSlabSize =
- SlabSize * ((size_t)1 << std::min<size_t>(30, NumSlabs / 128));
-
- MemSlab *NewSlab = Allocator.Allocate(AllocatedSlabSize);
- NewSlab->NextPtr = CurSlab;
- CurSlab = NewSlab;
- CurPtr = (char *)(CurSlab + 1);
- End = ((char *)CurSlab) + CurSlab->Size;
+ return SlabSize * ((size_t)1 << std::min<size_t>(30, SlabIdx / 128));
}
- /// \brief Deallocate all memory slabs after and including this one.
- void DeallocateSlabs(MemSlab *Slab) {
- while (Slab) {
- MemSlab *NextSlab = Slab->NextPtr;
+ /// \brief Allocate a new slab and move the bump pointers over into the new
+ /// slab, modifying CurPtr and End.
+ void StartNewSlab() {
+ size_t AllocatedSlabSize = computeSlabSize(Slabs.size());
+
+ void *NewSlab = Allocator.Allocate(AllocatedSlabSize, 0);
+ Slabs.push_back(NewSlab);
+ CurPtr = (char *)(NewSlab);
+ End = ((char *)NewSlab) + AllocatedSlabSize;
+ }
+
+ /// \brief Deallocate a sequence of slabs.
+ void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
+ SmallVectorImpl<void *>::iterator E) {
+ for (; I != E; ++I) {
+ size_t AllocatedSlabSize =
+ computeSlabSize(std::distance(Slabs.begin(), I));
#ifndef NDEBUG
// Poison the memory so stale pointers crash sooner. Note we must
// preserve the Size and NextPtr fields at the beginning.
- sys::Memory::setRangeWritable(Slab + 1, Slab->Size - sizeof(MemSlab));
- memset(Slab + 1, 0xCD, Slab->Size - sizeof(MemSlab));
+ sys::Memory::setRangeWritable(*I, AllocatedSlabSize);
+ memset(*I, 0xCD, AllocatedSlabSize);
#endif
- Allocator.Deallocate(Slab);
- Slab = NextSlab;
- --NumSlabs;
+ Allocator.Deallocate(*I, AllocatedSlabSize);
+ }
+ }
+
+ /// \brief Deallocate all memory for custom sized slabs.
+ void DeallocateCustomSizedSlabs() {
+ for (auto &PtrAndSize : CustomSizedSlabs) {
+ void *Ptr = PtrAndSize.first;
+ size_t Size = PtrAndSize.second;
+#ifndef NDEBUG
+ // Poison the memory so stale pointers crash sooner. These custom
+ // slabs have no header fields that need to be preserved.
+ sys::Memory::setRangeWritable(Ptr, Size);
+ memset(Ptr, 0xCD, Size);
+#endif
+ Allocator.Deallocate(Ptr, Size);
}
}
@@ -297,25 +359,42 @@ template <typename T> class SpecificBumpPtrAllocator {
public:
SpecificBumpPtrAllocator() : Allocator() {}
- SpecificBumpPtrAllocator(SlabAllocator &allocator) : Allocator(allocator) {}
-
+ SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
+ : Allocator(std::move(Old.Allocator)) {}
~SpecificBumpPtrAllocator() { DestroyAll(); }
+ SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
+ Allocator = std::move(RHS.Allocator);
+ return *this;
+ }
+
/// Call the destructor of each allocated object and deallocate all but the
/// current slab and reset the current pointer to the beginning of it, freeing
/// all memory allocated so far.
void DestroyAll() {
- MemSlab *Slab = Allocator.CurSlab;
- while (Slab) {
- char *End = Slab == Allocator.CurSlab ? Allocator.CurPtr
- : (char *)Slab + Slab->Size;
- for (char *Ptr = (char *)(Slab + 1); Ptr < End; Ptr += sizeof(T)) {
- Ptr = alignPtr(Ptr, alignOf<T>());
- if (Ptr + sizeof(T) <= End)
- reinterpret_cast<T *>(Ptr)->~T();
- }
- Slab = Slab->NextPtr;
+ auto DestroyElements = [](char *Begin, char *End) {
+ assert(Begin == alignPtr(Begin, alignOf<T>()));
+ for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
+ reinterpret_cast<T *>(Ptr)->~T();
+ };
+
+ for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
+ ++I) {
+ size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
+ std::distance(Allocator.Slabs.begin(), I));
+ char *Begin = alignPtr((char *)*I, alignOf<T>());
+ char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
+ : (char *)*I + AllocatedSlabSize;
+
+ DestroyElements(Begin, End);
+ }
+
+ for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
+ void *Ptr = PtrAndSize.first;
+ size_t Size = PtrAndSize.second;
+ DestroyElements(alignPtr((char *)Ptr, alignOf<T>()), (char *)Ptr + Size);
}
+
Allocator.Reset();
}
@@ -325,10 +404,10 @@ public:
} // end namespace llvm
-template <size_t SlabSize, size_t SizeThreshold>
-void *
-operator new(size_t Size,
- llvm::BumpPtrAllocatorImpl<SlabSize, SizeThreshold> &Allocator) {
+template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
+void *operator new(size_t Size,
+ llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
+ SizeThreshold> &Allocator) {
struct S {
char c;
union {
@@ -342,8 +421,9 @@ operator new(size_t Size,
Size, std::min((size_t)llvm::NextPowerOf2(Size), offsetof(S, x)));
}
-template <size_t SlabSize, size_t SizeThreshold>
-void operator delete(void *,
- llvm::BumpPtrAllocatorImpl<SlabSize, SizeThreshold> &) {}
+template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
+void operator delete(
+ void *, llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold> &) {
+}
#endif // LLVM_SUPPORT_ALLOCATOR_H
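
Taken together, these hunks replace the MemSlab/SlabAllocator machinery with a CRTP AllocatorBase and a BumpPtrAllocatorImpl parameterized over any conforming allocator. A minimal sketch of a custom allocator plugged into the bump allocator follows; the CountingAllocator name and its byte-counting logic are invented for illustration, while the signatures mirror the ones introduced above.

    #include "llvm/Support/Allocator.h"
    #include <cstdlib>

    namespace {
    class CountingAllocator : public llvm::AllocatorBase<CountingAllocator> {
      size_t Outstanding = 0;

    public:
      // The two core methods every LLVM-style allocator must provide.
      void *Allocate(size_t Size, size_t /*Alignment*/) {
        Outstanding += Size;
        return std::malloc(Size);
      }
      void Deallocate(const void *Ptr, size_t Size) {
        Outstanding -= Size;
        std::free(const_cast<void *>(Ptr));
      }

      // Pull in the typed Allocate<T>()/Deallocate<T>() overloads from the base.
      using AllocatorBase<CountingAllocator>::Allocate;
      using AllocatorBase<CountingAllocator>::Deallocate;

      size_t bytesOutstanding() const { return Outstanding; }
    };
    } // end anonymous namespace

    void exampleUse() {
      // Parameterize the bump allocator with the custom slab allocator.
      llvm::BumpPtrAllocatorImpl<CountingAllocator> BPA;
      int *I = BPA.Allocate<int>();      // typed overload from AllocatorBase
      void *Raw = BPA.Allocate(128, 16); // core Allocate(Size, Alignment)
      (void)I;
      (void)Raw;
      BPA.Reset();                       // frees all but the first slab
    }
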
diff --git a/include/llvm/Support/ArrayRecycler.h b/include/llvm/Support/ArrayRecycler.h
index e974332..36f644a 100644
--- a/include/llvm/Support/ArrayRecycler.h
+++ b/include/llvm/Support/ArrayRecycler.h
@@ -44,10 +44,10 @@ class ArrayRecycler {
// Return NULL if no entries are available.
T *pop(unsigned Idx) {
if (Idx >= Bucket.size())
- return 0;
+ return nullptr;
FreeList *Entry = Bucket[Idx];
if (!Entry)
- return 0;
+ return nullptr;
Bucket[Idx] = Entry->Next;
return reinterpret_cast<T*>(Entry);
}
diff --git a/include/llvm/Support/BlockFrequency.h b/include/llvm/Support/BlockFrequency.h
index dae520b..4304a25 100644
--- a/include/llvm/Support/BlockFrequency.h
+++ b/include/llvm/Support/BlockFrequency.h
@@ -23,14 +23,8 @@ class BranchProbability;
// This class represents Block Frequency as a 64-bit value.
class BlockFrequency {
-
uint64_t Frequency;
- /// \brief Scale the given BlockFrequency by N/D. Return the remainder from
- /// the division by D. Upon overflow, the routine will saturate and
- /// additionally will return the remainder set to D.
- uint32_t scale(uint32_t N, uint32_t D);
-
public:
BlockFrequency(uint64_t Freq = 0) : Frequency(Freq) { }
@@ -58,10 +52,6 @@ public:
/// \brief Shift block frequency to the right by count digits saturating to 1.
BlockFrequency &operator>>=(const unsigned count);
- /// \brief Scale the given BlockFrequency by N/D. Return the remainder from
- /// the division by D. Upon overflow, the routine will saturate.
- uint32_t scale(const BranchProbability &Prob);
-
bool operator<(const BlockFrequency &RHS) const {
return Frequency < RHS.Frequency;
}
diff --git a/include/llvm/Support/BranchProbability.h b/include/llvm/Support/BranchProbability.h
index eedf692..9aab6ac 100644
--- a/include/llvm/Support/BranchProbability.h
+++ b/include/llvm/Support/BranchProbability.h
@@ -46,10 +46,26 @@ public:
return BranchProbability(D - N, D);
}
- void print(raw_ostream &OS) const;
+ raw_ostream &print(raw_ostream &OS) const;
void dump() const;
+ /// \brief Scale a large integer.
+ ///
+ /// Scales \c Num. Guarantees full precision. Returns the floor of the
+ /// result.
+ ///
+ /// \return \c Num times \c this.
+ uint64_t scale(uint64_t Num) const;
+
+ /// \brief Scale a large integer by the inverse.
+ ///
+ /// Scales \c Num by the inverse of \c this. Guarantees full precision.
+ /// Returns the floor of the result.
+ ///
+ /// \return \c Num divided by \c this.
+ uint64_t scaleByInverse(uint64_t Num) const;
+
bool operator==(BranchProbability RHS) const {
return (uint64_t)N * RHS.D == (uint64_t)D * RHS.N;
}
@@ -59,18 +75,14 @@ public:
bool operator<(BranchProbability RHS) const {
return (uint64_t)N * RHS.D < (uint64_t)D * RHS.N;
}
- bool operator>(BranchProbability RHS) const {
- return RHS < *this;
- }
- bool operator<=(BranchProbability RHS) const {
- return (uint64_t)N * RHS.D <= (uint64_t)D * RHS.N;
- }
- bool operator>=(BranchProbability RHS) const {
- return RHS <= *this;
- }
+ bool operator>(BranchProbability RHS) const { return RHS < *this; }
+ bool operator<=(BranchProbability RHS) const { return !(RHS < *this); }
+ bool operator>=(BranchProbability RHS) const { return !(*this < RHS); }
};
-raw_ostream &operator<<(raw_ostream &OS, const BranchProbability &Prob);
+inline raw_ostream &operator<<(raw_ostream &OS, const BranchProbability &Prob) {
+ return Prob.print(OS);
+}
}
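
The new scale() and scaleByInverse() helpers keep full 64-bit precision and return the floor of the result. A small illustration with invented numbers:

    #include "llvm/Support/BranchProbability.h"
    #include <cstdint>
    using llvm::BranchProbability;

    void scaleExample() {
      BranchProbability P(1, 3);          // probability 1/3
      uint64_t A = P.scale(100);          // floor(100 * 1 / 3) == 33
      uint64_t B = P.scaleByInverse(33);  // floor(33 * 3 / 1) == 99
      (void)A;
      (void)B;
    }
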
diff --git a/include/llvm/Support/COFF.h b/include/llvm/Support/COFF.h
index dca7fc6..f0e5c7d 100644
--- a/include/llvm/Support/COFF.h
+++ b/include/llvm/Support/COFF.h
@@ -275,7 +275,7 @@ namespace COFF {
uint16_t Type;
};
- enum RelocationTypeX86 {
+ enum RelocationTypeI386 {
IMAGE_REL_I386_ABSOLUTE = 0x0000,
IMAGE_REL_I386_DIR16 = 0x0001,
IMAGE_REL_I386_REL16 = 0x0002,
@@ -286,8 +286,10 @@ namespace COFF {
IMAGE_REL_I386_SECREL = 0x000B,
IMAGE_REL_I386_TOKEN = 0x000C,
IMAGE_REL_I386_SECREL7 = 0x000D,
- IMAGE_REL_I386_REL32 = 0x0014,
+ IMAGE_REL_I386_REL32 = 0x0014
+ };
+ enum RelocationTypeAMD64 {
IMAGE_REL_AMD64_ABSOLUTE = 0x0000,
IMAGE_REL_AMD64_ADDR64 = 0x0001,
IMAGE_REL_AMD64_ADDR32 = 0x0002,
diff --git a/include/llvm/Support/Casting.h b/include/llvm/Support/Casting.h
index 689f590..beed31a 100644
--- a/include/llvm/Support/Casting.h
+++ b/include/llvm/Support/Casting.h
@@ -245,7 +245,7 @@ inline typename cast_retty<X, Y *>::ret_type cast(Y *Val) {
template <class X, class Y>
LLVM_ATTRIBUTE_UNUSED_RESULT inline typename cast_retty<X, Y *>::ret_type
cast_or_null(Y *Val) {
- if (Val == 0) return 0;
+ if (!Val) return nullptr;
assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!");
return cast<X>(Val);
}
@@ -263,19 +263,19 @@ template <class X, class Y>
LLVM_ATTRIBUTE_UNUSED_RESULT inline typename std::enable_if<
!is_simple_type<Y>::value, typename cast_retty<X, const Y>::ret_type>::type
dyn_cast(const Y &Val) {
- return isa<X>(Val) ? cast<X>(Val) : 0;
+ return isa<X>(Val) ? cast<X>(Val) : nullptr;
}
template <class X, class Y>
LLVM_ATTRIBUTE_UNUSED_RESULT inline typename cast_retty<X, Y>::ret_type
dyn_cast(Y &Val) {
- return isa<X>(Val) ? cast<X>(Val) : 0;
+ return isa<X>(Val) ? cast<X>(Val) : nullptr;
}
template <class X, class Y>
LLVM_ATTRIBUTE_UNUSED_RESULT inline typename cast_retty<X, Y *>::ret_type
dyn_cast(Y *Val) {
- return isa<X>(Val) ? cast<X>(Val) : 0;
+ return isa<X>(Val) ? cast<X>(Val) : nullptr;
}
// dyn_cast_or_null<X> - Functionally identical to dyn_cast, except that a null
@@ -284,7 +284,7 @@ dyn_cast(Y *Val) {
template <class X, class Y>
LLVM_ATTRIBUTE_UNUSED_RESULT inline typename cast_retty<X, Y *>::ret_type
dyn_cast_or_null(Y *Val) {
- return (Val && isa<X>(Val)) ? cast<X>(Val) : 0;
+ return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
}
} // End llvm namespace
diff --git a/include/llvm/Support/CommandLine.h b/include/llvm/Support/CommandLine.h
index e49a97e..5cb5501 100644
--- a/include/llvm/Support/CommandLine.h
+++ b/include/llvm/Support/CommandLine.h
@@ -41,14 +41,14 @@ namespace cl {
// ParseCommandLineOptions - Command line option processing entry point.
//
void ParseCommandLineOptions(int argc, const char * const *argv,
- const char *Overview = 0);
+ const char *Overview = nullptr);
//===----------------------------------------------------------------------===//
// ParseEnvironmentOptions - Environment variable option processing alternate
// entry point.
//
void ParseEnvironmentOptions(const char *progName, const char *envvar,
- const char *Overview = 0);
+ const char *Overview = nullptr);
///===---------------------------------------------------------------------===//
/// SetVersionPrinter - Override the default (LLVM specific) version printer
@@ -146,7 +146,7 @@ private:
const char *const Description;
void registerCategory();
public:
- OptionCategory(const char *const Name, const char *const Description = 0)
+ OptionCategory(const char *const Name, const char *const Description = nullptr)
: Name(Name), Description(Description) { registerCategory(); }
const char *getName() const { return Name; }
const char *getDescription() const { return Description; }
@@ -238,7 +238,7 @@ protected:
enum OptionHidden Hidden)
: NumOccurrences(0), Occurrences(OccurrencesFlag), Value(0),
HiddenFlag(Hidden), Formatting(NormalFormatting), Misc(0),
- Position(0), AdditionalVals(0), NextRegistered(0),
+ Position(0), AdditionalVals(0), NextRegistered(nullptr),
ArgStr(""), HelpStr(""), ValueStr(""), Category(&GeneralCategory) {
}
@@ -763,7 +763,7 @@ public:
}
// getValueName - Do not print =<value> at all.
- const char *getValueName() const override { return 0; }
+ const char *getValueName() const override { return nullptr; }
void printOptionDiff(const Option &O, bool V, OptVal Default,
size_t GlobalWidth) const;
@@ -787,7 +787,7 @@ public:
}
// getValueName - Do not print =<value> at all.
- const char *getValueName() const override { return 0; }
+ const char *getValueName() const override { return nullptr; }
void printOptionDiff(const Option &O, boolOrDefault V, OptVal Default,
size_t GlobalWidth) const;
@@ -1063,12 +1063,12 @@ class opt_storage {
OptionValue<DataType> Default;
void check_location() const {
- assert(Location != 0 && "cl::location(...) not specified for a command "
+ assert(Location && "cl::location(...) not specified for a command "
"line option with external storage, "
"or cl::init specified before cl::location()!!");
}
public:
- opt_storage() : Location(0) {}
+ opt_storage() : Location(nullptr) {}
bool setLocation(Option &O, DataType &L) {
if (Location)
@@ -1469,7 +1469,7 @@ class bits_storage {
}
public:
- bits_storage() : Location(0) {}
+ bits_storage() : Location(nullptr) {}
bool setLocation(Option &O, unsigned &L) {
if (Location)
@@ -1664,7 +1664,7 @@ class alias : public Option {
void done() {
if (!hasArgStr())
error("cl::alias must have argument name specified!");
- if (AliasFor == 0)
+ if (!AliasFor)
error("cl::alias must have an cl::aliasopt(option) specified!");
addArgument();
}
@@ -1677,27 +1677,28 @@ public:
// One option...
template<class M0t>
- explicit alias(const M0t &M0) : Option(Optional, Hidden), AliasFor(0) {
+ explicit alias(const M0t &M0) : Option(Optional, Hidden), AliasFor(nullptr) {
apply(M0, this);
done();
}
// Two options...
template<class M0t, class M1t>
- alias(const M0t &M0, const M1t &M1) : Option(Optional, Hidden), AliasFor(0) {
+ alias(const M0t &M0, const M1t &M1)
+ : Option(Optional, Hidden), AliasFor(nullptr) {
apply(M0, this); apply(M1, this);
done();
}
// Three options...
template<class M0t, class M1t, class M2t>
alias(const M0t &M0, const M1t &M1, const M2t &M2)
- : Option(Optional, Hidden), AliasFor(0) {
+ : Option(Optional, Hidden), AliasFor(nullptr) {
apply(M0, this); apply(M1, this); apply(M2, this);
done();
}
// Four options...
template<class M0t, class M1t, class M2t, class M3t>
alias(const M0t &M0, const M1t &M1, const M2t &M2, const M3t &M3)
- : Option(Optional, Hidden), AliasFor(0) {
+ : Option(Optional, Hidden), AliasFor(nullptr) {
apply(M0, this); apply(M1, this); apply(M2, this); apply(M3, this);
done();
}
diff --git a/include/llvm/Support/Compression.h b/include/llvm/Support/Compression.h
index 80eff5c..8152b60 100644
--- a/include/llvm/Support/Compression.h
+++ b/include/llvm/Support/Compression.h
@@ -16,10 +16,10 @@
#include "llvm/Support/DataTypes.h"
#include <memory>
+#include "llvm/ADT/SmallVector.h"
namespace llvm {
-class MemoryBuffer;
class StringRef;
namespace zlib {
@@ -42,12 +42,11 @@ enum Status {
bool isAvailable();
-Status compress(StringRef InputBuffer,
- std::unique_ptr<MemoryBuffer> &CompressedBuffer,
+Status compress(StringRef InputBuffer, SmallVectorImpl<char> &CompressedBuffer,
CompressionLevel Level = DefaultCompression);
Status uncompress(StringRef InputBuffer,
- std::unique_ptr<MemoryBuffer> &UncompressedBuffer,
+ SmallVectorImpl<char> &UncompressedBuffer,
size_t UncompressedSize);
uint32_t crc32(StringRef Buffer);
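
With MemoryBuffer gone from this interface, compressed and uncompressed bytes now land in any SmallVectorImpl<char>. A rough round-trip sketch follows; the zlib::StatusOK enumerator is recalled from the surrounding header rather than shown in this hunk.

    #include "llvm/ADT/SmallString.h"
    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/Compression.h"
    using namespace llvm;

    bool roundTrip(StringRef Input, SmallVectorImpl<char> &Uncompressed) {
      if (!zlib::isAvailable())
        return false;
      SmallString<128> Compressed;
      if (zlib::compress(Input, Compressed) != zlib::StatusOK)
        return false;
      return zlib::uncompress(Compressed.str(), Uncompressed, Input.size()) ==
             zlib::StatusOK;
    }
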
diff --git a/include/llvm/Support/CrashRecoveryContext.h b/include/llvm/Support/CrashRecoveryContext.h
index 4500efe..c132373 100644
--- a/include/llvm/Support/CrashRecoveryContext.h
+++ b/include/llvm/Support/CrashRecoveryContext.h
@@ -12,11 +12,13 @@
#include <string>
+#include "llvm/ADT/STLExtras.h"
+
namespace llvm {
class StringRef;
class CrashRecoveryContextCleanup;
-
+
/// \brief Crash recovery helper object.
///
/// This class implements support for running operations in a safe context so
@@ -46,21 +48,10 @@ class CrashRecoveryContext {
void *Impl;
CrashRecoveryContextCleanup *head;
- /// An adaptor to convert an arbitrary functor into a void(void*), void* pair.
- template<typename T> struct FunctorAdaptor {
- T Fn;
- static void invoke(void *Data) {
- return static_cast<FunctorAdaptor<T>*>(Data)->Fn();
- }
- typedef void Callback(void*);
- Callback *fn() { return &invoke; }
- void *arg() { return this; }
- };
-
public:
- CrashRecoveryContext() : Impl(0), head(0) {}
+ CrashRecoveryContext() : Impl(nullptr), head(nullptr) {}
~CrashRecoveryContext();
-
+
void registerCleanup(CrashRecoveryContextCleanup *cleanup);
void unregisterCleanup(CrashRecoveryContextCleanup *cleanup);
@@ -86,11 +77,9 @@ public:
/// make as little assumptions as possible about the program state when
/// RunSafely has returned false. Clients can use getBacktrace() to retrieve
/// the backtrace of the crash on failures.
- bool RunSafely(void (*Fn)(void*), void *UserData);
- template<typename Functor>
- bool RunSafely(Functor Fn) {
- FunctorAdaptor<Functor> Adaptor = { Fn };
- return RunSafely(Adaptor.fn(), Adaptor.arg());
+ bool RunSafely(function_ref<void()> Fn);
+ bool RunSafely(void (*Fn)(void*), void *UserData) {
+ return RunSafely([&]() { Fn(UserData); });
}
/// \brief Execute the provide callback function (with the given arguments) in
@@ -98,12 +87,10 @@ public:
/// requested stack size).
///
/// See RunSafely() and llvm_execute_on_thread().
+ bool RunSafelyOnThread(function_ref<void()>, unsigned RequestedStackSize = 0);
bool RunSafelyOnThread(void (*Fn)(void*), void *UserData,
- unsigned RequestedStackSize = 0);
- template<typename Functor>
- bool RunSafelyOnThread(Functor Fn, unsigned RequestedStackSize = 0) {
- FunctorAdaptor<Functor> Adaptor = { Fn };
- return RunSafelyOnThread(Adaptor.fn(), Adaptor.arg(), RequestedStackSize);
+ unsigned RequestedStackSize = 0) {
+ return RunSafelyOnThread([&]() { Fn(UserData); }, RequestedStackSize);
}
/// \brief Explicitly trigger a crash recovery in the current process, and
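
RunSafely() and RunSafelyOnThread() now accept any callable through function_ref, and the old pointer-based overloads simply forward to them. A sketch of guarding a crash-prone call; CrashRecoveryContext::Enable() and getBacktrace() are assumed from the rest of the header, not from these hunks.

    #include "llvm/Support/CrashRecoveryContext.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    bool parseGuarded(const char *Data, void (*Parse)(const char *)) {
      CrashRecoveryContext::Enable();   // install crash handlers once
      CrashRecoveryContext CRC;
      // Any callable converts to function_ref<void()>; no adaptor struct needed.
      bool NoCrash = CRC.RunSafely([&] { Parse(Data); });
      if (!NoCrash)
        errs() << "parser crashed:\n" << CRC.getBacktrace() << "\n";
      return NoCrash;
    }
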
diff --git a/include/llvm/Support/Debug.h b/include/llvm/Support/Debug.h
index 2702408..e93e6ca 100644
--- a/include/llvm/Support/Debug.h
+++ b/include/llvm/Support/Debug.h
@@ -13,10 +13,12 @@
//
// In particular, just wrap your code with the DEBUG() macro, and it will be
// enabled automatically if you specify '-debug' on the command-line.
-// Alternatively, you can also use the SET_DEBUG_TYPE("foo") macro to specify
-// that your debug code belongs to class "foo". Then, on the command line, you
-// can specify '-debug-only=foo' to enable JUST the debug information for the
-// foo class.
+// Alternatively, you can also define the DEBUG_TYPE macro to "foo" to specify
+// that your debug code belongs to class "foo". Be careful that you only do
+// this after including Debug.h and not around any #include of headers. Headers
+// should define and undef the macro around the code that needs to use the
+// DEBUG() macro. Then, on the command line, you can specify '-debug-only=foo'
+// to enable JUST the debug information for the foo class.
//
// When compiling without assertions, the -debug-* options and all code in
// DEBUG() statements disappears, so it does not affect the runtime of the code.
@@ -30,12 +32,6 @@
namespace llvm {
-/// DEBUG_TYPE macro - Files can specify a DEBUG_TYPE as a string, which causes
-/// all of their DEBUG statements to be activatable with -debug-only=thatstring.
-#ifndef DEBUG_TYPE
-#define DEBUG_TYPE ""
-#endif
-
#ifndef NDEBUG
/// DebugFlag - This boolean is set to true if the '-debug' command line option
/// is specified. This should probably not be referenced directly, instead, use
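
A sketch of the DEBUG_TYPE discipline the updated comment describes: define the macro only after all #includes so that headers see nothing but their own definition. The function name and debug string here are made up for illustration.

    #include "llvm/Support/Debug.h"
    #include "llvm/Support/raw_ostream.h"

    // In a .cpp file: define DEBUG_TYPE only after all of the #includes.
    #define DEBUG_TYPE "foo"

    void frobnicate(int N) {
      // Printed only in +Asserts builds, with -debug or -debug-only=foo.
      DEBUG(llvm::dbgs() << "frobnicating " << N << " widgets\n");
    }

    // A header following the same rule would also '#undef DEBUG_TYPE' afterwards.
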
diff --git a/include/llvm/Support/DynamicLibrary.h b/include/llvm/Support/DynamicLibrary.h
index 1e2d16c..de47be6 100644
--- a/include/llvm/Support/DynamicLibrary.h
+++ b/include/llvm/Support/DynamicLibrary.h
@@ -65,7 +65,7 @@ namespace sys {
/// It is safe to call this function multiple times for the same library.
/// @brief Open a dynamic library permanently.
static DynamicLibrary getPermanentLibrary(const char *filename,
- std::string *errMsg = 0);
+ std::string *errMsg = nullptr);
/// This function permanently loads the dynamic library at the given path.
/// Use this instead of getPermanentLibrary() when you won't need to get
@@ -73,7 +73,7 @@ namespace sys {
///
/// It is safe to call this function multiple times for the same library.
static bool LoadLibraryPermanently(const char *Filename,
- std::string *ErrMsg = 0) {
+ std::string *ErrMsg = nullptr) {
return !getPermanentLibrary(Filename, ErrMsg).isValid();
}
diff --git a/include/llvm/Support/ELF.h b/include/llvm/Support/ELF.h
index 7b10ebd..0b3e55b 100644
--- a/include/llvm/Support/ELF.h
+++ b/include/llvm/Support/ELF.h
@@ -807,6 +807,7 @@ enum : unsigned {
EF_MIPS_CPIC = 0x00000004, // Call object with Position independent code
EF_MIPS_ABI2 = 0x00000020,
EF_MIPS_32BITMODE = 0x00000100,
+ EF_MIPS_NAN2008 = 0x00000400, // Uses IEEE 754-2008 NaN encoding
EF_MIPS_ABI_O32 = 0x00001000, // This file follows the first MIPS 32 bit ABI
//ARCH_ASE
@@ -823,11 +824,12 @@ enum : unsigned {
EF_MIPS_ARCH_64 = 0x60000000, // MIPS64 instruction set per linux not elf.h
EF_MIPS_ARCH_32R2 = 0x70000000, // mips32r2
EF_MIPS_ARCH_64R2 = 0x80000000, // mips64r2
+ EF_MIPS_ARCH_32R6 = 0x90000000, // mips32r6
+ EF_MIPS_ARCH_64R6 = 0xa0000000, // mips64r6
EF_MIPS_ARCH = 0xf0000000 // Mask for applying EF_MIPS_ARCH_ variant
};
// ELF Relocation types for Mips
-// .
enum {
R_MIPS_NONE = 0,
R_MIPS_16 = 1,
@@ -880,6 +882,12 @@ enum {
R_MIPS_TLS_TPREL_HI16 = 49,
R_MIPS_TLS_TPREL_LO16 = 50,
R_MIPS_GLOB_DAT = 51,
+ R_MIPS_PC21_S2 = 60,
+ R_MIPS_PC26_S2 = 61,
+ R_MIPS_PC18_S3 = 62,
+ R_MIPS_PC19_S2 = 63,
+ R_MIPS_PCHI16 = 64,
+ R_MIPS_PCLO16 = 65,
R_MIPS16_GOT16 = 102,
R_MIPS16_HI16 = 104,
R_MIPS16_LO16 = 105,
@@ -906,7 +914,11 @@ enum {
// Special values for the st_other field in the symbol table entry for MIPS.
enum {
- STO_MIPS_MICROMIPS = 0x80 // MIPS Specific ISA for MicroMips
+ STO_MIPS_OPTIONAL = 0x04, // Symbol whose definition is optional
+ STO_MIPS_PLT = 0x08, // PLT entry related dynamic table record
+ STO_MIPS_PIC = 0x20, // PIC func in an object mixes PIC/non-PIC
+ STO_MIPS_MICROMIPS = 0x80, // MIPS Specific ISA for MicroMips
+ STO_MIPS_MIPS16 = 0xf0 // MIPS Specific ISA for Mips16
};
// Hexagon Specific e_flags
@@ -1661,6 +1673,7 @@ enum {
DT_LOPROC = 0x70000000, // Start of processor specific tags.
DT_HIPROC = 0x7FFFFFFF, // End of processor specific tags.
+ DT_GNU_HASH = 0x6FFFFEF5, // Reference to the GNU hash table.
DT_RELACOUNT = 0x6FFFFFF9, // ELF32_Rela count.
DT_RELCOUNT = 0x6FFFFFFA, // ELF32_Rel count.
diff --git a/include/llvm/Support/ErrorHandling.h b/include/llvm/Support/ErrorHandling.h
index b948d97..ac3a4d8 100644
--- a/include/llvm/Support/ErrorHandling.h
+++ b/include/llvm/Support/ErrorHandling.h
@@ -47,7 +47,7 @@ namespace llvm {
/// \param user_data - An argument which will be passed to the install error
/// handler.
void install_fatal_error_handler(fatal_error_handler_t handler,
- void *user_data = 0);
+ void *user_data = nullptr);
/// Restores default error handling behaviour.
/// This must not be called between llvm_start_multithreaded() and
@@ -59,7 +59,7 @@ namespace llvm {
/// remove_fatal_error_handler in its destructor.
struct ScopedFatalErrorHandler {
explicit ScopedFatalErrorHandler(fatal_error_handler_t handler,
- void *user_data = 0) {
+ void *user_data = nullptr) {
install_fatal_error_handler(handler, user_data);
}
@@ -86,9 +86,9 @@ namespace llvm {
/// This function calls abort(), and prints the optional message to stderr.
/// Use the llvm_unreachable macro (that adds location info), instead of
/// calling this function directly.
- LLVM_ATTRIBUTE_NORETURN void llvm_unreachable_internal(const char *msg=0,
- const char *file=0,
- unsigned line=0);
+ LLVM_ATTRIBUTE_NORETURN void
+ llvm_unreachable_internal(const char *msg=nullptr, const char *file=nullptr,
+ unsigned line=0);
}
/// Marks that the current location is not supposed to be reachable.
diff --git a/include/llvm/Support/FileOutputBuffer.h b/include/llvm/Support/FileOutputBuffer.h
index 1884a24..a8a48fa 100644
--- a/include/llvm/Support/FileOutputBuffer.h
+++ b/include/llvm/Support/FileOutputBuffer.h
@@ -14,7 +14,6 @@
#ifndef LLVM_SUPPORT_FILEOUTPUTBUFFER_H
#define LLVM_SUPPORT_FILEOUTPUTBUFFER_H
-#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
@@ -41,9 +40,6 @@ public:
/// buffer of the specified size. When committed, the buffer will be written
/// to the file at the specified path.
static error_code create(StringRef FilePath, size_t Size,
- OwningPtr<FileOutputBuffer> &Result,
- unsigned Flags = 0);
- static error_code create(StringRef FilePath, size_t Size,
std::unique_ptr<FileOutputBuffer> &Result,
unsigned Flags = 0);
diff --git a/include/llvm/Support/FileSystem.h b/include/llvm/Support/FileSystem.h
index b511a8e..806a3e3 100644
--- a/include/llvm/Support/FileSystem.h
+++ b/include/llvm/Support/FileSystem.h
@@ -165,15 +165,30 @@ class file_status
file_type Type;
perms Perms;
public:
- file_status() : Type(file_type::status_error) {}
- file_status(file_type Type) : Type(Type) {}
-
#if defined(LLVM_ON_UNIX)
+ file_status() : fs_st_dev(0), fs_st_ino(0), fs_st_mtime(0),
+ fs_st_uid(0), fs_st_gid(0), fs_st_size(0),
+ Type(file_type::status_error), Perms(perms_not_known) {}
+
+ file_status(file_type Type) : fs_st_dev(0), fs_st_ino(0), fs_st_mtime(0),
+ fs_st_uid(0), fs_st_gid(0), fs_st_size(0), Type(Type),
+ Perms(perms_not_known) {}
+
file_status(file_type Type, perms Perms, dev_t Dev, ino_t Ino, time_t MTime,
uid_t UID, gid_t GID, off_t Size)
: fs_st_dev(Dev), fs_st_ino(Ino), fs_st_mtime(MTime), fs_st_uid(UID),
fs_st_gid(GID), fs_st_size(Size), Type(Type), Perms(Perms) {}
#elif defined(LLVM_ON_WIN32)
+ file_status() : LastWriteTimeHigh(0), LastWriteTimeLow(0),
+ VolumeSerialNumber(0), FileSizeHigh(0), FileSizeLow(0),
+ FileIndexHigh(0), FileIndexLow(0), Type(file_type::status_error),
+ Perms(perms_not_known) {}
+
+ file_status(file_type Type) : LastWriteTimeHigh(0), LastWriteTimeLow(0),
+ VolumeSerialNumber(0), FileSizeHigh(0), FileSizeLow(0),
+ FileIndexHigh(0), FileIndexLow(0), Type(Type),
+ Perms(perms_not_known) {}
+
file_status(file_type Type, uint32_t LastWriteTimeHigh,
uint32_t LastWriteTimeLow, uint32_t VolumeSerialNumber,
uint32_t FileSizeHigh, uint32_t FileSizeLow,
@@ -562,7 +577,7 @@ error_code createTemporaryFile(const Twine &Prefix, StringRef Suffix,
error_code createUniqueDirectory(const Twine &Prefix,
SmallVectorImpl<char> &ResultPath);
-enum OpenFlags {
+enum OpenFlags : unsigned {
F_None = 0,
/// F_Excl - When opening a file, this flag makes raw_fd_ostream
@@ -814,7 +829,7 @@ public:
}
/// Construct end iterator.
- directory_iterator() : State(0) {}
+ directory_iterator() : State(nullptr) {}
// No operator++ because we need error_code.
directory_iterator &increment(error_code &ec) {
@@ -828,9 +843,9 @@ public:
bool operator==(const directory_iterator &RHS) const {
if (State == RHS.State)
return true;
- if (RHS.State == 0)
+ if (!RHS.State)
return State->CurrentEntry == directory_entry();
- if (State == 0)
+ if (!State)
return RHS.State->CurrentEntry == directory_entry();
return State->CurrentEntry == RHS.State->CurrentEntry;
}
diff --git a/include/llvm/Support/FileUtilities.h b/include/llvm/Support/FileUtilities.h
index 873b8df..3f2f176 100644
--- a/include/llvm/Support/FileUtilities.h
+++ b/include/llvm/Support/FileUtilities.h
@@ -30,7 +30,7 @@ namespace llvm {
int DiffFilesWithTolerance(StringRef FileA,
StringRef FileB,
double AbsTol, double RelTol,
- std::string *Error = 0);
+ std::string *Error = nullptr);
/// FileRemover - This class is a simple object meant to be stack allocated.
diff --git a/include/llvm/Support/FormattedStream.h b/include/llvm/Support/FormattedStream.h
index 78c4809..8137daa 100644
--- a/include/llvm/Support/FormattedStream.h
+++ b/include/llvm/Support/FormattedStream.h
@@ -85,12 +85,12 @@ public:
/// underneath it.
///
formatted_raw_ostream(raw_ostream &Stream, bool Delete = false)
- : raw_ostream(), TheStream(0), DeleteStream(false), Position(0, 0) {
+ : raw_ostream(), TheStream(nullptr), DeleteStream(false), Position(0, 0) {
setStream(Stream, Delete);
}
explicit formatted_raw_ostream()
- : raw_ostream(), TheStream(0), DeleteStream(false), Position(0, 0) {
- Scanned = 0;
+ : raw_ostream(), TheStream(nullptr), DeleteStream(false), Position(0, 0) {
+ Scanned = nullptr;
}
~formatted_raw_ostream() {
@@ -114,7 +114,7 @@ public:
SetUnbuffered();
TheStream->SetUnbuffered();
- Scanned = 0;
+ Scanned = nullptr;
}
/// PadToColumn - Align the output to some column number. If the current
diff --git a/include/llvm/Support/GCOV.h b/include/llvm/Support/GCOV.h
index 902f2db..0cb6cfd 100644
--- a/include/llvm/Support/GCOV.h
+++ b/include/llvm/Support/GCOV.h
@@ -37,9 +37,9 @@ namespace GCOV {
/// GCOVOptions - A struct for passing gcov options between functions.
struct GCOVOptions {
- GCOVOptions(bool A, bool B, bool C, bool F, bool P, bool U)
+ GCOVOptions(bool A, bool B, bool C, bool F, bool P, bool U, bool L, bool N)
: AllBlocks(A), BranchInfo(B), BranchCount(C), FuncCoverage(F),
- PreservePaths(P), UncondBranch(U) {}
+ PreservePaths(P), UncondBranch(U), LongFileNames(L), NoOutput(N) {}
bool AllBlocks;
bool BranchInfo;
@@ -47,6 +47,8 @@ struct GCOVOptions {
bool FuncCoverage;
bool PreservePaths;
bool UncondBranch;
+ bool LongFileNames;
+ bool NoOutput;
};
/// GCOVBuffer - A wrapper around MemoryBuffer to provide GCOV specific
@@ -232,7 +234,6 @@ class GCOVFile {
public:
GCOVFile() : GCNOInitialized(false), Checksum(0), Functions(), RunCount(0),
ProgramCount(0) {}
- ~GCOVFile();
bool readGCNO(GCOVBuffer &Buffer);
bool readGCDA(GCOVBuffer &Buffer);
uint32_t getChecksum() const { return Checksum; }
@@ -242,27 +243,27 @@ private:
bool GCNOInitialized;
GCOV::GCOVVersion Version;
uint32_t Checksum;
- SmallVector<GCOVFunction *, 16> Functions;
+ SmallVector<std::unique_ptr<GCOVFunction>, 16> Functions;
uint32_t RunCount;
uint32_t ProgramCount;
};
/// GCOVEdge - Collects edge information.
struct GCOVEdge {
- GCOVEdge(GCOVBlock *S, GCOVBlock *D): Src(S), Dst(D), Count(0) {}
+ GCOVEdge(GCOVBlock &S, GCOVBlock &D) : Src(S), Dst(D), Count(0) {}
- GCOVBlock *Src;
- GCOVBlock *Dst;
+ GCOVBlock &Src;
+ GCOVBlock &Dst;
uint64_t Count;
};
/// GCOVFunction - Collects function information.
class GCOVFunction {
public:
- typedef SmallVectorImpl<GCOVBlock *>::const_iterator BlockIterator;
+ typedef SmallVectorImpl<std::unique_ptr<GCOVBlock>>::const_iterator
+ BlockIterator;
GCOVFunction(GCOVFile &P) : Parent(P), Ident(0), LineNumber(0) {}
- ~GCOVFunction();
bool readGCNO(GCOVBuffer &Buffer, GCOV::GCOVVersion Version);
bool readGCDA(GCOVBuffer &Buffer, GCOV::GCOVVersion Version);
StringRef getName() const { return Name; }
@@ -283,8 +284,8 @@ private:
uint32_t LineNumber;
StringRef Name;
StringRef Filename;
- SmallVector<GCOVBlock *, 16> Blocks;
- SmallVector<GCOVEdge *, 16> Edges;
+ SmallVector<std::unique_ptr<GCOVBlock>, 16> Blocks;
+ SmallVector<std::unique_ptr<GCOVEdge>, 16> Edges;
};
/// GCOVBlock - Collects block information.
@@ -298,7 +299,7 @@ class GCOVBlock {
struct SortDstEdgesFunctor {
bool operator()(const GCOVEdge *E1, const GCOVEdge *E2) {
- return E1->Dst->Number < E2->Dst->Number;
+ return E1->Dst.Number < E2->Dst.Number;
}
};
public:
@@ -314,13 +315,13 @@ public:
uint64_t getCount() const { return Counter; }
void addSrcEdge(GCOVEdge *Edge) {
- assert(Edge->Dst == this); // up to caller to ensure edge is valid
+ assert(&Edge->Dst == this); // up to caller to ensure edge is valid
SrcEdges.push_back(Edge);
}
void addDstEdge(GCOVEdge *Edge) {
- assert(Edge->Src == this); // up to caller to ensure edge is valid
+ assert(&Edge->Src == this); // up to caller to ensure edge is valid
// Check if adding this edge causes list to become unsorted.
- if (DstEdges.size() && DstEdges.back()->Dst->Number > Edge->Dst->Number)
+ if (DstEdges.size() && DstEdges.back()->Dst.Number > Edge->Dst.Number)
DstEdgesAreSorted = false;
DstEdges.push_back(Edge);
}
@@ -355,8 +356,10 @@ class FileInfo {
typedef DenseMap<uint32_t, BlockVector> BlockLines;
struct LineData {
+ LineData() : LastLine(0) {}
BlockLines Blocks;
FunctionLines Functions;
+ uint32_t LastLine;
};
struct GCOVCoverage {
@@ -378,23 +381,30 @@ public:
Options(Options), LineInfo(), RunCount(0), ProgramCount(0) {}
void addBlockLine(StringRef Filename, uint32_t Line, const GCOVBlock *Block) {
+ if (Line > LineInfo[Filename].LastLine)
+ LineInfo[Filename].LastLine = Line;
LineInfo[Filename].Blocks[Line-1].push_back(Block);
}
void addFunctionLine(StringRef Filename, uint32_t Line,
const GCOVFunction *Function) {
+ if (Line > LineInfo[Filename].LastLine)
+ LineInfo[Filename].LastLine = Line;
LineInfo[Filename].Functions[Line-1].push_back(Function);
}
void setRunCount(uint32_t Runs) { RunCount = Runs; }
void setProgramCount(uint32_t Programs) { ProgramCount = Programs; }
- void print(StringRef GCNOFile, StringRef GCDAFile);
+ void print(StringRef MainFilename, StringRef GCNOFile, StringRef GCDAFile);
+
private:
- void printFunctionSummary(raw_fd_ostream &OS,
+ std::string getCoveragePath(StringRef Filename, StringRef MainFilename);
+ std::unique_ptr<raw_ostream> openCoveragePath(StringRef CoveragePath);
+ void printFunctionSummary(raw_ostream &OS,
const FunctionVector &Funcs) const;
- void printBlockInfo(raw_fd_ostream &OS, const GCOVBlock &Block,
+ void printBlockInfo(raw_ostream &OS, const GCOVBlock &Block,
uint32_t LineIndex, uint32_t &BlockNo) const;
- void printBranchInfo(raw_fd_ostream &OS, const GCOVBlock &Block,
+ void printBranchInfo(raw_ostream &OS, const GCOVBlock &Block,
GCOVCoverage &Coverage, uint32_t &EdgeNo);
- void printUncondBranchInfo(raw_fd_ostream &OS, uint32_t &EdgeNo,
+ void printUncondBranchInfo(raw_ostream &OS, uint32_t &EdgeNo,
uint64_t Count) const;
void printCoverage(const GCOVCoverage &Coverage) const;
diff --git a/include/llvm/Support/GenericDomTree.h b/include/llvm/Support/GenericDomTree.h
index 6878844..e344220 100644
--- a/include/llvm/Support/GenericDomTree.h
+++ b/include/llvm/Support/GenericDomTree.h
@@ -186,9 +186,9 @@ class DominatorTreeBase : public DominatorBase<NodeT> {
assert(isReachableFromEntry(A));
const DomTreeNodeBase<NodeT> *IDom;
- while ((IDom = B->getIDom()) != 0 && IDom != A && IDom != B)
+ while ((IDom = B->getIDom()) != nullptr && IDom != A && IDom != B)
B = IDom; // Walk up the tree
- return IDom != 0;
+ return IDom != nullptr;
}
protected:
@@ -205,7 +205,7 @@ protected:
unsigned Semi;
NodeT *Label;
- InfoRec() : DFSNum(0), Parent(0), Semi(0), Label(0) {}
+ InfoRec() : DFSNum(0), Parent(0), Semi(0), Label(nullptr) {}
};
DenseMap<NodeT*, NodeT*> IDoms;
@@ -224,7 +224,7 @@ protected:
IDoms.clear();
this->Roots.clear();
Vertex.clear();
- RootNode = 0;
+ RootNode = nullptr;
}
// NewBB is split and now it has one successor. Update dominator tree to
@@ -260,7 +260,7 @@ protected:
// Find NewBB's immediate dominator and create new dominator tree node for
// NewBB.
- NodeT *NewBBIDom = 0;
+ NodeT *NewBBIDom = nullptr;
unsigned i = 0;
for (i = 0; i < PredBlocks.size(); ++i)
if (DT.isReachableFromEntry(PredBlocks[i])) {
@@ -344,7 +344,7 @@ public:
void getDescendants(NodeT *R, SmallVectorImpl<NodeT *> &Result) const {
Result.clear();
const DomTreeNodeBase<NodeT> *RN = getNode(R);
- if (RN == NULL)
+ if (!RN)
return; // If R is unreachable, it will not be present in the DOM tree.
SmallVector<const DomTreeNodeBase<NodeT> *, 8> WL;
WL.push_back(RN);
@@ -361,7 +361,7 @@ public:
///
bool properlyDominates(const DomTreeNodeBase<NodeT> *A,
const DomTreeNodeBase<NodeT> *B) const {
- if (A == 0 || B == 0)
+ if (!A || !B)
return false;
if (A == B)
return false;
@@ -453,6 +453,21 @@ public:
DomTreeNodeBase<NodeT> *NodeA = getNode(A);
DomTreeNodeBase<NodeT> *NodeB = getNode(B);
+ // If we have DFS info, then we can avoid all allocations by just querying
+ // it from each IDom. Note that because we call 'dominates' twice above, we
+ // expect to call through this code at most 16 times in a row without
+ // building valid DFS information. This is important as below is a *very*
+ // slow tree walk.
+ if (DFSInfoValid) {
+ DomTreeNodeBase<NodeT> *IDomA = NodeA->getIDom();
+ while (IDomA) {
+ if (NodeB->DominatedBy(IDomA))
+ return IDomA->getBlock();
+ IDomA = IDomA->getIDom();
+ }
+ return nullptr;
+ }
+
// Collect NodeA dominators set.
SmallPtrSet<DomTreeNodeBase<NodeT>*, 16> NodeADoms;
NodeADoms.insert(NodeA);
@@ -471,7 +486,7 @@ public:
IDomB = IDomB->getIDom();
}
- return NULL;
+ return nullptr;
}
const NodeT *findNearestCommonDominator(const NodeT *A, const NodeT *B) {
@@ -489,7 +504,7 @@ public:
/// creates a new node as a child of DomBB dominator node,linking it into
/// the children list of the immediate dominator.
DomTreeNodeBase<NodeT> *addNewBlock(NodeT *BB, NodeT *DomBB) {
- assert(getNode(BB) == 0 && "Block already in dominator tree!");
+ assert(getNode(BB) == nullptr && "Block already in dominator tree!");
DomTreeNodeBase<NodeT> *IDomNode = getNode(DomBB);
assert(IDomNode && "Not immediate dominator specified for block!");
DFSInfoValid = false;
@@ -636,7 +651,7 @@ protected:
// immediate dominator.
NodeT *IDom = getIDom(BB);
- assert(IDom || this->DomTreeNodes[NULL]);
+ assert(IDom || this->DomTreeNodes[nullptr]);
DomTreeNodeBase<NodeT> *IDomNode = getNodeForBlock(IDom);
// Add a new tree node for this NodeT, and link it as a child of
@@ -659,14 +674,14 @@ public:
void recalculate(FT& F) {
typedef GraphTraits<FT*> TraitsTy;
reset();
- this->Vertex.push_back(0);
+ this->Vertex.push_back(nullptr);
if (!this->IsPostDominators) {
// Initialize root
NodeT *entry = TraitsTy::getEntryNode(&F);
this->Roots.push_back(entry);
- this->IDoms[entry] = 0;
- this->DomTreeNodes[entry] = 0;
+ this->IDoms[entry] = nullptr;
+ this->DomTreeNodes[entry] = nullptr;
Calculate<FT, NodeT*>(*this, F);
} else {
@@ -677,8 +692,8 @@ public:
addRoot(I);
// Prepopulate maps so that we don't get iterator invalidation issues later.
- this->IDoms[I] = 0;
- this->DomTreeNodes[I] = 0;
+ this->IDoms[I] = nullptr;
+ this->DomTreeNodes[I] = nullptr;
}
Calculate<FT, Inverse<NodeT*> >(*this, F);
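
The findNearestCommonDominator() fast path added above walks immediate dominators whenever DFS numbers are valid instead of materializing a set of A's dominators. A sketch of the query at the IR level; the DominatorTree class from llvm/IR/Dominators.h is assumed here, while recalculate() and findNearestCommonDominator() are the methods shown in these hunks.

    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/Dominators.h"
    #include "llvm/IR/Function.h"
    using namespace llvm;

    BasicBlock *commonIDom(Function &F, BasicBlock *A, BasicBlock *B) {
      DominatorTree DT;
      DT.recalculate(F);
      // With valid DFS info this only follows immediate dominators (the fast
      // path above); otherwise it falls back to the SmallPtrSet walk.
      return DT.findNearestCommonDominator(A, B);
    }
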
diff --git a/include/llvm/Support/GenericDomTreeConstruction.h b/include/llvm/Support/GenericDomTreeConstruction.h
index f6bb8f4..bcba5e0 100644
--- a/include/llvm/Support/GenericDomTreeConstruction.h
+++ b/include/llvm/Support/GenericDomTreeConstruction.h
@@ -156,11 +156,11 @@ void Calculate(DominatorTreeBase<typename GraphTraits<NodeT>::NodeType>& DT,
bool MultipleRoots = (DT.Roots.size() > 1);
if (MultipleRoots) {
typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &BBInfo =
- DT.Info[NULL];
+ DT.Info[nullptr];
BBInfo.DFSNum = BBInfo.Semi = ++N;
- BBInfo.Label = NULL;
+ BBInfo.Label = nullptr;
- DT.Vertex.push_back(NULL); // Vertex[n] = V;
+ DT.Vertex.push_back(nullptr); // Vertex[n] = V;
}
// Step #1: Number blocks in depth-first order and initialize variables used
@@ -249,10 +249,10 @@ void Calculate(DominatorTreeBase<typename GraphTraits<NodeT>::NodeType>& DT,
// one exit block, or it may be the virtual exit (denoted by (BasicBlock *)0)
// which postdominates all real exits if there are multiple exit blocks, or
// an infinite loop.
- typename GraphT::NodeType* Root = !MultipleRoots ? DT.Roots[0] : 0;
+ typename GraphT::NodeType* Root = !MultipleRoots ? DT.Roots[0] : nullptr;
DT.DomTreeNodes[Root] = DT.RootNode =
- new DomTreeNodeBase<typename GraphT::NodeType>(Root, 0);
+ new DomTreeNodeBase<typename GraphT::NodeType>(Root, nullptr);
// Loop over all of the reachable blocks in the function...
for (unsigned i = 2; i <= N; ++i) {
@@ -263,7 +263,7 @@ void Calculate(DominatorTreeBase<typename GraphTraits<NodeT>::NodeType>& DT,
typename GraphT::NodeType* ImmDom = DT.getIDom(W);
- assert(ImmDom || DT.DomTreeNodes[NULL]);
+ assert(ImmDom || DT.DomTreeNodes[nullptr]);
// Get or calculate the node for the immediate dominator
DomTreeNodeBase<typename GraphT::NodeType> *IDomNode =
diff --git a/include/llvm/Support/GraphWriter.h b/include/llvm/Support/GraphWriter.h
index 62547dd..539673a 100644
--- a/include/llvm/Support/GraphWriter.h
+++ b/include/llvm/Support/GraphWriter.h
@@ -259,8 +259,8 @@ public:
/// emitSimpleNode - Outputs a simple (non-record) node
void emitSimpleNode(const void *ID, const std::string &Attr,
- const std::string &Label, unsigned NumEdgeSources = 0,
- const std::vector<std::string> *EdgeSourceLabels = 0) {
+ const std::string &Label, unsigned NumEdgeSources = 0,
+ const std::vector<std::string> *EdgeSourceLabels = nullptr) {
O << "\tNode" << ID << "[ ";
if (!Attr.empty())
O << Attr << ",";
@@ -325,7 +325,10 @@ template <typename GraphType>
std::string WriteGraph(const GraphType &G, const Twine &Name,
bool ShortNames = false, const Twine &Title = "") {
int FD;
- std::string Filename = createGraphFilename(Name, FD);
+ // Windows can't always handle long paths, so limit the length of the name.
+ std::string N = Name.str();
+ N = N.substr(0, std::min<std::size_t>(N.size(), 140));
+ std::string Filename = createGraphFilename(N, FD);
raw_fd_ostream O(FD, /*shouldClose=*/ true);
if (FD == -1) {
diff --git a/include/llvm/Support/LEB128.h b/include/llvm/Support/LEB128.h
index 9ef5fe6..ea76c9b 100644
--- a/include/llvm/Support/LEB128.h
+++ b/include/llvm/Support/LEB128.h
@@ -77,7 +77,7 @@ inline unsigned encodeULEB128(uint64_t Value, uint8_t *p,
/// Utility function to decode a ULEB128 value.
-inline uint64_t decodeULEB128(const uint8_t *p, unsigned *n = 0) {
+inline uint64_t decodeULEB128(const uint8_t *p, unsigned *n = nullptr) {
const uint8_t *orig_p = p;
uint64_t Value = 0;
unsigned Shift = 0;
diff --git a/include/llvm/Support/LineIterator.h b/include/llvm/Support/LineIterator.h
index 7077656..2a58262 100644
--- a/include/llvm/Support/LineIterator.h
+++ b/include/llvm/Support/LineIterator.h
@@ -11,6 +11,7 @@
#define LLVM_SUPPORT_LINEITERATOR_H__
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DataTypes.h"
#include <iterator>
namespace llvm {
@@ -28,7 +29,7 @@ class MemoryBuffer;
///
/// Note that this iterator requires the buffer to be nul terminated.
class line_iterator
- : public std::iterator<std::forward_iterator_tag, StringRef, ptrdiff_t> {
+ : public std::iterator<std::forward_iterator_tag, StringRef> {
const MemoryBuffer *Buffer;
char CommentMarker;
@@ -37,7 +38,7 @@ class line_iterator
public:
/// \brief Default construct an "end" iterator.
- line_iterator() : Buffer(0) {}
+ line_iterator() : Buffer(nullptr) {}
/// \brief Construct a new iterator around some memory buffer.
explicit line_iterator(const MemoryBuffer &Buffer, char CommentMarker = '\0');
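
A sketch of the iterator in use over an already-loaded, nul-terminated MemoryBuffer, skipping '#' comment lines; how the buffer is obtained is left out, and outs() is used purely for illustration.

    #include "llvm/Support/LineIterator.h"
    #include "llvm/Support/MemoryBuffer.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    void dumpLines(const MemoryBuffer &MB) {
      // A default-constructed line_iterator is the end iterator.
      for (line_iterator I(MB, /*CommentMarker=*/'#'), E; I != E; ++I)
        outs() << *I << "\n";
    }
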
diff --git a/include/llvm/Support/LockFileManager.h b/include/llvm/Support/LockFileManager.h
index 9df8675..523a781 100644
--- a/include/llvm/Support/LockFileManager.h
+++ b/include/llvm/Support/LockFileManager.h
@@ -40,6 +40,16 @@ public:
LFS_Error
};
+ /// \brief Describes the result of waiting for the owner to release the lock.
+ enum WaitForUnlockResult {
+ /// \brief The lock was released successfully.
+ Res_Success,
+ /// \brief Owner died while holding the lock.
+ Res_OwnerDied,
+ /// \brief Reached timeout while waiting for the owner to release the lock.
+ Res_Timeout
+ };
+
private:
SmallString<128> FileName;
SmallString<128> LockFileName;
@@ -67,7 +77,7 @@ public:
operator LockFileState() const { return getState(); }
/// \brief For a shared lock, wait until the owner releases the lock.
- void waitForUnlock();
+ WaitForUnlockResult waitForUnlock();
};
} // end namespace llvm
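
A sketch of reacting to the new WaitForUnlockResult; the constructor and the LFS_Shared/LFS_Owned states are assumed from the rest of the header, since only LFS_Error and the new enum appear in these hunks.

    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/LockFileManager.h"
    using namespace llvm;

    bool buildOrWait(StringRef Output) {
      LockFileManager Lock(Output);
      switch (Lock) {                       // via operator LockFileState()
      case LockFileManager::LFS_Owned:
        return true;                        // we hold the lock; go build
      case LockFileManager::LFS_Error:
        return false;
      case LockFileManager::LFS_Shared:
        break;                              // someone else is building
      }
      // Distinguish a clean release from a dead owner or a timeout.
      return Lock.waitForUnlock() == LockFileManager::Res_Success;
    }
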
diff --git a/include/llvm/Support/MachO.h b/include/llvm/Support/MachO.h
index ef06a41..2a0fc7b 100644
--- a/include/llvm/Support/MachO.h
+++ b/include/llvm/Support/MachO.h
@@ -153,27 +153,59 @@ namespace llvm {
enum SectionType : uint32_t {
// Constant masks for the "flags[7:0]" field in llvm::MachO::section and
// llvm::MachO::section_64 (mask "flags" with SECTION_TYPE)
+
+ /// S_REGULAR - Regular section.
S_REGULAR = 0x00u,
+ /// S_ZEROFILL - Zero fill on demand section.
S_ZEROFILL = 0x01u,
+ /// S_CSTRING_LITERALS - Section with literal C strings.
S_CSTRING_LITERALS = 0x02u,
+ /// S_4BYTE_LITERALS - Section with 4 byte literals.
S_4BYTE_LITERALS = 0x03u,
+ /// S_8BYTE_LITERALS - Section with 8 byte literals.
S_8BYTE_LITERALS = 0x04u,
+ /// S_LITERAL_POINTERS - Section with pointers to literals.
S_LITERAL_POINTERS = 0x05u,
+ /// S_NON_LAZY_SYMBOL_POINTERS - Section with non-lazy symbol pointers.
S_NON_LAZY_SYMBOL_POINTERS = 0x06u,
+ /// S_LAZY_SYMBOL_POINTERS - Section with lazy symbol pointers.
S_LAZY_SYMBOL_POINTERS = 0x07u,
+ /// S_SYMBOL_STUBS - Section with symbol stubs, byte size of stub in
+ /// the Reserved2 field.
S_SYMBOL_STUBS = 0x08u,
+ /// S_MOD_INIT_FUNC_POINTERS - Section with only function pointers for
+ /// initialization.
S_MOD_INIT_FUNC_POINTERS = 0x09u,
+ /// S_MOD_TERM_FUNC_POINTERS - Section with only function pointers for
+ /// termination.
S_MOD_TERM_FUNC_POINTERS = 0x0au,
+ /// S_COALESCED - Section contains symbols that are to be coalesced.
S_COALESCED = 0x0bu,
+ /// S_GB_ZEROFILL - Zero fill on demand section (that can be larger than 4
+ /// gigabytes).
S_GB_ZEROFILL = 0x0cu,
+ /// S_INTERPOSING - Section with only pairs of function pointers for
+ /// interposing.
S_INTERPOSING = 0x0du,
+ /// S_16BYTE_LITERALS - Section with only 16 byte literals.
S_16BYTE_LITERALS = 0x0eu,
+ /// S_DTRACE_DOF - Section contains DTrace Object Format.
S_DTRACE_DOF = 0x0fu,
+ /// S_LAZY_DYLIB_SYMBOL_POINTERS - Section with lazy symbol pointers to
+ /// lazy loaded dylibs.
S_LAZY_DYLIB_SYMBOL_POINTERS = 0x10u,
+ /// S_THREAD_LOCAL_REGULAR - Thread local data section.
S_THREAD_LOCAL_REGULAR = 0x11u,
+ /// S_THREAD_LOCAL_ZEROFILL - Thread local zerofill section.
S_THREAD_LOCAL_ZEROFILL = 0x12u,
+ /// S_THREAD_LOCAL_VARIABLES - Section with thread local variable
+ /// structure data.
S_THREAD_LOCAL_VARIABLES = 0x13u,
+ /// S_THREAD_LOCAL_VARIABLE_POINTERS - Section with pointers to thread
+ /// local structures.
S_THREAD_LOCAL_VARIABLE_POINTERS = 0x14u,
+ /// S_THREAD_LOCAL_INIT_FUNCTION_POINTERS - Section with thread local
+ /// variable initialization pointers to functions.
S_THREAD_LOCAL_INIT_FUNCTION_POINTERS = 0x15u,
LAST_KNOWN_SECTION_TYPE = S_THREAD_LOCAL_INIT_FUNCTION_POINTERS
@@ -182,18 +214,34 @@ namespace llvm {
enum : uint32_t {
// Constant masks for the "flags[31:24]" field in llvm::MachO::section and
// llvm::MachO::section_64 (mask "flags" with SECTION_ATTRIBUTES_USR)
+
+ /// S_ATTR_PURE_INSTRUCTIONS - Section contains only true machine
+ /// instructions.
S_ATTR_PURE_INSTRUCTIONS = 0x80000000u,
+ /// S_ATTR_NO_TOC - Section contains coalesced symbols that are not to be
+ /// in a ranlib table of contents.
S_ATTR_NO_TOC = 0x40000000u,
+ /// S_ATTR_STRIP_STATIC_SYMS - Ok to strip static symbols in this section
+ /// in files with the MH_DYLDLINK flag.
S_ATTR_STRIP_STATIC_SYMS = 0x20000000u,
+ /// S_ATTR_NO_DEAD_STRIP - No dead stripping.
S_ATTR_NO_DEAD_STRIP = 0x10000000u,
+ /// S_ATTR_LIVE_SUPPORT - Blocks are live if they reference live blocks.
S_ATTR_LIVE_SUPPORT = 0x08000000u,
+ /// S_ATTR_SELF_MODIFYING_CODE - Used with i386 code stubs written on by
+ /// dyld.
S_ATTR_SELF_MODIFYING_CODE = 0x04000000u,
+ /// S_ATTR_DEBUG - A debug section.
S_ATTR_DEBUG = 0x02000000u,
// Constant masks for the "flags[23:8]" field in llvm::MachO::section and
// llvm::MachO::section_64 (mask "flags" with SECTION_ATTRIBUTES_SYS)
+
+ /// S_ATTR_SOME_INSTRUCTIONS - Section contains some machine instructions.
S_ATTR_SOME_INSTRUCTIONS = 0x00000400u,
+ /// S_ATTR_EXT_RELOC - Section has external relocation entries.
S_ATTR_EXT_RELOC = 0x00000200u,
+ /// S_ATTR_LOC_RELOC - Section has local relocation entries.
S_ATTR_LOC_RELOC = 0x00000100u,
// Constant masks for the value of an indirect symbol in an indirect
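
For illustration only (not part of the patch): the comments above describe how flags[7:0] holds the section type and the upper bits hold attributes; the helper below (describeSectionFlags is a made-up name) shows the masking.

#include "llvm/Support/MachO.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static void describeSectionFlags(uint32_t Flags) {
  uint32_t Type = Flags & MachO::SECTION_TYPE;               // flags[7:0]
  uint32_t UsrAttrs = Flags & MachO::SECTION_ATTRIBUTES_USR; // flags[31:24]
  uint32_t SysAttrs = Flags & MachO::SECTION_ATTRIBUTES_SYS; // flags[23:8]
  if (Type == MachO::S_CSTRING_LITERALS)
    outs() << "section holds literal C strings\n";
  if (UsrAttrs & MachO::S_ATTR_DEBUG)
    outs() << "debug section\n";
  if (SysAttrs & MachO::S_ATTR_SOME_INSTRUCTIONS)
    outs() << "section contains some machine instructions\n";
}

int main() {
  describeSectionFlags(MachO::S_CSTRING_LITERALS | MachO::S_ATTR_SOME_INSTRUCTIONS);
  return 0;
}
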
diff --git a/include/llvm/Support/ManagedStatic.h b/include/llvm/Support/ManagedStatic.h
index 5587618..1bb8cea 100644
--- a/include/llvm/Support/ManagedStatic.h
+++ b/include/llvm/Support/ManagedStatic.h
@@ -47,7 +47,7 @@ protected:
void RegisterManagedStatic(void *(*creator)(), void (*deleter)(void*)) const;
public:
/// isConstructed - Return true if this object has already been constructed.
- bool isConstructed() const { return Ptr != 0; }
+ bool isConstructed() const { return Ptr != nullptr; }
void destroy() const;
};
diff --git a/include/llvm/Support/Memory.h b/include/llvm/Support/Memory.h
index 8251fcd..0996adb 100644
--- a/include/llvm/Support/Memory.h
+++ b/include/llvm/Support/Memory.h
@@ -28,7 +28,7 @@ namespace sys {
/// @brief Memory block abstraction.
class MemoryBlock {
public:
- MemoryBlock() : Address(0), Size(0) { }
+ MemoryBlock() : Address(nullptr), Size(0) { }
MemoryBlock(void *addr, size_t size) : Address(addr), Size(size) { }
void *base() const { return Address; }
size_t size() const { return Size; }
@@ -120,7 +120,7 @@ namespace sys {
/// @brief Allocate Read/Write/Execute memory.
static MemoryBlock AllocateRWX(size_t NumBytes,
const MemoryBlock *NearBlock,
- std::string *ErrMsg = 0);
+ std::string *ErrMsg = nullptr);
/// This method releases a block of Read/Write/Execute memory that was
/// allocated with the AllocateRWX method. It should not be used to
@@ -129,7 +129,7 @@ namespace sys {
/// On success, this returns false, otherwise it returns true and fills
/// in *ErrMsg.
/// @brief Release Read/Write/Execute memory.
- static bool ReleaseRWX(MemoryBlock &block, std::string *ErrMsg = 0);
+ static bool ReleaseRWX(MemoryBlock &block, std::string *ErrMsg = nullptr);
/// InvalidateInstructionCache - Before the JIT can run a block of code
@@ -140,12 +140,12 @@ namespace sys {
/// setExecutable - Before the JIT can run a block of code, it has to be
/// given read and executable privilege. Return true if it is already r-x
/// or the system is able to change its privilege.
- static bool setExecutable(MemoryBlock &M, std::string *ErrMsg = 0);
+ static bool setExecutable(MemoryBlock &M, std::string *ErrMsg = nullptr);
/// setWritable - When adding to a block of code, the JIT may need
/// to mark a block of code as RW since the protections are on page
/// boundaries, and the JIT internal allocations are not page aligned.
- static bool setWritable(MemoryBlock &M, std::string *ErrMsg = 0);
+ static bool setWritable(MemoryBlock &M, std::string *ErrMsg = nullptr);
/// setRangeExecutable - Mark the page containing a range of addresses
/// as executable.
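
For illustration only (not part of the patch): a sketch of the allocate/copy/mark-executable cycle described above. The helper name emitStub and the 4096-byte size are made up, and real JIT clients keep the block alive while the code runs.

#include "llvm/Support/Memory.h"
#include <string>
using namespace llvm;

static bool emitStub() {
  std::string Err;
  sys::MemoryBlock MB = sys::Memory::AllocateRWX(4096, /*NearBlock=*/nullptr, &Err);
  if (!MB.base())
    return false;                                // Err describes the failure
  // ... copy freshly generated machine code into MB.base() here ...
  if (!sys::Memory::setExecutable(MB, &Err))
    return false;
  sys::Memory::InvalidateInstructionCache(MB.base(), MB.size());
  // ... call into the code ...
  return !sys::Memory::ReleaseRWX(MB, &Err);     // ReleaseRWX returns false on success
}

int main() { return emitStub() ? 0 : 1; }
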
diff --git a/include/llvm/Support/MemoryBuffer.h b/include/llvm/Support/MemoryBuffer.h
index 578c7e8..5810c47 100644
--- a/include/llvm/Support/MemoryBuffer.h
+++ b/include/llvm/Support/MemoryBuffer.h
@@ -24,7 +24,6 @@
namespace llvm {
class error_code;
-template<class T> class OwningPtr;
/// MemoryBuffer - This interface provides simple read-only access to a block
/// of memory, and provides simple methods for reading files and standard input
@@ -67,34 +66,39 @@ public:
/// MemoryBuffer if successful, otherwise returning null. If FileSize is
/// specified, this means that the client knows that the file exists and that
/// it has the specified size.
- static error_code getFile(Twine Filename, OwningPtr<MemoryBuffer> &Result,
- int64_t FileSize = -1,
- bool RequiresNullTerminator = true);
+ ///
+ /// \param IsVolatileSize Set to true to indicate that the file size may be
+ /// changing, e.g. when libclang tries to parse while the user is
+ /// editing/updating the file.
static error_code getFile(Twine Filename,
std::unique_ptr<MemoryBuffer> &Result,
int64_t FileSize = -1,
- bool RequiresNullTerminator = true);
+ bool RequiresNullTerminator = true,
+ bool IsVolatileSize = false);
/// Given an already-open file descriptor, map some slice of it into a
/// MemoryBuffer. The slice is specified by an \p Offset and \p MapSize.
/// Since this is in the middle of a file, the buffer is not null terminated.
- static error_code getOpenFileSlice(int FD, const char *Filename,
- OwningPtr<MemoryBuffer> &Result,
- uint64_t MapSize, int64_t Offset);
+ ///
+ /// \param IsVolatileSize Set to true to indicate that the file size may be
+ /// changing, e.g. when libclang tries to parse while the user is
+ /// editing/updating the file.
static error_code getOpenFileSlice(int FD, const char *Filename,
std::unique_ptr<MemoryBuffer> &Result,
- uint64_t MapSize, int64_t Offset);
+ uint64_t MapSize, int64_t Offset,
+ bool IsVolatileSize = false);
/// Given an already-open file descriptor, read the file and return a
/// MemoryBuffer.
- static error_code getOpenFile(int FD, const char *Filename,
- OwningPtr<MemoryBuffer> &Result,
- uint64_t FileSize,
- bool RequiresNullTerminator = true);
+ ///
+ /// \param IsVolatileSize Set to true to indicate that the file size may be
+ /// changing, e.g. when libclang tries to parse while the user is
+ /// editing/updating the file.
static error_code getOpenFile(int FD, const char *Filename,
std::unique_ptr<MemoryBuffer> &Result,
uint64_t FileSize,
- bool RequiresNullTerminator = true);
+ bool RequiresNullTerminator = true,
+ bool IsVolatileSize = false);
/// getMemBuffer - Open the specified memory range as a MemoryBuffer. Note
/// that InputData must be null terminated if RequiresNullTerminator is true.
@@ -123,7 +127,6 @@ public:
/// getSTDIN - Read all of stdin into a file buffer, and return it.
/// If an error occurs, this returns null and sets ec.
- static error_code getSTDIN(OwningPtr<MemoryBuffer> &Result);
static error_code getSTDIN(std::unique_ptr<MemoryBuffer> &Result);
@@ -131,9 +134,6 @@ public:
/// if the Filename is "-". If an error occurs, this returns null and sets
/// ec.
static error_code getFileOrSTDIN(StringRef Filename,
- OwningPtr<MemoryBuffer> &Result,
- int64_t FileSize = -1);
- static error_code getFileOrSTDIN(StringRef Filename,
std::unique_ptr<MemoryBuffer> &Result,
int64_t FileSize = -1);
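
For illustration only (not part of the patch): a sketch of passing the new IsVolatileSize flag through getFile. The file name "live_input.ll" is made up; the other arguments are spelled out only to show where the new flag sits.

#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/system_error.h"
#include <memory>
using namespace llvm;

int main() {
  std::unique_ptr<MemoryBuffer> Buf;
  // The file may be rewritten while we read it (e.g. an editor buffer), so
  // tell MemoryBuffer not to trust the size it sees up front.
  if (error_code EC = MemoryBuffer::getFile("live_input.ll", Buf,
                                            /*FileSize=*/-1,
                                            /*RequiresNullTerminator=*/true,
                                            /*IsVolatileSize=*/true)) {
    errs() << "error: " << EC.message() << "\n";
    return 1;
  }
  outs() << "read " << Buf->getBufferSize() << " bytes\n";
  return 0;
}
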
diff --git a/include/llvm/Support/OnDiskHashTable.h b/include/llvm/Support/OnDiskHashTable.h
new file mode 100644
index 0000000..f6d43a4
--- /dev/null
+++ b/include/llvm/Support/OnDiskHashTable.h
@@ -0,0 +1,571 @@
+//===--- OnDiskHashTable.h - On-Disk Hash Table Implementation --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Defines facilities for reading and writing on-disk hash tables.
+///
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_SUPPORT_ON_DISK_HASH_TABLE_H
+#define LLVM_SUPPORT_ON_DISK_HASH_TABLE_H
+
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstdlib>
+
+namespace llvm {
+
+/// \brief Generates an on disk hash table.
+///
+/// This needs an \c Info that handles storing values into the hash table's
+/// payload and computes the hash for a given key. This should provide the
+/// following interface:
+///
+/// \code
+/// class ExampleInfo {
+/// public:
+/// typedef ExampleKey key_type; // Must be copy constructible
+/// typedef ExampleKey &key_type_ref;
+/// typedef ExampleData data_type; // Must be copy constructible
+/// typedef ExampleData &data_type_ref;
+/// typedef uint32_t hash_value_type; // The type the hash function returns.
+/// typedef uint32_t offset_type; // The type for offsets into the table.
+///
+/// /// Calculate the hash for Key
+/// static hash_value_type ComputeHash(key_type_ref Key);
+/// /// Return the lengths, in bytes, of the given Key/Data pair.
+/// static std::pair<offset_type, offset_type>
+/// EmitKeyDataLength(raw_ostream &Out, key_type_ref Key, data_type_ref Data);
+/// /// Write Key to Out. KeyLen is the length from EmitKeyDataLength.
+/// static void EmitKey(raw_ostream &Out, key_type_ref Key,
+/// offset_type KeyLen);
+/// /// Write Data to Out. DataLen is the length from EmitKeyDataLength.
+/// static void EmitData(raw_ostream &Out, key_type_ref Key,
+/// data_type_ref Data, offset_type DataLen);
+/// };
+/// \endcode
+template <typename Info> class OnDiskChainedHashTableGenerator {
+ /// \brief A single item in the hash table.
+ class Item {
+ public:
+ typename Info::key_type Key;
+ typename Info::data_type Data;
+ Item *Next;
+ const typename Info::hash_value_type Hash;
+
+ Item(typename Info::key_type_ref Key, typename Info::data_type_ref Data,
+ Info &InfoObj)
+ : Key(Key), Data(Data), Next(nullptr), Hash(InfoObj.ComputeHash(Key)) {}
+ };
+
+ typedef typename Info::offset_type offset_type;
+ offset_type NumBuckets;
+ offset_type NumEntries;
+ llvm::SpecificBumpPtrAllocator<Item> BA;
+
+ /// \brief A linked list of values in a particular hash bucket.
+ class Bucket {
+ public:
+ offset_type Off;
+ Item *Head;
+ unsigned Length;
+
+ Bucket() {}
+ };
+
+ Bucket *Buckets;
+
+private:
+ /// \brief Insert an item into the appropriate hash bucket.
+ void insert(Bucket *Buckets, size_t Size, Item *E) {
+ Bucket &B = Buckets[E->Hash & (Size - 1)];
+ E->Next = B.Head;
+ ++B.Length;
+ B.Head = E;
+ }
+
+ /// \brief Resize the hash table, moving the old entries into the new buckets.
+ void resize(size_t NewSize) {
+ Bucket *NewBuckets = (Bucket *)std::calloc(NewSize, sizeof(Bucket));
+ // Populate NewBuckets with the old entries.
+ for (size_t I = 0; I < NumBuckets; ++I)
+ for (Item *E = Buckets[I].Head; E;) {
+ Item *N = E->Next;
+ E->Next = nullptr;
+ insert(NewBuckets, NewSize, E);
+ E = N;
+ }
+
+ free(Buckets);
+ NumBuckets = NewSize;
+ Buckets = NewBuckets;
+ }
+
+public:
+ /// \brief Insert an entry into the table.
+ void insert(typename Info::key_type_ref Key,
+ typename Info::data_type_ref Data) {
+ Info InfoObj;
+ insert(Key, Data, InfoObj);
+ }
+
+ /// \brief Insert an entry into the table.
+ ///
+ /// Uses the provided Info instead of a stack allocated one.
+ void insert(typename Info::key_type_ref Key,
+ typename Info::data_type_ref Data, Info &InfoObj) {
+
+ ++NumEntries;
+ if (4 * NumEntries >= 3 * NumBuckets)
+ resize(NumBuckets * 2);
+ insert(Buckets, NumBuckets, new (BA.Allocate()) Item(Key, Data, InfoObj));
+ }
+
+ /// \brief Emit the table to Out, which must not be at offset 0.
+ offset_type Emit(raw_ostream &Out) {
+ Info InfoObj;
+ return Emit(Out, InfoObj);
+ }
+
+ /// \brief Emit the table to Out, which must not be at offset 0.
+ ///
+ /// Uses the provided Info instead of a stack allocated one.
+ offset_type Emit(raw_ostream &Out, Info &InfoObj) {
+ using namespace llvm::support;
+ endian::Writer<little> LE(Out);
+
+ // Emit the payload of the table.
+ for (offset_type I = 0; I < NumBuckets; ++I) {
+ Bucket &B = Buckets[I];
+ if (!B.Head)
+ continue;
+
+ // Store the offset for the data of this bucket.
+ B.Off = Out.tell();
+ assert(B.Off && "Cannot write a bucket at offset 0. Please add padding.");
+
+ // Write out the number of items in the bucket.
+ LE.write<uint16_t>(B.Length);
+ assert(B.Length != 0 && "Bucket has a head but zero length?");
+
+ // Write out the entries in the bucket.
+ for (Item *I = B.Head; I; I = I->Next) {
+ LE.write<typename Info::hash_value_type>(I->Hash);
+ const std::pair<offset_type, offset_type> &Len =
+ InfoObj.EmitKeyDataLength(Out, I->Key, I->Data);
+ InfoObj.EmitKey(Out, I->Key, Len.first);
+ InfoObj.EmitData(Out, I->Key, I->Data, Len.second);
+ }
+ }
+
+ // Pad with zeros so that we can start the hashtable at an aligned address.
+ offset_type TableOff = Out.tell();
+ uint64_t N = llvm::OffsetToAlignment(TableOff, alignOf<offset_type>());
+ TableOff += N;
+ while (N--)
+ LE.write<uint8_t>(0);
+
+ // Emit the hashtable itself.
+ LE.write<offset_type>(NumBuckets);
+ LE.write<offset_type>(NumEntries);
+ for (offset_type I = 0; I < NumBuckets; ++I)
+ LE.write<offset_type>(Buckets[I].Off);
+
+ return TableOff;
+ }
+
+ OnDiskChainedHashTableGenerator() {
+ NumEntries = 0;
+ NumBuckets = 64;
+ // Note that we do not need to run the constructors of the individual
+ // Bucket objects since 'calloc' returns bytes that are all 0.
+ Buckets = (Bucket *)std::calloc(NumBuckets, sizeof(Bucket));
+ }
+
+ ~OnDiskChainedHashTableGenerator() { std::free(Buckets); }
+};
+
+/// \brief Provides lookup on an on disk hash table.
+///
+/// This needs an \c Info that handles reading values from the hash table's
+/// payload and computes the hash for a given key. This should provide the
+/// following interface:
+///
+/// \code
+/// class ExampleLookupInfo {
+/// public:
+/// typedef ExampleData data_type;
+/// typedef ExampleInternalKey internal_key_type; // The stored key type.
+/// typedef ExampleKey external_key_type; // The type to pass to find().
+/// typedef uint32_t hash_value_type; // The type the hash function returns.
+/// typedef uint32_t offset_type; // The type for offsets into the table.
+///
+/// /// Compare two keys for equality.
+/// static bool EqualKey(internal_key_type &Key1, internal_key_type &Key2);
+/// /// Calculate the hash for the given key.
+/// static hash_value_type ComputeHash(internal_key_type &IKey);
+/// /// Translate from the semantic type of a key in the hash table to the
+/// /// type that is actually stored and used for hashing and comparisons.
+/// /// The internal and external types are often the same, in which case this
+/// /// can simply return the passed in value.
+/// static const internal_key_type &GetInternalKey(external_key_type &EKey);
+/// /// Read the key and data length from Buffer, leaving it pointing at the
+/// /// following byte.
+/// static std::pair<offset_type, offset_type>
+/// ReadKeyDataLength(const unsigned char *&Buffer);
+/// /// Read the key from Buffer, given the KeyLen as reported from
+/// /// ReadKeyDataLength.
+/// const internal_key_type &ReadKey(const unsigned char *Buffer,
+/// offset_type KeyLen);
+/// /// Read the data for Key from Buffer, given the DataLen as reported from
+/// /// ReadKeyDataLength.
+/// data_type ReadData(StringRef Key, const unsigned char *Buffer,
+/// offset_type DataLen);
+/// };
+/// \endcode
+template <typename Info> class OnDiskChainedHashTable {
+ const typename Info::offset_type NumBuckets;
+ const typename Info::offset_type NumEntries;
+ const unsigned char *const Buckets;
+ const unsigned char *const Base;
+ Info InfoObj;
+
+public:
+ typedef typename Info::internal_key_type internal_key_type;
+ typedef typename Info::external_key_type external_key_type;
+ typedef typename Info::data_type data_type;
+ typedef typename Info::hash_value_type hash_value_type;
+ typedef typename Info::offset_type offset_type;
+
+ OnDiskChainedHashTable(offset_type NumBuckets, offset_type NumEntries,
+ const unsigned char *Buckets,
+ const unsigned char *Base,
+ const Info &InfoObj = Info())
+ : NumBuckets(NumBuckets), NumEntries(NumEntries), Buckets(Buckets),
+ Base(Base), InfoObj(InfoObj) {
+ assert((reinterpret_cast<uintptr_t>(Buckets) & 0x3) == 0 &&
+ "'buckets' must have a 4-byte alignment");
+ }
+
+ offset_type getNumBuckets() const { return NumBuckets; }
+ offset_type getNumEntries() const { return NumEntries; }
+ const unsigned char *getBase() const { return Base; }
+ const unsigned char *getBuckets() const { return Buckets; }
+
+ bool isEmpty() const { return NumEntries == 0; }
+
+ class iterator {
+ internal_key_type Key;
+ const unsigned char *const Data;
+ const offset_type Len;
+ Info *InfoObj;
+
+ public:
+ iterator() : Data(nullptr), Len(0) {}
+ iterator(const internal_key_type K, const unsigned char *D, offset_type L,
+ Info *InfoObj)
+ : Key(K), Data(D), Len(L), InfoObj(InfoObj) {}
+
+ data_type operator*() const { return InfoObj->ReadData(Key, Data, Len); }
+ bool operator==(const iterator &X) const { return X.Data == Data; }
+ bool operator!=(const iterator &X) const { return X.Data != Data; }
+ };
+
+ /// \brief Look up the stored data for a particular key.
+ iterator find(const external_key_type &EKey, Info *InfoPtr = nullptr) {
+ if (!InfoPtr)
+ InfoPtr = &InfoObj;
+
+ using namespace llvm::support;
+ const internal_key_type &IKey = InfoObj.GetInternalKey(EKey);
+ hash_value_type KeyHash = InfoObj.ComputeHash(IKey);
+
+ // Each bucket is just an offset into the hash table file.
+ offset_type Idx = KeyHash & (NumBuckets - 1);
+ const unsigned char *Bucket = Buckets + sizeof(offset_type) * Idx;
+
+ offset_type Offset = endian::readNext<offset_type, little, aligned>(Bucket);
+ if (Offset == 0)
+ return iterator(); // Empty bucket.
+ const unsigned char *Items = Base + Offset;
+
+ // 'Items' starts with a 16-bit unsigned integer representing the
+ // number of items in this bucket.
+ unsigned Len = endian::readNext<uint16_t, little, unaligned>(Items);
+
+ for (unsigned i = 0; i < Len; ++i) {
+ // Read the hash.
+ hash_value_type ItemHash =
+ endian::readNext<hash_value_type, little, unaligned>(Items);
+
+ // Determine the length of the key and the data.
+ const std::pair<offset_type, offset_type> &L =
+ Info::ReadKeyDataLength(Items);
+ offset_type ItemLen = L.first + L.second;
+
+ // Compare the hashes. If they are not the same, skip the entry entirely.
+ if (ItemHash != KeyHash) {
+ Items += ItemLen;
+ continue;
+ }
+
+ // Read the key.
+ const internal_key_type &X =
+ InfoPtr->ReadKey((const unsigned char *const)Items, L.first);
+
+ // If the key doesn't match just skip reading the value.
+ if (!InfoPtr->EqualKey(X, IKey)) {
+ Items += ItemLen;
+ continue;
+ }
+
+ // The key matches!
+ return iterator(X, Items + L.first, L.second, InfoPtr);
+ }
+
+ return iterator();
+ }
+
+ iterator end() const { return iterator(); }
+
+ Info &getInfoObj() { return InfoObj; }
+
+ /// \brief Create the hash table.
+ ///
+ /// \param Buckets is the beginning of the hash table itself, which follows
+ /// the payload of the entire structure. This is the value returned by
+ /// OnDiskChainedHashTableGenerator::Emit.
+ ///
+ /// \param Base is the point from which all offsets into the structure are
+ /// based. This is offset 0 in the stream that was used when Emitting the
+ /// table.
+ static OnDiskChainedHashTable *Create(const unsigned char *Buckets,
+ const unsigned char *const Base,
+ const Info &InfoObj = Info()) {
+ using namespace llvm::support;
+ assert(Buckets > Base);
+ assert((reinterpret_cast<uintptr_t>(Buckets) & 0x3) == 0 &&
+ "buckets should be 4-byte aligned.");
+
+ offset_type NumBuckets =
+ endian::readNext<offset_type, little, aligned>(Buckets);
+ offset_type NumEntries =
+ endian::readNext<offset_type, little, aligned>(Buckets);
+ return new OnDiskChainedHashTable<Info>(NumBuckets, NumEntries, Buckets,
+ Base, InfoObj);
+ }
+};
+
+/// \brief Provides lookup and iteration over an on disk hash table.
+///
+/// \copydetails llvm::OnDiskChainedHashTable
+template <typename Info>
+class OnDiskIterableChainedHashTable : public OnDiskChainedHashTable<Info> {
+ const unsigned char *Payload;
+
+public:
+ typedef OnDiskChainedHashTable<Info> base_type;
+ typedef typename base_type::internal_key_type internal_key_type;
+ typedef typename base_type::external_key_type external_key_type;
+ typedef typename base_type::data_type data_type;
+ typedef typename base_type::hash_value_type hash_value_type;
+ typedef typename base_type::offset_type offset_type;
+
+ OnDiskIterableChainedHashTable(offset_type NumBuckets, offset_type NumEntries,
+ const unsigned char *Buckets,
+ const unsigned char *Payload,
+ const unsigned char *Base,
+ const Info &InfoObj = Info())
+ : base_type(NumBuckets, NumEntries, Buckets, Base, InfoObj),
+ Payload(Payload) {}
+
+ /// \brief Iterates over all of the keys in the table.
+ class key_iterator {
+ const unsigned char *Ptr;
+ offset_type NumItemsInBucketLeft;
+ offset_type NumEntriesLeft;
+ Info *InfoObj;
+
+ public:
+ typedef external_key_type value_type;
+
+ key_iterator(const unsigned char *const Ptr, offset_type NumEntries,
+ Info *InfoObj)
+ : Ptr(Ptr), NumItemsInBucketLeft(0), NumEntriesLeft(NumEntries),
+ InfoObj(InfoObj) {}
+ key_iterator()
+ : Ptr(nullptr), NumItemsInBucketLeft(0), NumEntriesLeft(0),
+ InfoObj(nullptr) {}
+
+ friend bool operator==(const key_iterator &X, const key_iterator &Y) {
+ return X.NumEntriesLeft == Y.NumEntriesLeft;
+ }
+ friend bool operator!=(const key_iterator &X, const key_iterator &Y) {
+ return X.NumEntriesLeft != Y.NumEntriesLeft;
+ }
+
+ key_iterator &operator++() { // Preincrement
+ using namespace llvm::support;
+ if (!NumItemsInBucketLeft) {
+ // 'Items' starts with a 16-bit unsigned integer representing the
+ // number of items in this bucket.
+ NumItemsInBucketLeft =
+ endian::readNext<uint16_t, little, unaligned>(Ptr);
+ }
+ Ptr += sizeof(hash_value_type); // Skip the hash.
+ // Determine the length of the key and the data.
+ const std::pair<offset_type, offset_type> &L =
+ Info::ReadKeyDataLength(Ptr);
+ Ptr += L.first + L.second;
+ assert(NumItemsInBucketLeft);
+ --NumItemsInBucketLeft;
+ assert(NumEntriesLeft);
+ --NumEntriesLeft;
+ return *this;
+ }
+ key_iterator operator++(int) { // Postincrement
+ key_iterator tmp = *this; ++*this; return tmp;
+ }
+
+ value_type operator*() const {
+ const unsigned char *LocalPtr = Ptr;
+ if (!NumItemsInBucketLeft)
+ LocalPtr += 2; // number of items in bucket
+ LocalPtr += sizeof(hash_value_type); // Skip the hash.
+
+ // Determine the length of the key and the data.
+ const std::pair<offset_type, offset_type> &L =
+ Info::ReadKeyDataLength(LocalPtr);
+
+ // Read the key.
+ const internal_key_type &Key = InfoObj->ReadKey(LocalPtr, L.first);
+ return InfoObj->GetExternalKey(Key);
+ }
+ };
+
+ key_iterator key_begin() {
+ return key_iterator(Payload, this->getNumEntries(), &this->getInfoObj());
+ }
+ key_iterator key_end() { return key_iterator(); }
+
+ iterator_range<key_iterator> keys() {
+ return make_range(key_begin(), key_end());
+ }
+
+ /// \brief Iterates over all the entries in the table, returning the data.
+ class data_iterator {
+ const unsigned char *Ptr;
+ offset_type NumItemsInBucketLeft;
+ offset_type NumEntriesLeft;
+ Info *InfoObj;
+
+ public:
+ typedef data_type value_type;
+
+ data_iterator(const unsigned char *const Ptr, offset_type NumEntries,
+ Info *InfoObj)
+ : Ptr(Ptr), NumItemsInBucketLeft(0), NumEntriesLeft(NumEntries),
+ InfoObj(InfoObj) {}
+ data_iterator()
+ : Ptr(nullptr), NumItemsInBucketLeft(0), NumEntriesLeft(0),
+ InfoObj(nullptr) {}
+
+ bool operator==(const data_iterator &X) const {
+ return X.NumEntriesLeft == NumEntriesLeft;
+ }
+ bool operator!=(const data_iterator &X) const {
+ return X.NumEntriesLeft != NumEntriesLeft;
+ }
+
+ data_iterator &operator++() { // Preincrement
+ using namespace llvm::support;
+ if (!NumItemsInBucketLeft) {
+ // 'Items' starts with a 16-bit unsigned integer representing the
+ // number of items in this bucket.
+ NumItemsInBucketLeft =
+ endian::readNext<uint16_t, little, unaligned>(Ptr);
+ }
+ Ptr += sizeof(hash_value_type); // Skip the hash.
+ // Determine the length of the key and the data.
+ const std::pair<offset_type, offset_type> &L =
+ Info::ReadKeyDataLength(Ptr);
+ Ptr += L.first + L.second;
+ assert(NumItemsInBucketLeft);
+ --NumItemsInBucketLeft;
+ assert(NumEntriesLeft);
+ --NumEntriesLeft;
+ return *this;
+ }
+ data_iterator operator++(int) { // Postincrement
+ data_iterator tmp = *this; ++*this; return tmp;
+ }
+
+ value_type operator*() const {
+ const unsigned char *LocalPtr = Ptr;
+ if (!NumItemsInBucketLeft)
+ LocalPtr += 2; // number of items in bucket
+ LocalPtr += sizeof(hash_value_type); // Skip the hash.
+
+ // Determine the length of the key and the data.
+ const std::pair<offset_type, offset_type> &L =
+ Info::ReadKeyDataLength(LocalPtr);
+
+ // Read the key.
+ const internal_key_type &Key = InfoObj->ReadKey(LocalPtr, L.first);
+ return InfoObj->ReadData(Key, LocalPtr + L.first, L.second);
+ }
+ };
+
+ data_iterator data_begin() {
+ return data_iterator(Payload, this->getNumEntries(), &this->getInfoObj());
+ }
+ data_iterator data_end() { return data_iterator(); }
+
+ iterator_range<data_iterator> data() {
+ return make_range(data_begin(), data_end());
+ }
+
+ /// \brief Create the hash table.
+ ///
+ /// \param Buckets is the beginning of the hash table itself, which follows
+ /// the payload of the entire structure. This is the value returned by
+ /// OnDiskChainedHashTableGenerator::Emit.
+ ///
+ /// \param Payload is the beginning of the data contained in the table. This
+ /// is Base plus any padding or header data that was stored, i.e., the offset
+ /// that the stream was at when calling Emit.
+ ///
+ /// \param Base is the point from which all offsets into the structure are
+ /// based. This is offset 0 in the stream that was used when Emitting the
+ /// table.
+ static OnDiskIterableChainedHashTable *
+ Create(const unsigned char *Buckets, const unsigned char *const Payload,
+ const unsigned char *const Base, const Info &InfoObj = Info()) {
+ using namespace llvm::support;
+ assert(Buckets > Base);
+ assert((reinterpret_cast<uintptr_t>(Buckets) & 0x3) == 0 &&
+ "buckets should be 4-byte aligned.");
+
+ offset_type NumBuckets =
+ endian::readNext<offset_type, little, aligned>(Buckets);
+ offset_type NumEntries =
+ endian::readNext<offset_type, little, aligned>(Buckets);
+ return new OnDiskIterableChainedHashTable<Info>(
+ NumBuckets, NumEntries, Buckets, Payload, Base, InfoObj);
+ }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_ON_DISK_HASH_TABLE_H
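
For illustration only (not part of the patch): a round-trip sketch using the interfaces documented above. StringIntInfo is a made-up Info type mapping StringRef keys to uint32_t values and serves as both the generator-side and lookup-side Info; the sketch also assumes the in-memory string buffer is suitably aligned for the reader's 4-byte reads.

#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/OnDiskHashTable.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
#include <string>
#include <utility>
using namespace llvm;

// Made-up Info type: keys are strings (stored as a 16-bit length followed by
// the bytes), values are fixed-size 4-byte little-endian integers.
struct StringIntInfo {
  typedef StringRef key_type;
  typedef StringRef key_type_ref;
  typedef uint32_t data_type;
  typedef uint32_t data_type_ref;
  typedef uint32_t hash_value_type;
  typedef uint32_t offset_type;
  typedef StringRef external_key_type;
  typedef StringRef internal_key_type;

  static hash_value_type ComputeHash(StringRef Key) { return HashString(Key); }
  static bool EqualKey(StringRef A, StringRef B) { return A == B; }
  static StringRef GetInternalKey(StringRef EKey) { return EKey; }

  static std::pair<offset_type, offset_type>
  EmitKeyDataLength(raw_ostream &Out, StringRef Key, uint32_t) {
    using namespace llvm::support;
    endian::Writer<little> LE(Out);
    LE.write<uint16_t>(Key.size());
    return std::make_pair(Key.size(), sizeof(uint32_t));
  }
  static void EmitKey(raw_ostream &Out, StringRef Key, offset_type) {
    Out << Key;
  }
  static void EmitData(raw_ostream &Out, StringRef, uint32_t Data,
                       offset_type) {
    using namespace llvm::support;
    endian::Writer<little> LE(Out);
    LE.write<uint32_t>(Data);
  }

  static std::pair<offset_type, offset_type>
  ReadKeyDataLength(const unsigned char *&Buffer) {
    using namespace llvm::support;
    offset_type KeyLen = endian::readNext<uint16_t, little, unaligned>(Buffer);
    return std::make_pair(KeyLen, sizeof(uint32_t));
  }
  static StringRef ReadKey(const unsigned char *Buffer, offset_type KeyLen) {
    return StringRef(reinterpret_cast<const char *>(Buffer), KeyLen);
  }
  static uint32_t ReadData(StringRef, const unsigned char *Buffer,
                           offset_type) {
    using namespace llvm::support;
    return endian::readNext<uint32_t, little, unaligned>(Buffer);
  }
};

int main() {
  // Write side: build the table into an in-memory string.
  OnDiskChainedHashTableGenerator<StringIntInfo> Generator;
  Generator.insert("apple", 1);
  Generator.insert("banana", 2);

  std::string Blob;
  uint32_t TableOffset;
  {
    raw_string_ostream Out(Blob);
    Out << '\0'; // Pad so that no bucket ends up at offset 0.
    TableOffset = Generator.Emit(Out);
  } // raw_string_ostream flushes into Blob here.

  // Read side.
  const unsigned char *Base =
      reinterpret_cast<const unsigned char *>(Blob.data());
  typedef OnDiskChainedHashTable<StringIntInfo> TableTy;
  std::unique_ptr<TableTy> Table(TableTy::Create(Base + TableOffset, Base));
  TableTy::iterator Pos = Table->find("banana");
  if (Pos != Table->end())
    outs() << "banana -> " << *Pos << "\n";
  return 0;
}
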
diff --git a/include/llvm/Support/Path.h b/include/llvm/Support/Path.h
index ba18529..cf821f0 100644
--- a/include/llvm/Support/Path.h
+++ b/include/llvm/Support/Path.h
@@ -295,6 +295,11 @@ const StringRef extension(StringRef path);
/// @result true if \a value is a path separator character on the host OS
bool is_separator(char value);
+/// @brief Return the preferred separator for this platform.
+///
+/// @result StringRef of the preferred separator, null-terminated.
+const StringRef get_separator();
+
/// @brief Get the typical temporary directory for the system, e.g.,
/// "/var/tmp" or "C:/TEMP"
///
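
For illustration only (not part of the patch): a small sketch of the new get_separator() next to sys::path::append, which already picks the right separator when composing paths; the path components are arbitrary.

#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  outs() << "preferred separator: " << sys::path::get_separator() << "\n";
  SmallString<128> P("foo");
  sys::path::append(P, "bar", "baz.txt"); // uses the preferred separator
  outs() << P.str() << "\n";              // e.g. "foo/bar/baz.txt" on POSIX
  return 0;
}
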
diff --git a/include/llvm/Support/Program.h b/include/llvm/Support/Program.h
index a1067a6..9160b7d 100644
--- a/include/llvm/Support/Program.h
+++ b/include/llvm/Support/Program.h
@@ -87,11 +87,11 @@ struct ProcessInfo {
const char **args, ///< A vector of strings that are passed to the
///< program. The first element should be the name of the program.
///< The list *must* be terminated by a null char* entry.
- const char **env = 0, ///< An optional vector of strings to use for
+ const char **env = nullptr, ///< An optional vector of strings to use for
///< the program's environment. If not provided, the current program's
///< environment will be used.
- const StringRef **redirects = 0, ///< An optional array of pointers to
- ///< paths. If the array is null, no redirection is done. The array
+ const StringRef **redirects = nullptr, ///< An optional array of pointers
+ ///< to paths. If the array is null, no redirection is done. The array
///< should have a size of at least three. The inferior process's
///< stdin(0), stdout(1), and stderr(2) will be redirected to the
///< corresponding paths.
@@ -107,11 +107,11 @@ struct ProcessInfo {
///< of memory that can be allocated by the process. If memory usage
///< exceeds this limit, the child is killed and this call returns. If
///< zero, there is no memory limit.
- std::string *ErrMsg = 0, ///< If non-zero, provides a pointer to a string
- ///< instance in which error messages will be returned. If the string
- ///< is non-empty upon return an error occurred while invoking the
+ std::string *ErrMsg = nullptr, ///< If non-zero, provides a pointer to a
+ ///< string instance in which error messages will be returned. If the
+ ///< string is non-empty upon return an error occurred while invoking the
///< program.
- bool *ExecutionFailed = 0);
+ bool *ExecutionFailed = nullptr);
/// Similar to ExecuteAndWait, but returns immediately.
/// @returns The \see ProcessInfo of the newly launched process.
@@ -119,9 +119,9 @@ struct ProcessInfo {
/// Wait until the process finished execution or win32 CloseHandle() API on
/// ProcessInfo.ProcessHandle to avoid memory leaks.
ProcessInfo
- ExecuteNoWait(StringRef Program, const char **args, const char **env = 0,
- const StringRef **redirects = 0, unsigned memoryLimit = 0,
- std::string *ErrMsg = 0, bool *ExecutionFailed = 0);
+ ExecuteNoWait(StringRef Program, const char **args, const char **env = nullptr,
+ const StringRef **redirects = nullptr, unsigned memoryLimit = 0,
+ std::string *ErrMsg = nullptr, bool *ExecutionFailed = nullptr);
/// Return true if the given arguments fit within system-specific
/// argument length limits.
@@ -142,9 +142,9 @@ struct ProcessInfo {
///< will perform a non-blocking wait on the child process.
bool WaitUntilTerminates, ///< If true, ignores \p SecondsToWait and waits
///< until child has terminated.
- std::string *ErrMsg = 0 ///< If non-zero, provides a pointer to a string
- ///< instance in which error messages will be returned. If the string
- ///< is non-empty upon return an error occurred while invoking the
+ std::string *ErrMsg = nullptr ///< If non-zero, provides a pointer to a
+ ///< string instance in which error messages will be returned. If the
+ ///< string is non-empty upon return an error occurred while invoking the
///< program.
);
}
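
For illustration only (not part of the patch): a sketch of ExecuteAndWait with the new nullptr defaults spelled out. The program path and arguments are arbitrary; only the parameter positions matter here.

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/raw_ostream.h"
#include <string>
using namespace llvm;

int main() {
  const char *Args[] = {"echo", "hello", nullptr}; // argv-style, null-terminated
  std::string Err;
  bool ExecFailed = false;
  int RC = sys::ExecuteAndWait("/bin/echo", Args, /*env=*/nullptr,
                               /*redirects=*/nullptr, /*secondsToWait=*/0,
                               /*memoryLimit=*/0, &Err, &ExecFailed);
  if (ExecFailed || RC != 0)
    errs() << "echo failed: " << Err << "\n";
  return 0;
}
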
diff --git a/include/llvm/Support/Regex.h b/include/llvm/Support/Regex.h
index 2eea369..bf533ca 100644
--- a/include/llvm/Support/Regex.h
+++ b/include/llvm/Support/Regex.h
@@ -55,7 +55,7 @@ namespace llvm {
Regex(Regex &&regex) {
preg = regex.preg;
error = regex.error;
- regex.preg = NULL;
+ regex.preg = nullptr;
}
~Regex();
@@ -75,7 +75,7 @@ namespace llvm {
/// the first group is always the entire pattern.
///
/// This returns true on a successful match.
- bool match(StringRef String, SmallVectorImpl<StringRef> *Matches = 0);
+ bool match(StringRef String, SmallVectorImpl<StringRef> *Matches = nullptr);
/// sub - Return the result of replacing the first match of the regex in
/// \p String with the \p Repl string. Backreferences like "\0" in the
@@ -87,7 +87,8 @@ namespace llvm {
/// \param Error If non-null, any errors in the substitution (invalid
/// backreferences, trailing backslashes) will be recorded as a non-empty
/// string.
- std::string sub(StringRef Repl, StringRef String, std::string *Error = 0);
+ std::string sub(StringRef Repl, StringRef String,
+ std::string *Error = nullptr);
/// \brief If this function returns true, ^Str$ is an extended regular
/// expression that matches Str and only Str.
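
For illustration only (not part of the patch): a sketch of match() with capture groups and sub() with the optional error string; the pattern and input are arbitrary.

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Regex.h"
#include "llvm/Support/raw_ostream.h"
#include <string>
using namespace llvm;

int main() {
  Regex R("([a-z]+)=([0-9]+)");
  SmallVector<StringRef, 4> Matches; // group 0 is the whole match
  if (R.match("width=640", &Matches))
    outs() << Matches[1] << " is " << Matches[2] << "\n";

  std::string Error;
  std::string Repl = R.sub("\\1: \\2", "width=640", &Error);
  if (!Error.empty())
    errs() << "substitution failed: " << Error << "\n";
  else
    outs() << Repl << "\n"; // "width: 640"
  return 0;
}
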
diff --git a/include/llvm/Support/Registry.h b/include/llvm/Support/Registry.h
index 073becd..b0c2e89 100644
--- a/include/llvm/Support/Registry.h
+++ b/include/llvm/Support/Registry.h
@@ -14,24 +14,27 @@
#ifndef LLVM_SUPPORT_REGISTRY_H
#define LLVM_SUPPORT_REGISTRY_H
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Compiler.h"
+#include <memory>
+
namespace llvm {
/// A simple registry entry which provides only a name, description, and
/// no-argument constructor.
template <typename T>
class SimpleRegistryEntry {
const char *Name, *Desc;
- T *(*Ctor)();
+ std::unique_ptr<T> (*Ctor)();
public:
- SimpleRegistryEntry(const char *N, const char *D, T *(*C)())
+ SimpleRegistryEntry(const char *N, const char *D, std::unique_ptr<T> (*C)())
: Name(N), Desc(D), Ctor(C)
{}
const char *getName() const { return Name; }
const char *getDesc() const { return Desc; }
- T *instantiate() const { return Ctor(); }
+ std::unique_ptr<T> instantiate() const { return Ctor(); }
};
@@ -88,7 +91,7 @@ namespace llvm {
const entry& Val;
public:
- node(const entry& V) : Next(0), Val(V) {
+ node(const entry& V) : Next(nullptr), Val(V) {
if (Tail)
Tail->Next = this;
else
@@ -116,7 +119,7 @@ namespace llvm {
};
static iterator begin() { return iterator(Head); }
- static iterator end() { return iterator(0); }
+ static iterator end() { return iterator(nullptr); }
/// Abstract base class for registry listeners, which are informed when new
@@ -195,7 +198,7 @@ namespace llvm {
entry Entry;
node Node;
- static T *CtorFn() { return new V(); }
+ static std::unique_ptr<T> CtorFn() { return make_unique<V>(); }
public:
Add(const char *Name, const char *Desc)
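
For illustration only (not part of the patch): a sketch of a registry whose entries are now instantiated through the std::unique_ptr-returning callback. The Widget/HelloWidget hierarchy and the registry itself are made up; real users look like clang's frontend plugin registry.

#include "llvm/Support/Registry.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
using namespace llvm;

struct Widget {
  virtual ~Widget() {}
  virtual void run() = 0;
};
typedef Registry<Widget> WidgetRegistry;

struct HelloWidget : Widget {
  void run() override { outs() << "hello from HelloWidget\n"; }
};
// Registration is unchanged; the generated constructor callback now returns
// std::unique_ptr<Widget> via llvm::make_unique.
static WidgetRegistry::Add<HelloWidget> X("hello", "prints a greeting");

int main() {
  for (WidgetRegistry::iterator I = WidgetRegistry::begin(),
                                E = WidgetRegistry::end();
       I != E; ++I) {
    std::unique_ptr<Widget> W = I->instantiate(); // ownership is explicit now
    W->run();
  }
  return 0;
}
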
diff --git a/include/llvm/Support/SMLoc.h b/include/llvm/Support/SMLoc.h
index 0906471..d5b4c57 100644
--- a/include/llvm/Support/SMLoc.h
+++ b/include/llvm/Support/SMLoc.h
@@ -23,9 +23,9 @@ namespace llvm {
class SMLoc {
const char *Ptr;
public:
- SMLoc() : Ptr(0) {}
+ SMLoc() : Ptr(nullptr) {}
- bool isValid() const { return Ptr != 0; }
+ bool isValid() const { return Ptr != nullptr; }
bool operator==(const SMLoc &RHS) const { return RHS.Ptr == Ptr; }
bool operator!=(const SMLoc &RHS) const { return RHS.Ptr != Ptr; }
diff --git a/include/llvm/Support/SaveAndRestore.h b/include/llvm/Support/SaveAndRestore.h
index 6330bec..ef154ac 100644
--- a/include/llvm/Support/SaveAndRestore.h
+++ b/include/llvm/Support/SaveAndRestore.h
@@ -6,10 +6,11 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file provides utility classes that uses RAII to save and restore
-// values.
-//
+///
+/// \file
+/// This file provides utility classes that use RAII to save and restore
+/// values.
+///
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_SAVEANDRESTORE_H
@@ -17,31 +18,32 @@
namespace llvm {
-// SaveAndRestore - A utility class that uses RAII to save and restore
-// the value of a variable.
-template<typename T>
-struct SaveAndRestore {
- SaveAndRestore(T& x) : X(x), old_value(x) {}
- SaveAndRestore(T& x, const T &new_value) : X(x), old_value(x) {
- X = new_value;
+/// A utility class that uses RAII to save and restore the value of a variable.
+template <typename T> struct SaveAndRestore {
+ SaveAndRestore(T &X) : X(X), OldValue(X) {}
+ SaveAndRestore(T &X, const T &NewValue) : X(X), OldValue(X) {
+ X = NewValue;
}
- ~SaveAndRestore() { X = old_value; }
- T get() { return old_value; }
+ ~SaveAndRestore() { X = OldValue; }
+ T get() { return OldValue; }
+
private:
- T& X;
- T old_value;
+ T &X;
+ T OldValue;
};
-// SaveOr - Similar to SaveAndRestore. Operates only on bools; the old
-// value of a variable is saved, and during the dstor the old value is
-// or'ed with the new value.
+/// Similar to \c SaveAndRestore. Operates only on bools; the old value of a
+/// variable is saved, and during the destructor the old value is or'ed with
+/// the new value.
struct SaveOr {
- SaveOr(bool& x) : X(x), old_value(x) { x = false; }
- ~SaveOr() { X |= old_value; }
+ SaveOr(bool &X) : X(X), OldValue(X) { X = false; }
+ ~SaveOr() { X |= OldValue; }
+
private:
- bool& X;
- const bool old_value;
+ bool &X;
+ const bool OldValue;
};
-}
+} // namespace llvm
+
#endif
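
For illustration only (not part of the patch): a sketch of the RAII behaviour with the renamed members; the Depth counter and recurse() helper are made up.

#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static unsigned Depth = 0;

static void recurse(unsigned N) {
  // Depth is raised for the dynamic extent of this call and restored on every
  // exit path when SavedDepth goes out of scope.
  SaveAndRestore<unsigned> SavedDepth(Depth, Depth + 1);
  if (N == 0)
    return;
  outs() << "depth " << Depth << "\n";
  recurse(N - 1);
}

int main() {
  recurse(3);
  outs() << "back to depth " << Depth << "\n"; // prints 0 again
  return 0;
}
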
diff --git a/include/llvm/Support/Signals.h b/include/llvm/Support/Signals.h
index 58ed175..6cbc1f6 100644
--- a/include/llvm/Support/Signals.h
+++ b/include/llvm/Support/Signals.h
@@ -28,7 +28,7 @@ namespace sys {
/// This function registers signal handlers to ensure that if a signal gets
/// delivered that the named file is removed.
/// @brief Remove a file if a fatal signal occurs.
- bool RemoveFileOnSignal(StringRef Filename, std::string* ErrMsg = 0);
+ bool RemoveFileOnSignal(StringRef Filename, std::string* ErrMsg = nullptr);
/// This function removes a file from the list of files to be removed on
/// signal delivery.
diff --git a/include/llvm/Support/SourceMgr.h b/include/llvm/Support/SourceMgr.h
index dd48974..39f896d 100644
--- a/include/llvm/Support/SourceMgr.h
+++ b/include/llvm/Support/SourceMgr.h
@@ -71,7 +71,8 @@ private:
SourceMgr(const SourceMgr&) LLVM_DELETED_FUNCTION;
void operator=(const SourceMgr&) LLVM_DELETED_FUNCTION;
public:
- SourceMgr() : LineNoCache(0), DiagHandler(0), DiagContext(0) {}
+ SourceMgr()
+ : LineNoCache(nullptr), DiagHandler(nullptr), DiagContext(nullptr) {}
~SourceMgr();
void setIncludeDirs(const std::vector<std::string> &Dirs) {
@@ -80,7 +81,7 @@ public:
/// setDiagHandler - Specify a diagnostic handler to be invoked every time
/// PrintMessage is called. Ctx is passed into the handler when it is invoked.
- void setDiagHandler(DiagHandlerTy DH, void *Ctx = 0) {
+ void setDiagHandler(DiagHandlerTy DH, void *Ctx = nullptr) {
DiagHandler = DH;
DiagContext = Ctx;
}
@@ -222,10 +223,10 @@ class SMDiagnostic {
public:
// Null diagnostic.
SMDiagnostic()
- : SM(0), LineNo(0), ColumnNo(0), Kind(SourceMgr::DK_Error) {}
+ : SM(nullptr), LineNo(0), ColumnNo(0), Kind(SourceMgr::DK_Error) {}
// Diagnostic with no location (e.g. file not found, command line arg error).
SMDiagnostic(StringRef filename, SourceMgr::DiagKind Knd, StringRef Msg)
- : SM(0), Filename(filename), LineNo(-1), ColumnNo(-1), Kind(Knd),
+ : SM(nullptr), Filename(filename), LineNo(-1), ColumnNo(-1), Kind(Knd),
Message(Msg) {}
// Diagnostic with a location.
diff --git a/include/llvm/Support/StreamableMemoryObject.h b/include/llvm/Support/StreamableMemoryObject.h
index 0259630..9c9e55c 100644
--- a/include/llvm/Support/StreamableMemoryObject.h
+++ b/include/llvm/Support/StreamableMemoryObject.h
@@ -116,7 +116,7 @@ public:
// the memory doesn't go away/get reallocated, but it's
// not currently necessary. Users that need the pointer don't stream.
assert(0 && "getPointer in streaming memory objects not allowed");
- return NULL;
+ return nullptr;
}
bool isValidAddress(uint64_t address) const override;
bool isObjectEnd(uint64_t address) const override;
diff --git a/include/llvm/Support/StringPool.h b/include/llvm/Support/StringPool.h
index 71adbc5..7e1394c 100644
--- a/include/llvm/Support/StringPool.h
+++ b/include/llvm/Support/StringPool.h
@@ -48,7 +48,7 @@ namespace llvm {
unsigned Refcount; ///< Number of referencing PooledStringPtrs.
public:
- PooledString() : Pool(0), Refcount(0) { }
+ PooledString() : Pool(nullptr), Refcount(0) { }
};
friend class PooledStringPtr;
@@ -81,7 +81,7 @@ namespace llvm {
entry_t *S;
public:
- PooledStringPtr() : S(0) {}
+ PooledStringPtr() : S(nullptr) {}
explicit PooledStringPtr(entry_t *E) : S(E) {
if (S) ++S->getValue().Refcount;
@@ -107,7 +107,7 @@ namespace llvm {
S->getValue().Pool->InternTable.remove(S);
S->Destroy();
}
- S = 0;
+ S = nullptr;
}
~PooledStringPtr() { clear(); }
@@ -128,7 +128,7 @@ namespace llvm {
}
inline const char *operator*() const { return begin(); }
- inline operator bool() const { return S != 0; }
+ inline operator bool() const { return S != nullptr; }
inline bool operator==(const PooledStringPtr &That) { return S == That.S; }
inline bool operator!=(const PooledStringPtr &That) { return S != That.S; }
diff --git a/include/llvm/Support/TargetRegistry.h b/include/llvm/Support/TargetRegistry.h
index 8e7478c..fcdc604 100644
--- a/include/llvm/Support/TargetRegistry.h
+++ b/include/llvm/Support/TargetRegistry.h
@@ -45,14 +45,14 @@ namespace llvm {
class MCSymbolizer;
class MCRelocationInfo;
class MCTargetAsmParser;
+ class MCTargetOptions;
class TargetMachine;
class TargetOptions;
class raw_ostream;
class formatted_raw_ostream;
MCStreamer *createAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
- bool isVerboseAsm, bool useCFI,
- bool useDwarfDirectory,
+ bool isVerboseAsm, bool useDwarfDirectory,
MCInstPrinter *InstPrint, MCCodeEmitter *CE,
MCAsmBackend *TAB, bool ShowInst);
@@ -104,11 +104,14 @@ namespace llvm {
const MCRegisterInfo &MRI,
StringRef TT,
StringRef CPU);
- typedef MCTargetAsmParser *(*MCAsmParserCtorTy)(MCSubtargetInfo &STI,
- MCAsmParser &P,
- const MCInstrInfo &MII);
+ typedef MCTargetAsmParser *(*MCAsmParserCtorTy)(
+ MCSubtargetInfo &STI,
+ MCAsmParser &P,
+ const MCInstrInfo &MII,
+ const MCTargetOptions &Options);
typedef MCDisassembler *(*MCDisassemblerCtorTy)(const Target &T,
- const MCSubtargetInfo &STI);
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx);
typedef MCInstPrinter *(*MCInstPrinterCtorTy)(const Target &T,
unsigned SyntaxVariant,
const MCAsmInfo &MAI,
@@ -131,7 +134,6 @@ namespace llvm {
typedef MCStreamer *(*AsmStreamerCtorTy)(MCContext &Ctx,
formatted_raw_ostream &OS,
bool isVerboseAsm,
- bool useCFI,
bool useDwarfDirectory,
MCInstPrinter *InstPrint,
MCCodeEmitter *CE,
@@ -233,8 +235,8 @@ namespace llvm {
public:
Target()
- : AsmStreamerCtorFn(0), MCRelocationInfoCtorFn(0),
- MCSymbolizerCtorFn(0) {}
+ : AsmStreamerCtorFn(nullptr), MCRelocationInfoCtorFn(nullptr),
+ MCSymbolizerCtorFn(nullptr) {}
/// @name Target Information
/// @{
@@ -256,10 +258,10 @@ namespace llvm {
bool hasJIT() const { return HasJIT; }
/// hasTargetMachine - Check if this target supports code generation.
- bool hasTargetMachine() const { return TargetMachineCtorFn != 0; }
+ bool hasTargetMachine() const { return TargetMachineCtorFn != nullptr; }
/// hasMCAsmBackend - Check if this target supports .o generation.
- bool hasMCAsmBackend() const { return MCAsmBackendCtorFn != 0; }
+ bool hasMCAsmBackend() const { return MCAsmBackendCtorFn != nullptr; }
/// @}
/// @name Feature Constructors
@@ -275,7 +277,7 @@ namespace llvm {
MCAsmInfo *createMCAsmInfo(const MCRegisterInfo &MRI,
StringRef Triple) const {
if (!MCAsmInfoCtorFn)
- return 0;
+ return nullptr;
return MCAsmInfoCtorFn(MRI, Triple);
}
@@ -285,7 +287,7 @@ namespace llvm {
CodeModel::Model CM,
CodeGenOpt::Level OL) const {
if (!MCCodeGenInfoCtorFn)
- return 0;
+ return nullptr;
return MCCodeGenInfoCtorFn(Triple, RM, CM, OL);
}
@@ -293,7 +295,7 @@ namespace llvm {
///
MCInstrInfo *createMCInstrInfo() const {
if (!MCInstrInfoCtorFn)
- return 0;
+ return nullptr;
return MCInstrInfoCtorFn();
}
@@ -301,7 +303,7 @@ namespace llvm {
///
MCInstrAnalysis *createMCInstrAnalysis(const MCInstrInfo *Info) const {
if (!MCInstrAnalysisCtorFn)
- return 0;
+ return nullptr;
return MCInstrAnalysisCtorFn(Info);
}
@@ -309,7 +311,7 @@ namespace llvm {
///
MCRegisterInfo *createMCRegInfo(StringRef Triple) const {
if (!MCRegInfoCtorFn)
- return 0;
+ return nullptr;
return MCRegInfoCtorFn(Triple);
}
@@ -325,7 +327,7 @@ namespace llvm {
MCSubtargetInfo *createMCSubtargetInfo(StringRef Triple, StringRef CPU,
StringRef Features) const {
if (!MCSubtargetInfoCtorFn)
- return 0;
+ return nullptr;
return MCSubtargetInfoCtorFn(Triple, CPU, Features);
}
@@ -342,7 +344,7 @@ namespace llvm {
CodeModel::Model CM = CodeModel::Default,
CodeGenOpt::Level OL = CodeGenOpt::Default) const {
if (!TargetMachineCtorFn)
- return 0;
+ return nullptr;
return TargetMachineCtorFn(*this, Triple, CPU, Features, Options,
RM, CM, OL);
}
@@ -353,7 +355,7 @@ namespace llvm {
MCAsmBackend *createMCAsmBackend(const MCRegisterInfo &MRI,
StringRef Triple, StringRef CPU) const {
if (!MCAsmBackendCtorFn)
- return 0;
+ return nullptr;
return MCAsmBackendCtorFn(*this, MRI, Triple, CPU);
}
@@ -361,26 +363,29 @@ namespace llvm {
///
/// \param Parser The target independent parser implementation to use for
/// parsing and lexing.
- MCTargetAsmParser *createMCAsmParser(MCSubtargetInfo &STI,
- MCAsmParser &Parser,
- const MCInstrInfo &MII) const {
+ MCTargetAsmParser *createMCAsmParser(
+ MCSubtargetInfo &STI,
+ MCAsmParser &Parser,
+ const MCInstrInfo &MII,
+ const MCTargetOptions &Options) const {
if (!MCAsmParserCtorFn)
- return 0;
- return MCAsmParserCtorFn(STI, Parser, MII);
+ return nullptr;
+ return MCAsmParserCtorFn(STI, Parser, MII, Options);
}
/// createAsmPrinter - Create a target specific assembly printer pass. This
/// takes ownership of the MCStreamer object.
AsmPrinter *createAsmPrinter(TargetMachine &TM, MCStreamer &Streamer) const{
if (!AsmPrinterCtorFn)
- return 0;
+ return nullptr;
return AsmPrinterCtorFn(TM, Streamer);
}
- MCDisassembler *createMCDisassembler(const MCSubtargetInfo &STI) const {
+ MCDisassembler *createMCDisassembler(const MCSubtargetInfo &STI,
+ MCContext &Ctx) const {
if (!MCDisassemblerCtorFn)
- return 0;
- return MCDisassemblerCtorFn(*this, STI);
+ return nullptr;
+ return MCDisassemblerCtorFn(*this, STI, Ctx);
}
MCInstPrinter *createMCInstPrinter(unsigned SyntaxVariant,
@@ -389,7 +394,7 @@ namespace llvm {
const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI) const {
if (!MCInstPrinterCtorFn)
- return 0;
+ return nullptr;
return MCInstPrinterCtorFn(*this, SyntaxVariant, MAI, MII, MRI, STI);
}
@@ -400,7 +405,7 @@ namespace llvm {
const MCSubtargetInfo &STI,
MCContext &Ctx) const {
if (!MCCodeEmitterCtorFn)
- return 0;
+ return nullptr;
return MCCodeEmitterCtorFn(II, MRI, STI, Ctx);
}
@@ -421,7 +426,7 @@ namespace llvm {
bool RelaxAll,
bool NoExecStack) const {
if (!MCObjectStreamerCtorFn)
- return 0;
+ return nullptr;
return MCObjectStreamerCtorFn(*this, TT, Ctx, TAB, _OS, _Emitter, STI,
RelaxAll, NoExecStack);
}
@@ -430,19 +435,16 @@ namespace llvm {
MCStreamer *createAsmStreamer(MCContext &Ctx,
formatted_raw_ostream &OS,
bool isVerboseAsm,
- bool useCFI,
bool useDwarfDirectory,
MCInstPrinter *InstPrint,
MCCodeEmitter *CE,
MCAsmBackend *TAB,
bool ShowInst) const {
if (AsmStreamerCtorFn)
- return AsmStreamerCtorFn(Ctx, OS, isVerboseAsm, useCFI,
- useDwarfDirectory, InstPrint, CE, TAB,
- ShowInst);
- return llvm::createAsmStreamer(Ctx, OS, isVerboseAsm, useCFI,
- useDwarfDirectory, InstPrint, CE, TAB,
- ShowInst);
+ return AsmStreamerCtorFn(Ctx, OS, isVerboseAsm, useDwarfDirectory,
+ InstPrint, CE, TAB, ShowInst);
+ return llvm::createAsmStreamer(Ctx, OS, isVerboseAsm, useDwarfDirectory,
+ InstPrint, CE, TAB, ShowInst);
}
/// createMCRelocationInfo - Create a target specific MCRelocationInfo.
@@ -486,7 +488,7 @@ namespace llvm {
explicit iterator(Target *T) : Current(T) {}
friend struct TargetRegistry;
public:
- iterator() : Current(0) {}
+ iterator() : Current(nullptr) {}
bool operator==(const iterator &x) const {
return Current == x.Current;
@@ -1097,8 +1099,9 @@ namespace llvm {
private:
static MCTargetAsmParser *Allocator(MCSubtargetInfo &STI, MCAsmParser &P,
- const MCInstrInfo &MII) {
- return new MCAsmParserImpl(STI, P, MII);
+ const MCInstrInfo &MII,
+ const MCTargetOptions &Options) {
+ return new MCAsmParserImpl(STI, P, MII, Options);
}
};
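
For illustration only (not part of the patch): a sketch of the updated createMCDisassembler call, which now also takes an MCContext. The target triple is arbitrary, error handling is minimal, and passing a null MCObjectFileInfo to MCContext is a simplification for this sketch.

#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include <memory>
#include <string>
using namespace llvm;

int main() {
  InitializeAllTargetInfos();
  InitializeAllTargetMCs();
  InitializeAllDisassemblers();

  std::string TT = "x86_64-unknown-linux-gnu";
  std::string Err;
  const Target *T = TargetRegistry::lookupTarget(TT, Err);
  if (!T)
    return 1;

  std::unique_ptr<MCRegisterInfo> MRI(T->createMCRegInfo(TT));
  std::unique_ptr<MCAsmInfo> MAI(T->createMCAsmInfo(*MRI, TT));
  std::unique_ptr<MCSubtargetInfo> STI(T->createMCSubtargetInfo(TT, "", ""));

  MCContext Ctx(MAI.get(), MRI.get(), /*MOFI=*/nullptr);
  // The disassembler hook now receives the context as well.
  std::unique_ptr<MCDisassembler> Dis(T->createMCDisassembler(*STI, Ctx));
  return Dis ? 0 : 1;
}
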
diff --git a/include/llvm/Support/Timer.h b/include/llvm/Support/Timer.h
index d009d7f..45c1828 100644
--- a/include/llvm/Support/Timer.h
+++ b/include/llvm/Support/Timer.h
@@ -85,24 +85,24 @@ class Timer {
Timer **Prev, *Next; // Doubly linked list of timers in the group.
public:
- explicit Timer(StringRef N) : TG(0) { init(N); }
- Timer(StringRef N, TimerGroup &tg) : TG(0) { init(N, tg); }
- Timer(const Timer &RHS) : TG(0) {
- assert(RHS.TG == 0 && "Can only copy uninitialized timers");
+ explicit Timer(StringRef N) : TG(nullptr) { init(N); }
+ Timer(StringRef N, TimerGroup &tg) : TG(nullptr) { init(N, tg); }
+ Timer(const Timer &RHS) : TG(nullptr) {
+ assert(!RHS.TG && "Can only copy uninitialized timers");
}
const Timer &operator=(const Timer &T) {
- assert(TG == 0 && T.TG == 0 && "Can only assign uninit timers");
+ assert(!TG && !T.TG && "Can only assign uninit timers");
return *this;
}
~Timer();
// Create an uninitialized timer, client must use 'init'.
- explicit Timer() : TG(0) {}
+ explicit Timer() : TG(nullptr) {}
void init(StringRef N);
void init(StringRef N, TimerGroup &tg);
const std::string &getName() const { return Name; }
- bool isInitialized() const { return TG != 0; }
+ bool isInitialized() const { return TG != nullptr; }
/// startTimer - Start the timer running. Time between calls to
/// startTimer/stopTimer is counted by the Timer class. Note that these calls
diff --git a/include/llvm/Support/Unicode.h b/include/llvm/Support/Unicode.h
index e6a52c4..f668a5b 100644
--- a/include/llvm/Support/Unicode.h
+++ b/include/llvm/Support/Unicode.h
@@ -12,6 +12,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_SUPPORT_UNICODE_H
+#define LLVM_SUPPORT_UNICODE_H
+
#include "llvm/ADT/StringRef.h"
namespace llvm {
@@ -60,3 +63,5 @@ int columnWidthUTF8(StringRef Text);
} // namespace unicode
} // namespace sys
} // namespace llvm
+
+#endif
diff --git a/include/llvm/Support/UnicodeCharRanges.h b/include/llvm/Support/UnicodeCharRanges.h
index 734d323..79137bf 100644
--- a/include/llvm/Support/UnicodeCharRanges.h
+++ b/include/llvm/Support/UnicodeCharRanges.h
@@ -21,6 +21,8 @@
namespace llvm {
namespace sys {
+#define DEBUG_TYPE "unicode"
+
/// \brief Represents a closed range of Unicode code points [Lower, Upper].
struct UnicodeCharRange {
uint32_t Lower;
@@ -88,6 +90,8 @@ private:
const CharRanges Ranges;
};
+#undef DEBUG_TYPE // "unicode"
+
} // namespace sys
} // namespace llvm
diff --git a/include/llvm/Support/YAMLParser.h b/include/llvm/Support/YAMLParser.h
index 5194b52..c39874c 100644
--- a/include/llvm/Support/YAMLParser.h
+++ b/include/llvm/Support/YAMLParser.h
@@ -60,26 +60,26 @@ class Node;
class Scanner;
struct Token;
-/// @brief Dump all the tokens in this stream to OS.
-/// @returns true if there was an error, false otherwise.
+/// \brief Dump all the tokens in this stream to OS.
+/// \returns true if there was an error, false otherwise.
bool dumpTokens(StringRef Input, raw_ostream &);
-/// @brief Scans all tokens in input without outputting anything. This is used
+/// \brief Scans all tokens in input without outputting anything. This is used
/// for benchmarking the tokenizer.
-/// @returns true if there was an error, false otherwise.
+/// \returns true if there was an error, false otherwise.
bool scanTokens(StringRef Input);
-/// @brief Escape \a Input for a double quoted scalar.
+/// \brief Escape \a Input for a double quoted scalar.
std::string escape(StringRef Input);
-/// @brief This class represents a YAML stream potentially containing multiple
+/// \brief This class represents a YAML stream potentially containing multiple
/// documents.
class Stream {
public:
- /// @brief This keeps a reference to the string referenced by \p Input.
+ /// \brief This keeps a reference to the string referenced by \p Input.
Stream(StringRef Input, SourceMgr &);
- /// @brief This takes ownership of \p InputBuffer.
+ /// \brief This takes ownership of \p InputBuffer.
Stream(MemoryBuffer *InputBuffer, SourceMgr &);
~Stream();
@@ -101,9 +101,10 @@ private:
friend class Document;
};
-/// @brief Abstract base class for all Nodes.
+/// \brief Abstract base class for all Nodes.
class Node {
- virtual void anchor();
+ virtual void anchor();
+
public:
enum NodeKind {
NK_Null,
@@ -117,7 +118,7 @@ public:
Node(unsigned int Type, std::unique_ptr<Document> &, StringRef Anchor,
StringRef Tag);
- /// @brief Get the value of the anchor attached to this node. If it does not
+ /// \brief Get the value of the anchor attached to this node. If it does not
/// have one, getAnchor().size() will be 0.
StringRef getAnchor() const { return Anchor; }
@@ -144,14 +145,13 @@ public:
unsigned int getType() const { return TypeID; }
- void *operator new ( size_t Size
- , BumpPtrAllocator &Alloc
- , size_t Alignment = 16) throw() {
+ void *operator new(size_t Size, BumpPtrAllocator &Alloc,
+ size_t Alignment = 16) throw() {
return Alloc.Allocate(Size, Alignment);
}
- void operator delete(void *Ptr, BumpPtrAllocator &Alloc, size_t) throw() {
- Alloc.Deallocate(Ptr);
+ void operator delete(void *Ptr, BumpPtrAllocator &Alloc, size_t Size) throw() {
+ Alloc.Deallocate(Ptr, Size);
}
protected:
@@ -169,28 +169,28 @@ private:
StringRef Tag;
};
-/// @brief A null value.
+/// \brief A null value.
///
/// Example:
/// !!null null
class NullNode : public Node {
void anchor() override;
+
public:
NullNode(std::unique_ptr<Document> &D)
: Node(NK_Null, D, StringRef(), StringRef()) {}
- static inline bool classof(const Node *N) {
- return N->getType() == NK_Null;
- }
+ static inline bool classof(const Node *N) { return N->getType() == NK_Null; }
};
-/// @brief A scalar node is an opaque datum that can be presented as a
+/// \brief A scalar node is an opaque datum that can be presented as a
/// series of zero or more Unicode scalar values.
///
/// Example:
/// Adena
class ScalarNode : public Node {
void anchor() override;
+
public:
ScalarNode(std::unique_ptr<Document> &D, StringRef Anchor, StringRef Tag,
StringRef Val)
@@ -205,9 +205,9 @@ public:
// utf8).
StringRef getRawValue() const { return Value; }
- /// @brief Gets the value of this node as a StringRef.
+ /// \brief Gets the value of this node as a StringRef.
///
- /// @param Storage is used to store the content of the returned StringRef iff
+ /// \param Storage is used to store the content of the returned StringRef iff
/// it requires any modification from how it appeared in the source.
/// This happens with escaped characters and multi-line literals.
StringRef getValue(SmallVectorImpl<char> &Storage) const;
@@ -219,12 +219,12 @@ public:
private:
StringRef Value;
- StringRef unescapeDoubleQuoted( StringRef UnquotedValue
- , StringRef::size_type Start
- , SmallVectorImpl<char> &Storage) const;
+ StringRef unescapeDoubleQuoted(StringRef UnquotedValue,
+ StringRef::size_type Start,
+ SmallVectorImpl<char> &Storage) const;
};
-/// @brief A key and value pair. While not technically a Node under the YAML
+/// \brief A key and value pair. While not technically a Node under the YAML
/// representation graph, it is easier to treat them this way.
///
/// TODO: Consider making this not a child of Node.
@@ -233,22 +233,24 @@ private:
/// Section: .text
class KeyValueNode : public Node {
void anchor() override;
+
public:
KeyValueNode(std::unique_ptr<Document> &D)
- : Node(NK_KeyValue, D, StringRef(), StringRef()), Key(0), Value(0) {}
+ : Node(NK_KeyValue, D, StringRef(), StringRef()), Key(nullptr),
+ Value(nullptr) {}
- /// @brief Parse and return the key.
+ /// \brief Parse and return the key.
///
/// This may be called multiple times.
///
- /// @returns The key, or nullptr if failed() == true.
+ /// \returns The key, or nullptr if failed() == true.
Node *getKey();
- /// @brief Parse and return the value.
+ /// \brief Parse and return the value.
///
/// This may be called multiple times.
///
- /// @returns The value, or nullptr if failed() == true.
+ /// \returns The value, or nullptr if failed() == true.
Node *getValue();
void skip() override {
@@ -265,47 +267,47 @@ private:
Node *Value;
};
-/// @brief This is an iterator abstraction over YAML collections shared by both
+/// \brief This is an iterator abstraction over YAML collections shared by both
/// sequences and maps.
///
/// BaseT must have a ValueT* member named CurrentEntry and a member function
/// increment() which must set CurrentEntry to 0 to create an end iterator.
template <class BaseT, class ValueT>
class basic_collection_iterator
- : public std::iterator<std::forward_iterator_tag, ValueT> {
+ : public std::iterator<std::forward_iterator_tag, ValueT> {
public:
- basic_collection_iterator() : Base(0) {}
+ basic_collection_iterator() : Base(nullptr) {}
basic_collection_iterator(BaseT *B) : Base(B) {}
- ValueT *operator ->() const {
+ ValueT *operator->() const {
assert(Base && Base->CurrentEntry && "Attempted to access end iterator!");
return Base->CurrentEntry;
}
- ValueT &operator *() const {
+ ValueT &operator*() const {
assert(Base && Base->CurrentEntry &&
"Attempted to dereference end iterator!");
return *Base->CurrentEntry;
}
- operator ValueT*() const {
+ operator ValueT *() const {
assert(Base && Base->CurrentEntry && "Attempted to access end iterator!");
return Base->CurrentEntry;
}
- bool operator !=(const basic_collection_iterator &Other) const {
- if(Base != Other.Base)
+ bool operator!=(const basic_collection_iterator &Other) const {
+ if (Base != Other.Base)
return true;
- return (Base && Other.Base) && Base->CurrentEntry
- != Other.Base->CurrentEntry;
+ return (Base && Other.Base) &&
+ Base->CurrentEntry != Other.Base->CurrentEntry;
}
basic_collection_iterator &operator++() {
assert(Base && "Attempted to advance iterator past end!");
Base->increment();
// Create an end iterator.
- if (Base->CurrentEntry == 0)
- Base = 0;
+ if (!Base->CurrentEntry)
+ Base = nullptr;
return *this;
}
@@ -323,17 +325,16 @@ typename CollectionType::iterator begin(CollectionType &C) {
return ret;
}
-template <class CollectionType>
-void skip(CollectionType &C) {
+template <class CollectionType> void skip(CollectionType &C) {
// TODO: support skipping from the middle of a parsed collection ;/
assert((C.IsAtBeginning || C.IsAtEnd) && "Cannot skip mid parse!");
if (C.IsAtBeginning)
- for (typename CollectionType::iterator i = begin(C), e = C.end();
- i != e; ++i)
+ for (typename CollectionType::iterator i = begin(C), e = C.end(); i != e;
+ ++i)
i->skip();
}
-/// @brief Represents a YAML map created from either a block map for a flow map.
+/// \brief Represents a YAML map created from either a block map or a flow map.
///
/// This parses the YAML stream as increment() is called.
///
@@ -342,6 +343,7 @@ void skip(CollectionType &C) {
/// Scope: Global
class MappingNode : public Node {
void anchor() override;
+
public:
enum MappingType {
MT_Block,
@@ -352,22 +354,18 @@ public:
MappingNode(std::unique_ptr<Document> &D, StringRef Anchor, StringRef Tag,
MappingType MT)
: Node(NK_Mapping, D, Anchor, Tag), Type(MT), IsAtBeginning(true),
- IsAtEnd(false), CurrentEntry(0) {}
+ IsAtEnd(false), CurrentEntry(nullptr) {}
friend class basic_collection_iterator<MappingNode, KeyValueNode>;
typedef basic_collection_iterator<MappingNode, KeyValueNode> iterator;
template <class T> friend typename T::iterator yaml::begin(T &);
template <class T> friend void yaml::skip(T &);
- iterator begin() {
- return yaml::begin(*this);
- }
+ iterator begin() { return yaml::begin(*this); }
iterator end() { return iterator(); }
- void skip() override {
- yaml::skip(*this);
- }
+ void skip() override { yaml::skip(*this); }
static inline bool classof(const Node *N) {
return N->getType() == NK_Mapping;
@@ -382,7 +380,7 @@ private:
void increment();
};
-/// @brief Represents a YAML sequence created from either a block sequence for a
+/// \brief Represents a YAML sequence created from either a block sequence or a
/// flow sequence.
///
/// This parses the YAML stream as increment() is called.
@@ -392,6 +390,7 @@ private:
/// - World
class SequenceNode : public Node {
void anchor() override;
+
public:
enum SequenceType {
ST_Block,
@@ -411,7 +410,7 @@ public:
: Node(NK_Sequence, D, Anchor, Tag), SeqType(ST), IsAtBeginning(true),
IsAtEnd(false),
WasPreviousTokenFlowEntry(true), // Start with an imaginary ','.
- CurrentEntry(0) {}
+ CurrentEntry(nullptr) {}
friend class basic_collection_iterator<SequenceNode, Node>;
typedef basic_collection_iterator<SequenceNode, Node> iterator;
@@ -420,15 +419,11 @@ public:
void increment();
- iterator begin() {
- return yaml::begin(*this);
- }
+ iterator begin() { return yaml::begin(*this); }
iterator end() { return iterator(); }
- void skip() override {
- yaml::skip(*this);
- }
+ void skip() override { yaml::skip(*this); }
static inline bool classof(const Node *N) {
return N->getType() == NK_Sequence;
@@ -442,12 +437,13 @@ private:
Node *CurrentEntry;
};
-/// @brief Represents an alias to a Node with an anchor.
+/// \brief Represents an alias to a Node with an anchor.
///
/// Example:
/// *AnchorName
class AliasNode : public Node {
void anchor() override;
+
public:
AliasNode(std::unique_ptr<Document> &D, StringRef Val)
: Node(NK_Alias, D, StringRef(), StringRef()), Name(Val) {}
@@ -455,50 +451,46 @@ public:
StringRef getName() const { return Name; }
Node *getTarget();
- static inline bool classof(const Node *N) {
- return N->getType() == NK_Alias;
- }
+ static inline bool classof(const Node *N) { return N->getType() == NK_Alias; }
private:
StringRef Name;
};
-/// @brief A YAML Stream is a sequence of Documents. A document contains a root
+/// \brief A YAML Stream is a sequence of Documents. A document contains a root
/// node.
class Document {
public:
- /// @brief Root for parsing a node. Returns a single node.
+ /// \brief Root for parsing a node. Returns a single node.
Node *parseBlockNode();
Document(Stream &ParentStream);
- /// @brief Finish parsing the current document and return true if there are
+ /// \brief Finish parsing the current document and return true if there are
/// more. Return false otherwise.
bool skip();
- /// @brief Parse and return the root level node.
+ /// \brief Parse and return the root level node.
Node *getRoot() {
if (Root)
return Root;
return Root = parseBlockNode();
}
- const std::map<StringRef, StringRef> &getTagMap() const {
- return TagMap;
- }
+ const std::map<StringRef, StringRef> &getTagMap() const { return TagMap; }
private:
friend class Node;
friend class document_iterator;
- /// @brief Stream to read tokens from.
+ /// \brief Stream to read tokens from.
Stream &stream;
- /// @brief Used to allocate nodes to. All are destroyed without calling their
+  /// \brief Used to allocate nodes. All are destroyed without calling their
/// destructor when the document is destroyed.
BumpPtrAllocator NodeAllocator;
- /// @brief The root node. Used to support skipping a partially parsed
+ /// \brief The root node. Used to support skipping a partially parsed
/// document.
Node *Root;
@@ -510,7 +502,7 @@ private:
void setError(const Twine &Message, Token &Location) const;
bool failed() const;
- /// @brief Parse %BLAH directives and return true if any were encountered.
+ /// \brief Parse %BLAH directives and return true if any were encountered.
bool parseDirectives();
/// \brief Parse %YAML
@@ -519,30 +511,28 @@ private:
/// \brief Parse %TAG
void parseTAGDirective();
- /// @brief Consume the next token and error if it is not \a TK.
+ /// \brief Consume the next token and error if it is not \a TK.
bool expectToken(int TK);
};
-/// @brief Iterator abstraction for Documents over a Stream.
+/// \brief Iterator abstraction for Documents over a Stream.
class document_iterator {
public:
- document_iterator() : Doc(0) {}
+ document_iterator() : Doc(nullptr) {}
document_iterator(std::unique_ptr<Document> &D) : Doc(&D) {}
- bool operator ==(const document_iterator &Other) {
+ bool operator==(const document_iterator &Other) {
if (isAtEnd() || Other.isAtEnd())
return isAtEnd() && Other.isAtEnd();
return Doc == Other.Doc;
}
- bool operator !=(const document_iterator &Other) {
- return !(*this == Other);
- }
+ bool operator!=(const document_iterator &Other) { return !(*this == Other); }
- document_iterator operator ++() {
- assert(Doc != 0 && "incrementing iterator past the end.");
+ document_iterator operator++() {
+ assert(Doc && "incrementing iterator past the end.");
if (!(*Doc)->skip()) {
- Doc->reset(0);
+ Doc->reset(nullptr);
} else {
Stream &S = (*Doc)->stream;
Doc->reset(new Document(S));
@@ -550,21 +540,18 @@ public:
return *this;
}
- Document &operator *() {
- return *Doc->get();
- }
+ Document &operator*() { return *Doc->get(); }
std::unique_ptr<Document> &operator->() { return *Doc; }
private:
- bool isAtEnd() const {
- return !Doc || !*Doc;
- }
+ bool isAtEnd() const { return !Doc || !*Doc; }
std::unique_ptr<Document> *Doc;
};
-}
-}
+} // End namespace yaml.
+
+} // End namespace llvm.
#endif
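
For orientation, a minimal sketch of how the YAMLParser.h API patched above is typically driven (illustrative only, not part of the patch; dumpTopLevelKeys and Buf are made-up names):

#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/YAMLParser.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Walk every document in a YAML buffer and print the keys of a top-level map.
static void dumpTopLevelKeys(StringRef Buf) {
  SourceMgr SM;
  yaml::Stream YS(Buf, SM);
  for (yaml::document_iterator DI = YS.begin(), DE = YS.end(); DI != DE; ++DI) {
    yaml::Node *Root = DI->getRoot();
    if (!Root)
      return; // Parse error; diagnostics were already reported via SourceMgr.
    if (yaml::MappingNode *Map = dyn_cast<yaml::MappingNode>(Root))
      for (yaml::KeyValueNode &KV : *Map)
        if (yaml::ScalarNode *Key =
                dyn_cast_or_null<yaml::ScalarNode>(KV.getKey())) {
          SmallString<32> Storage;
          outs() << Key->getValue(Storage) << "\n";
        }
  }
}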
diff --git a/include/llvm/Support/YAMLTraits.h b/include/llvm/Support/YAMLTraits.h
index ea217c3..4ee05ed 100644
--- a/include/llvm/Support/YAMLTraits.h
+++ b/include/llvm/Support/YAMLTraits.h
@@ -20,6 +20,7 @@
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Regex.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/YAMLParser.h"
#include "llvm/Support/raw_ostream.h"
@@ -32,7 +33,7 @@ namespace yaml {
/// This class should be specialized by any type that needs to be converted
/// to/from a YAML mapping. For example:
///
-/// struct ScalarBitSetTraits<MyStruct> {
+/// struct MappingTraits<MyStruct> {
/// static void mapping(IO &io, MyStruct &s) {
/// io.mapRequired("name", s.name);
/// io.mapRequired("size", s.size);
@@ -98,6 +99,7 @@ struct ScalarBitSetTraits {
/// // return empty string on success, or error string
/// return StringRef();
/// }
+/// static bool mustQuote(StringRef) { return true; }
/// };
template<typename T>
struct ScalarTraits {
@@ -109,6 +111,9 @@ struct ScalarTraits {
// Function to convert a string to a value. Returns the empty
// StringRef on success or an error string if string is malformed:
//static StringRef input(StringRef scalar, void *ctxt, T &value);
+ //
+ // Function to determine if the value should be quoted.
+ //static bool mustQuote(StringRef);
};
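
As a hedged illustration of the expanded contract, a ScalarTraits specialization for a hypothetical MyVersion type (not an LLVM type) could look like the sketch below; it delegates the quoting decision to the needsQuotes() helper added later in this patch:

#include "llvm/Support/YAMLTraits.h"

struct MyVersion {   // illustrative only
  unsigned Major;
  unsigned Minor;
};

namespace llvm {
namespace yaml {
template <> struct ScalarTraits<MyVersion> {
  static void output(const MyVersion &V, void *, raw_ostream &OS) {
    OS << V.Major << '.' << V.Minor;                // e.g. "3.5"
  }
  static StringRef input(StringRef S, void *, MyVersion &V) {
    std::pair<StringRef, StringRef> Parts = S.split('.');
    if (Parts.first.getAsInteger(10, V.Major) ||
        Parts.second.getAsInteger(10, V.Minor))
      return "expected <major>.<minor>";            // non-empty means error
    return StringRef();                             // empty means success
  }
  // "3.5" would re-parse as a float, so the writer must quote it.
  static bool mustQuote(StringRef S) { return needsQuotes(S); }
};
} // end namespace yaml
} // end namespace llvm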
@@ -171,7 +176,8 @@ struct has_ScalarEnumerationTraits
static double test(...);
public:
- static bool const value = (sizeof(test<ScalarEnumerationTraits<T> >(0)) == 1);
+ static bool const value =
+ (sizeof(test<ScalarEnumerationTraits<T> >(nullptr)) == 1);
};
@@ -188,7 +194,7 @@ struct has_ScalarBitSetTraits
static double test(...);
public:
- static bool const value = (sizeof(test<ScalarBitSetTraits<T> >(0)) == 1);
+ static bool const value = (sizeof(test<ScalarBitSetTraits<T> >(nullptr)) == 1);
};
@@ -198,16 +204,19 @@ struct has_ScalarTraits
{
typedef StringRef (*Signature_input)(StringRef, void*, T&);
typedef void (*Signature_output)(const T&, void*, llvm::raw_ostream&);
+ typedef bool (*Signature_mustQuote)(StringRef);
template <typename U>
- static char test(SameType<Signature_input, &U::input>*,
- SameType<Signature_output, &U::output>*);
+ static char test(SameType<Signature_input, &U::input> *,
+ SameType<Signature_output, &U::output> *,
+ SameType<Signature_mustQuote, &U::mustQuote> *);
template <typename U>
static double test(...);
public:
- static bool const value = (sizeof(test<ScalarTraits<T> >(0,0)) == 1);
+ static bool const value =
+ (sizeof(test<ScalarTraits<T>>(nullptr, nullptr, nullptr)) == 1);
};
@@ -224,7 +233,7 @@ struct has_MappingTraits
static double test(...);
public:
- static bool const value = (sizeof(test<MappingTraits<T> >(0)) == 1);
+ static bool const value = (sizeof(test<MappingTraits<T> >(nullptr)) == 1);
};
// Test if MappingTraits<T>::validate() is defined on type T.
@@ -240,7 +249,7 @@ struct has_MappingValidateTraits
static double test(...);
public:
- static bool const value = (sizeof(test<MappingTraits<T> >(0)) == 1);
+ static bool const value = (sizeof(test<MappingTraits<T> >(nullptr)) == 1);
};
@@ -258,7 +267,7 @@ struct has_SequenceMethodTraits
static double test(...);
public:
- static bool const value = (sizeof(test<SequenceTraits<T> >(0)) == 1);
+ static bool const value = (sizeof(test<SequenceTraits<T> >(nullptr)) == 1);
};
@@ -288,7 +297,7 @@ struct has_FlowTraits<T, true>
static char (&f(...))[2];
public:
- static bool const value = sizeof(f<Derived>(0)) == 2;
+ static bool const value = sizeof(f<Derived>(nullptr)) == 2;
};
@@ -312,10 +321,84 @@ struct has_DocumentListTraits
static double test(...);
public:
- static bool const value = (sizeof(test<DocumentListTraits<T> >(0)) == 1);
+ static bool const value = (sizeof(test<DocumentListTraits<T> >(nullptr))==1);
};
+inline bool isNumber(StringRef S) {
+ static const char OctalChars[] = "01234567";
+ if (S.startswith("0") &&
+ S.drop_front().find_first_not_of(OctalChars) == StringRef::npos)
+ return true;
+
+ if (S.startswith("0o") &&
+ S.drop_front(2).find_first_not_of(OctalChars) == StringRef::npos)
+ return true;
+
+ static const char HexChars[] = "0123456789abcdefABCDEF";
+ if (S.startswith("0x") &&
+ S.drop_front(2).find_first_not_of(HexChars) == StringRef::npos)
+ return true;
+
+ static const char DecChars[] = "0123456789";
+ if (S.find_first_not_of(DecChars) == StringRef::npos)
+ return true;
+
+ if (S.equals(".inf") || S.equals(".Inf") || S.equals(".INF"))
+ return true;
+
+ Regex FloatMatcher("^(\\.[0-9]+|[0-9]+(\\.[0-9]*)?)([eE][-+]?[0-9]+)?$");
+ if (FloatMatcher.match(S))
+ return true;
+
+ return false;
+}
+
+inline bool isNumeric(StringRef S) {
+ if ((S.front() == '-' || S.front() == '+') && isNumber(S.drop_front()))
+ return true;
+
+ if (isNumber(S))
+ return true;
+
+ if (S.equals(".nan") || S.equals(".NaN") || S.equals(".NAN"))
+ return true;
+
+ return false;
+}
+
+inline bool isNull(StringRef S) {
+ return S.equals("null") || S.equals("Null") || S.equals("NULL") ||
+ S.equals("~");
+}
+inline bool isBool(StringRef S) {
+ return S.equals("true") || S.equals("True") || S.equals("TRUE") ||
+ S.equals("false") || S.equals("False") || S.equals("FALSE");
+}
+
+inline bool needsQuotes(StringRef S) {
+ if (S.empty())
+ return true;
+ if (isspace(S.front()) || isspace(S.back()))
+ return true;
+ if (S.front() == ',')
+ return true;
+
+ static const char ScalarSafeChars[] =
+ "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-/^., \t";
+ if (S.find_first_not_of(ScalarSafeChars) != StringRef::npos)
+ return true;
+
+ if (isNull(S))
+ return true;
+ if (isBool(S))
+ return true;
+ if (isNumeric(S))
+ return true;
+
+ return false;
+}
template<typename T>
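
A few concrete cases for the helpers above, for orientation (the outcomes follow directly from the code; the snippet itself is illustrative):

#include "llvm/Support/YAMLTraits.h"
#include <cassert>
using llvm::yaml::needsQuotes;

static void quotingExamples() {
  assert(needsQuotes(""));             // empty scalar
  assert(needsQuotes("true"));         // would read back as a bool
  assert(needsQuotes("0x1F"));         // would read back as a hex number
  assert(needsQuotes(" padded"));      // leading whitespace
  assert(!needsQuotes("hello_world")); // plain scalar, safe unquoted
  assert(!needsQuotes("a.b-c/d"));     // '.', '-' and '/' are in the safe set
}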
@@ -340,7 +423,7 @@ struct unvalidatedMappingTraits : public std::integral_constant<bool,
class IO {
public:
- IO(void *Ctxt=NULL);
+ IO(void *Ctxt=nullptr);
virtual ~IO();
virtual bool outputting() = 0;
@@ -370,7 +453,7 @@ public:
virtual bool bitSetMatch(const char*, bool) = 0;
virtual void endBitSetScalar() = 0;
- virtual void scalarString(StringRef &) = 0;
+ virtual void scalarString(StringRef &, bool) = 0;
virtual void setError(const Twine &) = 0;
@@ -404,6 +487,19 @@ public:
}
}
+ template <typename T>
+ void maskedBitSetCase(T &Val, const char *Str, T ConstVal, T Mask) {
+ if (bitSetMatch(Str, outputting() && (Val & Mask) == ConstVal))
+ Val = Val | ConstVal;
+ }
+
+ template <typename T>
+ void maskedBitSetCase(T &Val, const char *Str, uint32_t ConstVal,
+ uint32_t Mask) {
+ if (bitSetMatch(Str, outputting() && (Val & Mask) == ConstVal))
+ Val = Val | ConstVal;
+ }
+
void *getContext();
void setContext(void *);
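
A hedged sketch of how the new maskedBitSetCase() hooks might be used from a ScalarBitSetTraits specialization; MyFlagsWord and the bit values are illustrative placeholders rather than LLVM definitions:

#include "llvm/Support/YAMLTraits.h"
#include <stdint.h>

// A hypothetical integer-like flag word, mirroring the strong-typedef
// pattern commonly paired with ScalarBitSetTraits.
struct MyFlagsWord {
  MyFlagsWord(uint32_t V = 0) : Value(V) {}
  operator uint32_t() const { return Value; }
  MyFlagsWord &operator=(uint32_t V) { Value = V; return *this; }
  uint32_t Value;
};

namespace llvm {
namespace yaml {
template <> struct ScalarBitSetTraits<MyFlagsWord> {
  static void bitset(IO &io, MyFlagsWord &Flags) {
    // An independent bit uses the existing bitSetCase().
    io.bitSetCase(Flags, "exported", 0x4u);
    // Bits that are only meaningful under a mask (here a two-bit "kind"
    // field in bits 0-1) use maskedBitSetCase(), so the match test is
    // (Flags & Mask) == ConstVal rather than a plain AND.
    io.maskedBitSetCase(Flags, "kind-a", 0x1u, 0x3u);
    io.maskedBitSetCase(Flags, "kind-b", 0x2u, 0x3u);
  }
};
} // end namespace yaml
} // end namespace llvm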
@@ -520,11 +616,11 @@ yamlize(IO &io, T &Val, bool) {
llvm::raw_string_ostream Buffer(Storage);
ScalarTraits<T>::output(Val, io.getContext(), Buffer);
StringRef Str = Buffer.str();
- io.scalarString(Str);
+ io.scalarString(Str, ScalarTraits<T>::mustQuote(Str));
}
else {
StringRef Str;
- io.scalarString(Str);
+ io.scalarString(Str, ScalarTraits<T>::mustQuote(Str));
StringRef Result = ScalarTraits<T>::input(Str, io.getContext(), Val);
if ( !Result.empty() ) {
io.setError(llvm::Twine(Result));
@@ -601,78 +697,91 @@ template<>
struct ScalarTraits<bool> {
static void output(const bool &, void*, llvm::raw_ostream &);
static StringRef input(StringRef, void*, bool &);
+ static bool mustQuote(StringRef) { return false; }
};
template<>
struct ScalarTraits<StringRef> {
static void output(const StringRef &, void*, llvm::raw_ostream &);
static StringRef input(StringRef, void*, StringRef &);
+ static bool mustQuote(StringRef S) { return needsQuotes(S); }
};
template<>
struct ScalarTraits<std::string> {
static void output(const std::string &, void*, llvm::raw_ostream &);
static StringRef input(StringRef, void*, std::string &);
+ static bool mustQuote(StringRef S) { return needsQuotes(S); }
};
template<>
struct ScalarTraits<uint8_t> {
static void output(const uint8_t &, void*, llvm::raw_ostream &);
static StringRef input(StringRef, void*, uint8_t &);
+ static bool mustQuote(StringRef) { return false; }
};
template<>
struct ScalarTraits<uint16_t> {
static void output(const uint16_t &, void*, llvm::raw_ostream &);
static StringRef input(StringRef, void*, uint16_t &);
+ static bool mustQuote(StringRef) { return false; }
};
template<>
struct ScalarTraits<uint32_t> {
static void output(const uint32_t &, void*, llvm::raw_ostream &);
static StringRef input(StringRef, void*, uint32_t &);
+ static bool mustQuote(StringRef) { return false; }
};
template<>
struct ScalarTraits<uint64_t> {
static void output(const uint64_t &, void*, llvm::raw_ostream &);
static StringRef input(StringRef, void*, uint64_t &);
+ static bool mustQuote(StringRef) { return false; }
};
template<>
struct ScalarTraits<int8_t> {
static void output(const int8_t &, void*, llvm::raw_ostream &);
static StringRef input(StringRef, void*, int8_t &);
+ static bool mustQuote(StringRef) { return false; }
};
template<>
struct ScalarTraits<int16_t> {
static void output(const int16_t &, void*, llvm::raw_ostream &);
static StringRef input(StringRef, void*, int16_t &);
+ static bool mustQuote(StringRef) { return false; }
};
template<>
struct ScalarTraits<int32_t> {
static void output(const int32_t &, void*, llvm::raw_ostream &);
static StringRef input(StringRef, void*, int32_t &);
+ static bool mustQuote(StringRef) { return false; }
};
template<>
struct ScalarTraits<int64_t> {
static void output(const int64_t &, void*, llvm::raw_ostream &);
static StringRef input(StringRef, void*, int64_t &);
+ static bool mustQuote(StringRef) { return false; }
};
template<>
struct ScalarTraits<float> {
static void output(const float &, void*, llvm::raw_ostream &);
static StringRef input(StringRef, void*, float &);
+ static bool mustQuote(StringRef) { return false; }
};
template<>
struct ScalarTraits<double> {
static void output(const double &, void*, llvm::raw_ostream &);
static StringRef input(StringRef, void*, double &);
+ static bool mustQuote(StringRef) { return false; }
};
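
End to end, the quoting hook changes what Output emits for string scalars that would otherwise re-parse as numbers. A rough sketch (NamedValue is an illustrative type, and the exact quote character is an implementation detail of Output):

#include "llvm/Support/YAMLTraits.h"
#include "llvm/Support/raw_ostream.h"
#include <stdint.h>
#include <string>

struct NamedValue {   // illustrative only
  std::string Name;
  uint32_t Value;
};

namespace llvm {
namespace yaml {
template <> struct MappingTraits<NamedValue> {
  static void mapping(IO &io, NamedValue &NV) {
    io.mapRequired("name", NV.Name);
    io.mapRequired("value", NV.Value);
  }
};
} // end namespace yaml
} // end namespace llvm

int main() {
  NamedValue NV = {"0x10", 16};
  llvm::yaml::Output Out(llvm::outs());
  // ScalarTraits<std::string>::mustQuote() sees a hex-looking scalar, so the
  // "name" value is written quoted and round-trips as a string, while the
  // integer "value" stays unquoted.
  Out << NV;
  return 0;
}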
@@ -682,7 +791,7 @@ struct ScalarTraits<double> {
template <typename TNorm, typename TFinal>
struct MappingNormalization {
MappingNormalization(IO &i_o, TFinal &Obj)
- : io(i_o), BufPtr(NULL), Result(Obj) {
+ : io(i_o), BufPtr(nullptr), Result(Obj) {
if ( io.outputting() ) {
BufPtr = new (&Buffer) TNorm(io, Obj);
}
@@ -765,9 +874,9 @@ public:
// user-data. The DiagHandler can be specified to provide
// alternative error reporting.
Input(StringRef InputContent,
- void *Ctxt = NULL,
- SourceMgr::DiagHandlerTy DiagHandler = NULL,
- void *DiagHandlerCtxt = NULL);
+ void *Ctxt = nullptr,
+ SourceMgr::DiagHandlerTy DiagHandler = nullptr,
+ void *DiagHandlerCtxt = nullptr);
~Input();
   // Check if there was a syntax or semantic error during parsing.
@@ -794,7 +903,7 @@ private:
bool beginBitSetScalar(bool &) override;
bool bitSetMatch(const char *, bool ) override;
void endBitSetScalar() override;
- void scalarString(StringRef &) override;
+ void scalarString(StringRef &, bool) override;
void setError(const Twine &message) override;
bool canElideEmptySequence() override;
@@ -896,7 +1005,7 @@ private:
///
class Output : public IO {
public:
- Output(llvm::raw_ostream &, void *Ctxt=NULL);
+ Output(llvm::raw_ostream &, void *Ctxt=nullptr);
virtual ~Output();
bool outputting() override;
@@ -919,7 +1028,7 @@ public:
bool beginBitSetScalar(bool &) override;
bool bitSetMatch(const char *, bool ) override;
void endBitSetScalar() override;
- void scalarString(StringRef &) override;
+ void scalarString(StringRef &, bool) override;
void setError(const Twine &message) override;
bool canElideEmptySequence() override;
public:
@@ -990,24 +1099,28 @@ template<>
struct ScalarTraits<Hex8> {
static void output(const Hex8 &, void*, llvm::raw_ostream &);
static StringRef input(StringRef, void*, Hex8 &);
+ static bool mustQuote(StringRef) { return false; }
};
template<>
struct ScalarTraits<Hex16> {
static void output(const Hex16 &, void*, llvm::raw_ostream &);
static StringRef input(StringRef, void*, Hex16 &);
+ static bool mustQuote(StringRef) { return false; }
};
template<>
struct ScalarTraits<Hex32> {
static void output(const Hex32 &, void*, llvm::raw_ostream &);
static StringRef input(StringRef, void*, Hex32 &);
+ static bool mustQuote(StringRef) { return false; }
};
template<>
struct ScalarTraits<Hex64> {
static void output(const Hex64 &, void*, llvm::raw_ostream &);
static StringRef input(StringRef, void*, Hex64 &);
+ static bool mustQuote(StringRef) { return false; }
};
diff --git a/include/llvm/Support/circular_raw_ostream.h b/include/llvm/Support/circular_raw_ostream.h
index 3114199..ee7b89f 100644
--- a/include/llvm/Support/circular_raw_ostream.h
+++ b/include/llvm/Support/circular_raw_ostream.h
@@ -109,10 +109,10 @@ namespace llvm
circular_raw_ostream(raw_ostream &Stream, const char *Header,
size_t BuffSize = 0, bool Owns = REFERENCE_ONLY)
: raw_ostream(/*unbuffered*/true),
- TheStream(0),
+ TheStream(nullptr),
OwnsStream(Owns),
BufferSize(BuffSize),
- BufferArray(0),
+ BufferArray(nullptr),
Filled(false),
Banner(Header) {
if (BufferSize != 0)
@@ -122,9 +122,9 @@ namespace llvm
}
explicit circular_raw_ostream()
: raw_ostream(/*unbuffered*/true),
- TheStream(0),
+ TheStream(nullptr),
OwnsStream(REFERENCE_ONLY),
- BufferArray(0),
+ BufferArray(nullptr),
Filled(false),
Banner("") {
Cur = BufferArray;
diff --git a/include/llvm/Support/raw_ostream.h b/include/llvm/Support/raw_ostream.h
index 0240035..34fbe08 100644
--- a/include/llvm/Support/raw_ostream.h
+++ b/include/llvm/Support/raw_ostream.h
@@ -17,13 +17,18 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
-#include "llvm/Support/FileSystem.h"
namespace llvm {
class format_object_base;
template <typename T>
class SmallVectorImpl;
+ namespace sys {
+ namespace fs {
+ enum OpenFlags : unsigned;
+ }
+ }
+
/// raw_ostream - This class implements an extremely fast bulk output stream
/// that can *only* output to a stream. It does not support seeking, reopening,
/// rewinding, line buffered disciplines etc. It is a simple buffer that outputs
@@ -76,7 +81,7 @@ public:
explicit raw_ostream(bool unbuffered=false)
: BufferMode(unbuffered ? Unbuffered : InternalBuffer) {
// Start out ready to flush.
- OutBufStart = OutBufEnd = OutBufCur = 0;
+ OutBufStart = OutBufEnd = OutBufCur = nullptr;
}
virtual ~raw_ostream();
@@ -102,7 +107,7 @@ public:
size_t GetBufferSize() const {
// If we're supposed to be buffered but haven't actually gotten around
// to allocating the buffer yet, return the value that would be used.
- if (BufferMode != Unbuffered && OutBufStart == 0)
+ if (BufferMode != Unbuffered && OutBufStart == nullptr)
return preferred_buffer_size();
// Otherwise just return the size of the allocated buffer.
@@ -115,7 +120,7 @@ public:
/// set to unbuffered.
void SetUnbuffered() {
flush();
- SetBufferAndMode(0, 0, Unbuffered);
+ SetBufferAndMode(nullptr, 0, Unbuffered);
}
size_t GetNumBytesInBuffer() const {
@@ -157,7 +162,7 @@ public:
size_t Size = Str.size();
// Make sure we can use the fast path.
- if (OutBufCur+Size > OutBufEnd)
+ if (Size > (size_t)(OutBufEnd - OutBufCur))
return write(Str.data(), Size);
memcpy(OutBufCur, Str.data(), Size);
diff --git a/include/llvm/Support/system_error.h b/include/llvm/Support/system_error.h
index 4ca4b06..aa5e9f7 100644
--- a/include/llvm/Support/system_error.h
+++ b/include/llvm/Support/system_error.h
@@ -706,7 +706,7 @@ public:
static void unspecified_bool_true() {}
operator unspecified_bool_type() const { // true if error
- return _val_ == 0 ? 0 : unspecified_bool_true;
+ return _val_ == 0 ? nullptr : unspecified_bool_true;
}
};
@@ -771,7 +771,7 @@ public:
static void unspecified_bool_true() {}
operator unspecified_bool_type() const { // true if error
- return _val_ == 0 ? 0 : unspecified_bool_true;
+ return _val_ == 0 ? nullptr : unspecified_bool_true;
}
};
diff --git a/include/llvm/TableGen/Error.h b/include/llvm/TableGen/Error.h
index 17ac418..3df658d 100644
--- a/include/llvm/TableGen/Error.h
+++ b/include/llvm/TableGen/Error.h
@@ -34,7 +34,6 @@ LLVM_ATTRIBUTE_NORETURN void PrintFatalError(ArrayRef<SMLoc> ErrorLoc,
extern SourceMgr SrcMgr;
extern unsigned ErrorsPrinted;
-
} // end namespace "llvm"
#endif
diff --git a/include/llvm/TableGen/Main.h b/include/llvm/TableGen/Main.h
index 6b51e20..866b986 100644
--- a/include/llvm/TableGen/Main.h
+++ b/include/llvm/TableGen/Main.h
@@ -23,7 +23,6 @@ class raw_ostream;
typedef bool TableGenMainFn(raw_ostream &OS, RecordKeeper &Records);
int TableGenMain(char *argv0, TableGenMainFn *MainFn);
-
}
#endif
diff --git a/include/llvm/TableGen/Record.h b/include/llvm/TableGen/Record.h
index 2bed006..36464d7 100644
--- a/include/llvm/TableGen/Record.h
+++ b/include/llvm/TableGen/Record.h
@@ -87,7 +87,7 @@ private:
public:
RecTyKind getRecTyKind() const { return Kind; }
- RecTy(RecTyKind K) : Kind(K), ListTy(0) {}
+ RecTy(RecTyKind K) : Kind(K), ListTy(nullptr) {}
virtual ~RecTy() {}
virtual std::string getAsString() const = 0;
@@ -102,12 +102,12 @@ public:
ListRecTy *getListTy();
public: // These methods should only be called from subclasses of Init
- virtual Init *convertValue( UnsetInit *UI) { return 0; }
- virtual Init *convertValue( BitInit *BI) { return 0; }
- virtual Init *convertValue( BitsInit *BI) { return 0; }
- virtual Init *convertValue( IntInit *II) { return 0; }
- virtual Init *convertValue(StringInit *SI) { return 0; }
- virtual Init *convertValue( ListInit *LI) { return 0; }
+ virtual Init *convertValue( UnsetInit *UI) { return nullptr; }
+ virtual Init *convertValue( BitInit *BI) { return nullptr; }
+ virtual Init *convertValue( BitsInit *BI) { return nullptr; }
+ virtual Init *convertValue( IntInit *II) { return nullptr; }
+ virtual Init *convertValue(StringInit *SI) { return nullptr; }
+ virtual Init *convertValue( ListInit *LI) { return nullptr; }
virtual Init *convertValue( UnOpInit *UI) {
return convertValue((TypedInit*)UI);
}
@@ -117,10 +117,10 @@ public: // These methods should only be called from subclasses of Init
virtual Init *convertValue( TernOpInit *UI) {
return convertValue((TypedInit*)UI);
}
- virtual Init *convertValue(VarBitInit *VB) { return 0; }
- virtual Init *convertValue( DefInit *DI) { return 0; }
- virtual Init *convertValue( DagInit *DI) { return 0; }
- virtual Init *convertValue( TypedInit *TI) { return 0; }
+ virtual Init *convertValue(VarBitInit *VB) { return nullptr; }
+ virtual Init *convertValue( DefInit *DI) { return nullptr; }
+ virtual Init *convertValue( DagInit *DI) { return nullptr; }
+ virtual Init *convertValue( TypedInit *TI) { return nullptr; }
virtual Init *convertValue( VarInit *VI) {
return convertValue((TypedInit*)VI);
}
@@ -137,12 +137,12 @@ inline raw_ostream &operator<<(raw_ostream &OS, const RecTy &Ty) {
return OS;
}
-
/// BitRecTy - 'bit' - Represent a single bit
///
class BitRecTy : public RecTy {
static BitRecTy Shared;
BitRecTy() : RecTy(BitRecTyKind) {}
+
public:
static bool classof(const RecTy *RT) {
return RT->getRecTyKind() == BitRecTyKind;
@@ -154,11 +154,11 @@ public:
Init *convertValue( BitInit *BI) override { return (Init*)BI; }
Init *convertValue( BitsInit *BI) override;
Init *convertValue( IntInit *II) override;
- Init *convertValue(StringInit *SI) override { return 0; }
- Init *convertValue( ListInit *LI) override { return 0; }
+ Init *convertValue(StringInit *SI) override { return nullptr; }
+ Init *convertValue( ListInit *LI) override { return nullptr; }
Init *convertValue(VarBitInit *VB) override { return (Init*)VB; }
- Init *convertValue( DefInit *DI) override { return 0; }
- Init *convertValue( DagInit *DI) override { return 0; }
+ Init *convertValue( DefInit *DI) override { return nullptr; }
+ Init *convertValue( DagInit *DI) override { return nullptr; }
Init *convertValue( UnOpInit *UI) override { return RecTy::convertValue(UI);}
Init *convertValue( BinOpInit *UI) override { return RecTy::convertValue(UI);}
Init *convertValue( TernOpInit *UI) override {return RecTy::convertValue(UI);}
@@ -174,12 +174,12 @@ public:
bool baseClassOf(const RecTy*) const override;
};
-
/// BitsRecTy - 'bits<n>' - Represent a fixed number of bits
///
class BitsRecTy : public RecTy {
unsigned Size;
explicit BitsRecTy(unsigned Sz) : RecTy(BitsRecTyKind), Size(Sz) {}
+
public:
static bool classof(const RecTy *RT) {
return RT->getRecTyKind() == BitsRecTyKind;
@@ -193,32 +193,32 @@ public:
Init *convertValue( BitInit *UI) override;
Init *convertValue( BitsInit *BI) override;
Init *convertValue( IntInit *II) override;
- Init *convertValue(StringInit *SI) override { return 0; }
- Init *convertValue( ListInit *LI) override { return 0; }
- Init *convertValue(VarBitInit *VB) override { return 0; }
- Init *convertValue( DefInit *DI) override { return 0; }
- Init *convertValue( DagInit *DI) override { return 0; }
- Init *convertValue( UnOpInit *UI) override { return RecTy::convertValue(UI);}
+ Init *convertValue(StringInit *SI) override { return nullptr; }
+ Init *convertValue( ListInit *LI) override { return nullptr; }
+ Init *convertValue(VarBitInit *VB) override { return nullptr; }
+ Init *convertValue( DefInit *DI) override { return nullptr; }
+ Init *convertValue( DagInit *DI) override { return nullptr; }
+ Init *convertValue( UnOpInit *UI) override { return RecTy::convertValue(UI);}
Init *convertValue( BinOpInit *UI) override { return RecTy::convertValue(UI);}
- Init *convertValue( TernOpInit *UI) override {return RecTy::convertValue(UI);}
+ Init *convertValue(TernOpInit *UI) override { return RecTy::convertValue(UI);}
Init *convertValue( TypedInit *TI) override;
- Init *convertValue( VarInit *VI) override{ return RecTy::convertValue(VI);}
- Init *convertValue( FieldInit *FI) override{ return RecTy::convertValue(FI);}
+ Init *convertValue( VarInit *VI) override { return RecTy::convertValue(VI);}
+ Init *convertValue( FieldInit *FI) override { return RecTy::convertValue(FI);}
std::string getAsString() const override;
- bool typeIsConvertibleTo(const RecTy *RHS) const override{
+ bool typeIsConvertibleTo(const RecTy *RHS) const override {
return RHS->baseClassOf(this);
}
bool baseClassOf(const RecTy*) const override;
};
-
/// IntRecTy - 'int' - Represent an integer value of no particular size
///
class IntRecTy : public RecTy {
static IntRecTy Shared;
IntRecTy() : RecTy(IntRecTyKind) {}
+
public:
static bool classof(const RecTy *RT) {
return RT->getRecTyKind() == IntRecTyKind;
@@ -230,11 +230,11 @@ public:
Init *convertValue( BitInit *BI) override;
Init *convertValue( BitsInit *BI) override;
Init *convertValue( IntInit *II) override { return (Init*)II; }
- Init *convertValue(StringInit *SI) override { return 0; }
- Init *convertValue( ListInit *LI) override { return 0; }
- Init *convertValue(VarBitInit *VB) override { return 0; }
- Init *convertValue( DefInit *DI) override { return 0; }
- Init *convertValue( DagInit *DI) override { return 0; }
+ Init *convertValue(StringInit *SI) override { return nullptr; }
+ Init *convertValue( ListInit *LI) override { return nullptr; }
+ Init *convertValue(VarBitInit *VB) override { return nullptr; }
+ Init *convertValue( DefInit *DI) override { return nullptr; }
+ Init *convertValue( DagInit *DI) override { return nullptr; }
Init *convertValue( UnOpInit *UI) override { return RecTy::convertValue(UI);}
Init *convertValue( BinOpInit *UI) override { return RecTy::convertValue(UI);}
Init *convertValue( TernOpInit *UI) override {return RecTy::convertValue(UI);}
@@ -256,6 +256,7 @@ public:
class StringRecTy : public RecTy {
static StringRecTy Shared;
StringRecTy() : RecTy(StringRecTyKind) {}
+
public:
static bool classof(const RecTy *RT) {
return RT->getRecTyKind() == StringRecTyKind;
@@ -264,18 +265,18 @@ public:
static StringRecTy *get() { return &Shared; }
Init *convertValue( UnsetInit *UI) override { return (Init*)UI; }
- Init *convertValue( BitInit *BI) override { return 0; }
- Init *convertValue( BitsInit *BI) override { return 0; }
- Init *convertValue( IntInit *II) override { return 0; }
+ Init *convertValue( BitInit *BI) override { return nullptr; }
+ Init *convertValue( BitsInit *BI) override { return nullptr; }
+ Init *convertValue( IntInit *II) override { return nullptr; }
Init *convertValue(StringInit *SI) override { return (Init*)SI; }
- Init *convertValue( ListInit *LI) override { return 0; }
+ Init *convertValue( ListInit *LI) override { return nullptr; }
Init *convertValue( UnOpInit *BO) override;
Init *convertValue( BinOpInit *BO) override;
Init *convertValue( TernOpInit *BO) override {return RecTy::convertValue(BO);}
- Init *convertValue(VarBitInit *VB) override { return 0; }
- Init *convertValue( DefInit *DI) override { return 0; }
- Init *convertValue( DagInit *DI) override { return 0; }
+ Init *convertValue(VarBitInit *VB) override { return nullptr; }
+ Init *convertValue( DefInit *DI) override { return nullptr; }
+ Init *convertValue( DagInit *DI) override { return nullptr; }
Init *convertValue( TypedInit *TI) override;
Init *convertValue( VarInit *VI) override { return RecTy::convertValue(VI);}
Init *convertValue( FieldInit *FI) override { return RecTy::convertValue(FI);}
@@ -294,6 +295,7 @@ class ListRecTy : public RecTy {
RecTy *Ty;
explicit ListRecTy(RecTy *T) : RecTy(ListRecTyKind), Ty(T) {}
friend ListRecTy *RecTy::getListTy();
+
public:
static bool classof(const RecTy *RT) {
return RT->getRecTyKind() == ListRecTyKind;
@@ -303,24 +305,24 @@ public:
RecTy *getElementType() const { return Ty; }
Init *convertValue( UnsetInit *UI) override { return (Init*)UI; }
- Init *convertValue( BitInit *BI) override { return 0; }
- Init *convertValue( BitsInit *BI) override { return 0; }
- Init *convertValue( IntInit *II) override { return 0; }
- Init *convertValue(StringInit *SI) override { return 0; }
+ Init *convertValue( BitInit *BI) override { return nullptr; }
+ Init *convertValue( BitsInit *BI) override { return nullptr; }
+ Init *convertValue( IntInit *II) override { return nullptr; }
+ Init *convertValue(StringInit *SI) override { return nullptr; }
Init *convertValue( ListInit *LI) override;
- Init *convertValue(VarBitInit *VB) override { return 0; }
- Init *convertValue( DefInit *DI) override { return 0; }
- Init *convertValue( DagInit *DI) override { return 0; }
- Init *convertValue( UnOpInit *UI) override { return RecTy::convertValue(UI);}
+ Init *convertValue(VarBitInit *VB) override { return nullptr; }
+ Init *convertValue( DefInit *DI) override { return nullptr; }
+ Init *convertValue( DagInit *DI) override { return nullptr; }
+ Init *convertValue( UnOpInit *UI) override { return RecTy::convertValue(UI);}
Init *convertValue( BinOpInit *UI) override { return RecTy::convertValue(UI);}
- Init *convertValue( TernOpInit *UI) override{ return RecTy::convertValue(UI);}
+ Init *convertValue(TernOpInit *UI) override { return RecTy::convertValue(UI);}
Init *convertValue( TypedInit *TI) override;
Init *convertValue( VarInit *VI) override { return RecTy::convertValue(VI);}
Init *convertValue( FieldInit *FI) override { return RecTy::convertValue(FI);}
std::string getAsString() const override;
- bool typeIsConvertibleTo(const RecTy *RHS) const override{
+ bool typeIsConvertibleTo(const RecTy *RHS) const override {
return RHS->baseClassOf(this);
}
@@ -332,6 +334,7 @@ public:
class DagRecTy : public RecTy {
static DagRecTy Shared;
DagRecTy() : RecTy(DagRecTyKind) {}
+
public:
static bool classof(const RecTy *RT) {
return RT->getRecTyKind() == DagRecTyKind;
@@ -340,13 +343,13 @@ public:
static DagRecTy *get() { return &Shared; }
Init *convertValue( UnsetInit *UI) override { return (Init*)UI; }
- Init *convertValue( BitInit *BI) override { return 0; }
- Init *convertValue( BitsInit *BI) override { return 0; }
- Init *convertValue( IntInit *II) override { return 0; }
- Init *convertValue(StringInit *SI) override { return 0; }
- Init *convertValue( ListInit *LI) override { return 0; }
- Init *convertValue(VarBitInit *VB) override { return 0; }
- Init *convertValue( DefInit *DI) override { return 0; }
+ Init *convertValue( BitInit *BI) override { return nullptr; }
+ Init *convertValue( BitsInit *BI) override { return nullptr; }
+ Init *convertValue( IntInit *II) override { return nullptr; }
+ Init *convertValue(StringInit *SI) override { return nullptr; }
+ Init *convertValue( ListInit *LI) override { return nullptr; }
+ Init *convertValue(VarBitInit *VB) override { return nullptr; }
+ Init *convertValue( DefInit *DI) override { return nullptr; }
Init *convertValue( UnOpInit *BO) override;
Init *convertValue( BinOpInit *BO) override;
Init *convertValue( TernOpInit *BO) override {return RecTy::convertValue(BO);}
@@ -357,12 +360,11 @@ public:
std::string getAsString() const override { return "dag"; }
- bool typeIsConvertibleTo(const RecTy *RHS) const override{
+ bool typeIsConvertibleTo(const RecTy *RHS) const override {
return RHS->baseClassOf(this);
}
};
-
/// RecordRecTy - '[classname]' - Represent an instance of a class, such as:
/// (R32 X = EAX).
///
@@ -370,6 +372,7 @@ class RecordRecTy : public RecTy {
Record *Rec;
explicit RecordRecTy(Record *R) : RecTy(RecordRecTyKind), Rec(R) {}
friend class Record;
+
public:
static bool classof(const RecTy *RT) {
return RT->getRecTyKind() == RecordRecTyKind;
@@ -380,17 +383,17 @@ public:
Record *getRecord() const { return Rec; }
Init *convertValue( UnsetInit *UI) override { return (Init*)UI; }
- Init *convertValue( BitInit *BI) override { return 0; }
- Init *convertValue( BitsInit *BI) override { return 0; }
- Init *convertValue( IntInit *II) override { return 0; }
- Init *convertValue(StringInit *SI) override { return 0; }
- Init *convertValue( ListInit *LI) override { return 0; }
- Init *convertValue(VarBitInit *VB) override { return 0; }
+ Init *convertValue( BitInit *BI) override { return nullptr; }
+ Init *convertValue( BitsInit *BI) override { return nullptr; }
+ Init *convertValue( IntInit *II) override { return nullptr; }
+ Init *convertValue(StringInit *SI) override { return nullptr; }
+ Init *convertValue( ListInit *LI) override { return nullptr; }
+ Init *convertValue(VarBitInit *VB) override { return nullptr; }
Init *convertValue( UnOpInit *UI) override { return RecTy::convertValue(UI);}
Init *convertValue( BinOpInit *UI) override { return RecTy::convertValue(UI);}
Init *convertValue( TernOpInit *UI) override {return RecTy::convertValue(UI);}
Init *convertValue( DefInit *DI) override;
- Init *convertValue( DagInit *DI) override { return 0; }
+ Init *convertValue( DagInit *DI) override { return nullptr; }
Init *convertValue( TypedInit *VI) override;
Init *convertValue( VarInit *VI) override { return RecTy::convertValue(VI);}
Init *convertValue( FieldInit *FI) override { return RecTy::convertValue(FI);}
@@ -496,7 +499,7 @@ public:
///
virtual Init *
convertInitializerBitRange(const std::vector<unsigned> &Bits) const {
- return 0;
+ return nullptr;
}
/// convertInitListSlice - This method is used to implement the list slice
@@ -506,14 +509,16 @@ public:
///
virtual Init *
convertInitListSlice(const std::vector<unsigned> &Elements) const {
- return 0;
+ return nullptr;
}
/// getFieldType - This method is used to implement the FieldInit class.
/// Implementors of this method should return the type of the named field if
/// they are of record type.
///
- virtual RecTy *getFieldType(const std::string &FieldName) const { return 0; }
+ virtual RecTy *getFieldType(const std::string &FieldName) const {
+ return nullptr;
+ }
/// getFieldInit - This method complements getFieldType to return the
/// initializer for the specified field. If getFieldType returns non-null
@@ -521,7 +526,7 @@ public:
///
virtual Init *getFieldInit(Record &R, const RecordVal *RV,
const std::string &FieldName) const {
- return 0;
+ return nullptr;
}
/// resolveReferences - This method is used by classes that refer to other
@@ -587,7 +592,6 @@ public:
unsigned Elt) const = 0;
};
-
/// UnsetInit - ? - Represents an uninitialized value
///
class UnsetInit : public Init {
@@ -614,7 +618,6 @@ public:
std::string getAsString() const override { return "?"; }
};
-
/// BitInit - true/false - Represent a concrete initializer for a bit.
///
class BitInit : public Init {
@@ -693,7 +696,6 @@ public:
}
};
-
/// IntInit - 7 - Represent an initialization by a literal integer value.
///
class IntInit : public TypedInit {
@@ -734,7 +736,6 @@ public:
}
};
-
/// StringInit - "foo" - Represent an initialization by a string value.
///
class StringInit : public TypedInit {
@@ -779,6 +780,7 @@ public:
///
class ListInit : public TypedInit, public FoldingSetNode {
std::vector<Init*> Values;
+
public:
typedef std::vector<Init*>::const_iterator const_iterator;
@@ -841,7 +843,6 @@ public:
}
};
-
/// OpInit - Base class for operators
///
class OpInit : public TypedInit {
@@ -876,12 +877,12 @@ public:
Init *getBit(unsigned Bit) const override;
};
-
/// UnOpInit - !op (X) - Transform an init.
///
class UnOpInit : public OpInit {
public:
enum UnaryOp { CAST, HEAD, TAIL, EMPTY };
+
private:
UnaryOp Opc;
Init *LHS;
@@ -927,7 +928,8 @@ public:
///
class BinOpInit : public OpInit {
public:
- enum BinaryOp { ADD, SHL, SRA, SRL, STRCONCAT, CONCAT, EQ };
+ enum BinaryOp { ADD, SHL, SRA, SRL, LISTCONCAT, STRCONCAT, CONCAT, EQ };
+
private:
BinaryOp Opc;
Init *LHS, *RHS;
@@ -980,6 +982,7 @@ public:
class TernOpInit : public OpInit {
public:
enum TernaryOp { SUBST, FOREACH, IF };
+
private:
TernaryOp Opc;
Init *LHS, *MHS, *RHS;
@@ -1036,7 +1039,6 @@ public:
std::string getAsString() const override;
};
-
/// VarInit - 'Opcode' - Represent a reference to an entire variable object.
///
class VarInit : public TypedInit {
@@ -1086,7 +1088,6 @@ public:
std::string getAsString() const override { return getName(); }
};
-
/// VarBitInit - Opcode{0} - Represent access to one bit of a variable or field.
///
class VarBitInit : public Init {
@@ -1212,7 +1213,6 @@ public:
}
};
-
/// FieldInit - X.Y - Represent a reference to a subfield of a variable
///
class FieldInit : public TypedInit {
@@ -1339,6 +1339,7 @@ class RecordVal {
RecTy *Ty;
unsigned Prefix;
Init *Value;
+
public:
RecordVal(Init *N, RecTy *T, unsigned P);
RecordVal(const std::string &N, RecTy *T, unsigned P);
@@ -1356,9 +1357,9 @@ public:
bool setValue(Init *V) {
if (V) {
Value = V->convertInitializerTo(Ty);
- return Value == 0;
+ return Value == nullptr;
}
- Value = 0;
+ Value = nullptr;
return false;
}
@@ -1395,18 +1396,17 @@ class Record {
void checkName();
public:
-
// Constructs a record.
explicit Record(const std::string &N, ArrayRef<SMLoc> locs,
RecordKeeper &records, bool Anonymous = false) :
ID(LastID++), Name(StringInit::get(N)), Locs(locs.begin(), locs.end()),
- TrackedRecords(records), TheInit(0), IsAnonymous(Anonymous) {
+ TrackedRecords(records), TheInit(nullptr), IsAnonymous(Anonymous) {
init();
}
explicit Record(Init *N, ArrayRef<SMLoc> locs, RecordKeeper &records,
bool Anonymous = false) :
ID(LastID++), Name(N), Locs(locs.begin(), locs.end()),
- TrackedRecords(records), TheInit(0), IsAnonymous(Anonymous) {
+ TrackedRecords(records), TheInit(nullptr), IsAnonymous(Anonymous) {
init();
}
@@ -1420,10 +1420,8 @@ public:
~Record() {}
-
static unsigned getNewUID() { return LastID++; }
-
unsigned getID() const { return ID; }
const std::string &getName() const;
@@ -1461,7 +1459,7 @@ public:
const RecordVal *getValue(const Init *Name) const {
for (unsigned i = 0, e = Values.size(); i != e; ++i)
if (Values[i].getNameInit() == Name) return &Values[i];
- return 0;
+ return nullptr;
}
const RecordVal *getValue(StringRef Name) const {
return getValue(StringInit::get(Name));
@@ -1469,7 +1467,7 @@ public:
RecordVal *getValue(const Init *Name) {
for (unsigned i = 0, e = Values.size(); i != e; ++i)
if (Values[i].getNameInit() == Name) return &Values[i];
- return 0;
+ return nullptr;
}
RecordVal *getValue(StringRef Name) {
return getValue(StringInit::get(Name));
@@ -1484,7 +1482,7 @@ public:
}
void addValue(const RecordVal &RV) {
- assert(getValue(RV.getNameInit()) == 0 && "Value already added!");
+ assert(getValue(RV.getNameInit()) == nullptr && "Value already added!");
Values.push_back(RV);
if (Values.size() > 1)
// Keep NAME at the end of the list. It makes record dumps a
@@ -1531,7 +1529,7 @@ public:
/// resolveReferences - If there are any field references that refer to fields
/// that have been filled in, we can propagate the values now.
///
- void resolveReferences() { resolveReferencesTo(0); }
+ void resolveReferences() { resolveReferencesTo(nullptr); }
/// resolveReferencesTo - If anything in this record refers to RV, replace the
/// reference to RV with the RHS of RV. If RV is null, we resolve all
@@ -1660,11 +1658,11 @@ public:
Record *getClass(const std::string &Name) const {
std::map<std::string, Record*>::const_iterator I = Classes.find(Name);
- return I == Classes.end() ? 0 : I->second;
+ return I == Classes.end() ? nullptr : I->second;
}
Record *getDef(const std::string &Name) const {
std::map<std::string, Record*>::const_iterator I = Defs.find(Name);
- return I == Defs.end() ? 0 : I->second;
+ return I == Defs.end() ? nullptr : I->second;
}
void addClass(Record *R) {
bool Ins = Classes.insert(std::make_pair(R->getName(), R)).second;
diff --git a/include/llvm/TableGen/StringMatcher.h b/include/llvm/TableGen/StringMatcher.h
index 99cbcad..b438779 100644
--- a/include/llvm/TableGen/StringMatcher.h
+++ b/include/llvm/TableGen/StringMatcher.h
@@ -21,29 +21,29 @@
namespace llvm {
class raw_ostream;
-
+
/// StringMatcher - Given a list of strings and code to execute when they match,
/// output a simple switch tree to classify the input string.
-///
+///
/// If a match is found, the code in Vals[i].second is executed; control must
/// not exit this code fragment. If nothing matches, execution falls through.
///
class StringMatcher {
public:
typedef std::pair<std::string, std::string> StringPair;
+
private:
StringRef StrVariableName;
const std::vector<StringPair> &Matches;
raw_ostream &OS;
-
+
public:
StringMatcher(StringRef strVariableName,
const std::vector<StringPair> &matches, raw_ostream &os)
: StrVariableName(strVariableName), Matches(matches), OS(os) {}
-
+
void Emit(unsigned Indent = 0) const;
-
-
+
private:
bool EmitStringMatcherForChar(const std::vector<const StringPair*> &Matches,
unsigned CharNo, unsigned IndentCount) const;
diff --git a/include/llvm/TableGen/StringToOffsetTable.h b/include/llvm/TableGen/StringToOffsetTable.h
index d94d3a2..c924bd8 100644
--- a/include/llvm/TableGen/StringToOffsetTable.h
+++ b/include/llvm/TableGen/StringToOffsetTable.h
@@ -25,8 +25,8 @@ namespace llvm {
class StringToOffsetTable {
StringMap<unsigned> StringOffset;
std::string AggregateString;
+
public:
-
unsigned GetOrAddStringOffset(StringRef Str, bool appendZero = true) {
StringMapEntry<unsigned> &Entry = StringOffset.GetOrCreateValue(Str, -1U);
if (Entry.getValue() == -1U) {
@@ -36,10 +36,10 @@ public:
if (appendZero)
AggregateString += '\0';
}
-
+
return Entry.getValue();
}
-
+
void EmitString(raw_ostream &O) {
// Escape the string.
SmallString<256> Str;
@@ -55,11 +55,11 @@ public:
}
O << AggregateString[i];
++CharsPrinted;
-
+
// Print escape sequences all together.
if (AggregateString[i] != '\\')
continue;
-
+
assert(i+1 < AggregateString.size() && "Incomplete escape sequence!");
if (isdigit(AggregateString[i+1])) {
assert(isdigit(AggregateString[i+2]) &&
diff --git a/include/llvm/Target/Target.td b/include/llvm/Target/Target.td
index facb89a..7d1f19c 100644
--- a/include/llvm/Target/Target.td
+++ b/include/llvm/Target/Target.td
@@ -950,10 +950,15 @@ class MnemonicAlias<string From, string To, string VariantName = ""> {
/// InstAlias - This defines an alternate assembly syntax that is allowed to
/// match an instruction that has a different (more canonical) assembly
/// representation.
-class InstAlias<string Asm, dag Result, bit Emit = 0b1> {
+class InstAlias<string Asm, dag Result, int Emit = 1> {
string AsmString = Asm; // The .s format to match the instruction with.
dag ResultInst = Result; // The MCInst to generate.
- bit EmitAlias = Emit; // Emit the alias instead of what's aliased.
+
+  // This determines the order in which the InstPrinter detects aliases for
+ // printing. A larger value makes the alias more likely to be
+ // emitted. The Instruction's own definition is notionally 0.5, so 0
+ // disables printing and 1 enables it if there are no conflicting aliases.
+ int EmitPriority = Emit;
// Predicates - Predicates that must be true for this to match.
list<Predicate> Predicates = [];
diff --git a/include/llvm/Target/TargetCallingConv.h b/include/llvm/Target/TargetCallingConv.h
index a660403..a0f2674 100644
--- a/include/llvm/Target/TargetCallingConv.h
+++ b/include/llvm/Target/TargetCallingConv.h
@@ -47,8 +47,12 @@ namespace ISD {
static const uint64_t InAllocaOffs = 12;
static const uint64_t OrigAlign = 0x1FULL<<27;
static const uint64_t OrigAlignOffs = 27;
- static const uint64_t ByValSize = 0xffffffffULL<<32; ///< Struct size
+ static const uint64_t ByValSize = 0x3fffffffULL<<32; ///< Struct size
static const uint64_t ByValSizeOffs = 32;
+      static const uint64_t InConsecutiveRegsLast = 0x1ULL<<62; ///< Last part of a consecutive-register group
+ static const uint64_t InConsecutiveRegsLastOffs = 62;
+      static const uint64_t InConsecutiveRegs = 0x1ULL<<63; ///< Argument is passed in consecutive registers
+ static const uint64_t InConsecutiveRegsOffs = 63;
static const uint64_t One = 1ULL; ///< 1 of this type, for shifts
@@ -80,6 +84,12 @@ namespace ISD {
bool isReturned() const { return Flags & Returned; }
void setReturned() { Flags |= One << ReturnedOffs; }
+ bool isInConsecutiveRegs() const { return Flags & InConsecutiveRegs; }
+ void setInConsecutiveRegs() { Flags |= One << InConsecutiveRegsOffs; }
+
+ bool isInConsecutiveRegsLast() const { return Flags & InConsecutiveRegsLast; }
+ void setInConsecutiveRegsLast() { Flags |= One << InConsecutiveRegsLastOffs; }
+
unsigned getByValAlign() const {
return (unsigned)
((One << ((Flags & ByValAlign) >> ByValAlignOffs)) / 2);
diff --git a/include/llvm/Target/TargetCallingConv.td b/include/llvm/Target/TargetCallingConv.td
index 9d1dc38..8f31e08 100644
--- a/include/llvm/Target/TargetCallingConv.td
+++ b/include/llvm/Target/TargetCallingConv.td
@@ -42,6 +42,11 @@ class CCIf<string predicate, CCAction A> : CCPredicateAction<A> {
class CCIfByVal<CCAction A> : CCIf<"ArgFlags.isByVal()", A> {
}
+/// CCIfConsecutiveRegs - If the current argument has InConsecutiveRegs
+/// parameter attribute, apply Action A.
+class CCIfConsecutiveRegs<CCAction A> : CCIf<"ArgFlags.isInConsecutiveRegs()", A> {
+}
+
/// CCIfCC - Match if the current calling convention is 'CC'.
class CCIfCC<string CC, CCAction A>
: CCIf<!strconcat("State.getCallingConv() == ", CC), A> {}
diff --git a/include/llvm/Target/TargetFrameLowering.h b/include/llvm/Target/TargetFrameLowering.h
index a60147f..7c42e23 100644
--- a/include/llvm/Target/TargetFrameLowering.h
+++ b/include/llvm/Target/TargetFrameLowering.h
@@ -105,7 +105,7 @@ public:
virtual const SpillSlot *
getCalleeSavedSpillSlots(unsigned &NumEntries) const {
NumEntries = 0;
- return 0;
+ return nullptr;
}
/// targetHandlesStackFrameRounding - Returns true if the target is
@@ -190,7 +190,7 @@ public:
/// before PrologEpilogInserter scans the physical registers used to determine
/// what callee saved registers should be spilled. This method is optional.
virtual void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS = NULL) const {
+ RegScavenger *RS = nullptr) const {
}
@@ -200,7 +200,7 @@ public:
/// replaced with direct constants. This method is optional.
///
virtual void processFunctionBeforeFrameFinalized(MachineFunction &MF,
- RegScavenger *RS = NULL) const {
+ RegScavenger *RS = nullptr) const {
}
/// eliminateCallFramePseudoInstr - This method is called during prolog/epilog
diff --git a/include/llvm/Target/TargetInstrInfo.h b/include/llvm/Target/TargetInstrInfo.h
index d4e14f6..165b35f 100644
--- a/include/llvm/Target/TargetInstrInfo.h
+++ b/include/llvm/Target/TargetInstrInfo.h
@@ -66,7 +66,7 @@ public:
/// rematerializable, meaning it has no side effects and requires no operands
/// that aren't always available.
bool isTriviallyReMaterializable(const MachineInstr *MI,
- AliasAnalysis *AA = 0) const {
+ AliasAnalysis *AA = nullptr) const {
return MI->getOpcode() == TargetOpcode::IMPLICIT_DEF ||
(MI->getDesc().isRematerializable() &&
(isReallyTriviallyReMaterializable(MI, AA) ||
@@ -230,7 +230,7 @@ public:
virtual MachineInstr *
convertToThreeAddress(MachineFunction::iterator &MFI,
MachineBasicBlock::iterator &MBBI, LiveVariables *LV) const {
- return 0;
+ return nullptr;
}
/// commuteInstruction - If a target has any instructions that are
@@ -257,7 +257,7 @@ public:
/// aggressive checks.
virtual bool produceSameValue(const MachineInstr *MI0,
const MachineInstr *MI1,
- const MachineRegisterInfo *MRI = 0) const;
+ const MachineRegisterInfo *MRI = nullptr) const;
/// AnalyzeBranch - Analyze the branching code at the end of MBB, returning
/// true if it cannot be understood (e.g. it's a switch dispatch or isn't
@@ -555,7 +555,7 @@ protected:
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const {
- return 0;
+ return nullptr;
}
/// foldMemoryOperandImpl - Target-dependent implementation for
@@ -565,7 +565,7 @@ protected:
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
- return 0;
+ return nullptr;
}
public:
@@ -597,7 +597,7 @@ public:
/// value.
virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
bool UnfoldLoad, bool UnfoldStore,
- unsigned *LoadRegIndex = 0) const {
+ unsigned *LoadRegIndex = nullptr) const {
return 0;
}
@@ -780,7 +780,7 @@ public:
const MachineRegisterInfo *MRI,
unsigned &FoldAsLoadDefReg,
MachineInstr *&DefMI) const {
- return 0;
+ return nullptr;
}
/// FoldImmediate - 'Reg' is known to be defined by a move immediate
@@ -838,7 +838,7 @@ public:
/// PredCost.
virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
- unsigned *PredCost = 0) const;
+ unsigned *PredCost = nullptr) const;
virtual unsigned getPredicationCost(const MachineInstr *MI) const;
@@ -1003,7 +1003,7 @@ public:
/// Create machine specific model for scheduling.
virtual DFAPacketizer*
CreateTargetScheduleState(const TargetMachine*, const ScheduleDAG*) const {
- return NULL;
+ return nullptr;
}
private:
diff --git a/include/llvm/Target/TargetIntrinsicInfo.h b/include/llvm/Target/TargetIntrinsicInfo.h
index ce21349..6de264e 100644
--- a/include/llvm/Target/TargetIntrinsicInfo.h
+++ b/include/llvm/Target/TargetIntrinsicInfo.h
@@ -40,7 +40,7 @@ public:
/// intrinsic, Tys should point to an array of numTys pointers to Type,
/// and must provide exactly one type for each overloaded type in the
/// intrinsic.
- virtual std::string getName(unsigned IID, Type **Tys = 0,
+ virtual std::string getName(unsigned IID, Type **Tys = nullptr,
unsigned numTys = 0) const = 0;
/// Look up target intrinsic by name. Return intrinsic ID or 0 for unknown
@@ -56,7 +56,7 @@ public:
/// Create or insert an LLVM Function declaration for an intrinsic,
/// and return it. The Tys and numTys are for intrinsics with overloaded
/// types. See above for more information.
- virtual Function *getDeclaration(Module *M, unsigned ID, Type **Tys = 0,
+ virtual Function *getDeclaration(Module *M, unsigned ID, Type **Tys = nullptr,
unsigned numTys = 0) const = 0;
};
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 2f6445f..60a4079 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -31,6 +31,8 @@
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetMachine.h"
#include <climits>
@@ -180,6 +182,9 @@ public:
return HasMultipleConditionRegisters;
}
+ /// Return true if the target has BitExtract instructions.
+ bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }
+
/// Return true if a vector of the given type should be split
/// (TypeSplitVector) instead of promoted (TypePromoteInteger) during type
/// legalization.
@@ -322,7 +327,7 @@ public:
bool isTypeLegal(EVT VT) const {
assert(!VT.isSimple() ||
(unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
- return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != 0;
+ return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
}
class ValueTypeActionImpl {
@@ -332,7 +337,7 @@ public:
public:
ValueTypeActionImpl() {
- std::fill(ValueTypeActions, array_endof(ValueTypeActions), 0);
+ std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions), 0);
}
LegalizeTypeAction getTypeAction(MVT VT) const {
@@ -754,7 +759,7 @@ public:
/// alignment error (trap) on the target machine.
virtual bool allowsUnalignedMemoryAccesses(EVT,
unsigned AddrSpace = 0,
- bool * /*Fast*/ = 0) const {
+ bool * /*Fast*/ = nullptr) const {
return false;
}
@@ -896,6 +901,35 @@ public:
/// @}
//===--------------------------------------------------------------------===//
+ /// \name Helpers for load-linked/store-conditional atomic expansion.
+ /// @{
+
+ /// Perform a load-linked operation on Addr, returning a "Value *" with the
+ /// corresponding pointee type. This may entail some non-trivial operations to
+ /// truncate or reconstruct types that will be illegal in the backend. See
+ /// ARMISelLowering for an example implementation.
+ virtual Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
+ AtomicOrdering Ord) const {
+ llvm_unreachable("Load linked unimplemented on this target");
+ }
+
+ /// Perform a store-conditional operation to Addr. Return the status of the
+ /// store. This should be 0 if the store succeeded, non-zero otherwise.
+ virtual Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
+ Value *Addr, AtomicOrdering Ord) const {
+ llvm_unreachable("Store conditional unimplemented on this target");
+ }
+
+ /// Return true if the given (atomic) instruction should be expanded by the
+ /// IR-level AtomicExpandLoadLinked pass into a loop involving
+ /// load-linked/store-conditional pairs. Atomic stores will be expanded in the
+ /// same way as "atomic xchg" operations which ignore their output if needed.
+ virtual bool shouldExpandAtomicInIR(Instruction *Inst) const {
+ return false;
+ }
+
+
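// Illustrative sketch (not from the patch): roughly how a target with
// load-exclusive/store-exclusive instructions might implement the hooks
// above. "MyTargetLowering" and the intrinsic IDs my_ldxr/my_stxr are
// placeholders for a real target's equivalents; ARMISelLowering is the
// reference implementation named in the doc comment.
Value *MyTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                                        AtomicOrdering Ord) const {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  // Assumed target intrinsic performing the exclusive load.
  Function *Ldxr = Intrinsic::getDeclaration(M, Intrinsic::my_ldxr);
  return Builder.CreateCall(Ldxr, Addr, "ll");
}

Value *MyTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                                              Value *Addr,
                                              AtomicOrdering Ord) const {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  // Assumed target intrinsic; returns 0 on success, as required above.
  Function *Stxr = Intrinsic::getDeclaration(M, Intrinsic::my_stxr);
  return Builder.CreateCall2(Stxr, Val, Addr, "sc");
}

bool MyTargetLowering::shouldExpandAtomicInIR(Instruction *Inst) const {
  // Expand only what the ISA cannot handle natively, e.g. RMW and cmpxchg.
  return isa<AtomicRMWInst>(Inst) || isa<AtomicCmpXchgInst>(Inst);
}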
+ //===--------------------------------------------------------------------===//
// TargetLowering Configuration Methods - These methods should be invoked by
// the derived class constructor to configure this object for the target.
//
@@ -975,6 +1009,14 @@ protected:
HasMultipleConditionRegisters = hasManyRegs;
}
+ /// Tells the code generator that the target has BitExtract instructions.
+ /// The code generator will aggressively sink "shift"s into the blocks of
+ /// their users if the users will generate "and" instructions which can be
+ /// combined with "shift" to BitExtract instructions.
+ void setHasExtractBitsInsn(bool hasExtractInsn = true) {
+ HasExtractBitsInsn = hasExtractInsn;
+ }
+
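// Illustrative sketch (not from the patch): a target whose ISA has
// UBFX/SBFX-style bit-extract instructions would opt in from its
// TargetLowering subclass constructor; the class name and base-class
// initializer details are placeholders.
MyTargetLowering::MyTargetLowering(const TargetMachine &TM)
    : TargetLowering(TM /*, object-file lowering argument elided */) {
  // CodeGenPrepare will now sink shifts toward their users so that patterns
  // such as "(x >> C1) & C2" stay in one block and select to one bit-extract.
  setHasExtractBitsInsn();
}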
  /// Tells the code generator not to expand a sequence of operations into
  /// separate sequences that increase the amount of flow control.
void setJumpIsExpensive(bool isExpensive = true) {
@@ -1178,7 +1220,7 @@ public:
int64_t BaseOffs;
bool HasBaseReg;
int64_t Scale;
- AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
+ AddrMode() : BaseGV(nullptr), BaseOffs(0), HasBaseReg(false), Scale(0) {}
};
/// Return true if the addressing mode represented by AM is legal for this
@@ -1394,6 +1436,12 @@ private:
/// the blocks of their users.
bool HasMultipleConditionRegisters;
+ /// Tells the code generator that the target has BitExtract instructions.
+ /// The code generator will aggressively sink "shift"s into the blocks of
+ /// their users if the users will generate "and" instructions which can be
+ /// combined with "shift" to BitExtract instructions.
+ bool HasExtractBitsInsn;
+
/// Tells the code generator not to expand integer divides by constants into a
/// sequence of muls, adds, and shifts. This is a hack until a real cost
/// model is in place. If we ever optimize for size, this will be set to true
@@ -1895,15 +1943,16 @@ public:
/// Determine which of the bits specified in Mask are known to be either zero
/// or one and return them in the KnownZero/KnownOne bitsets.
- virtual void computeMaskedBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth = 0) const;
+ virtual void computeKnownBitsForTargetNode(const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth = 0) const;
/// This method can be implemented by targets that want to expose additional
/// information about sign bits to the DAG Combiner.
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
+ const SelectionDAG &DAG,
unsigned Depth = 0) const;
struct DAGCombinerInfo {
@@ -1968,6 +2017,15 @@ public:
///
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+ /// Return true if it is profitable to move a following shift through this
+ /// node, adjusting any immediate operands as necessary to preserve semantics.
+ /// This transformation may not be desirable if it disrupts a particularly
+ /// auspicious target-specific tree (e.g. bitfield extraction in AArch64).
+ /// By default, it returns true.
+ virtual bool isDesirableToCommuteWithShift(const SDNode *N /*Op*/) const {
+ return true;
+ }
+
/// Return true if the target has native support for the specified value type
/// and it is 'desirable' to use the type for the given node type. e.g. On x86
/// i16 is legal, but undesirable since i16 instruction encodings are longer
@@ -2053,7 +2111,7 @@ public:
unsigned NumFixedArgs;
CallingConv::ID CallConv;
SDValue Callee;
- ArgListTy &Args;
+ ArgListTy *Args;
SelectionDAG &DAG;
SDLoc DL;
ImmutableCallSite *CS;
@@ -2061,33 +2119,96 @@ public:
SmallVector<SDValue, 32> OutVals;
SmallVector<ISD::InputArg, 32> Ins;
+ CallLoweringInfo(SelectionDAG &DAG)
+ : RetTy(nullptr), RetSExt(false), RetZExt(false), IsVarArg(false),
+ IsInReg(false), DoesNotReturn(false), IsReturnValueUsed(true),
+ IsTailCall(false), NumFixedArgs(-1), CallConv(CallingConv::C),
+ Args(nullptr), DAG(DAG), CS(nullptr) {}
+
+ CallLoweringInfo &setDebugLoc(SDLoc dl) {
+ DL = dl;
+ return *this;
+ }
+
+ CallLoweringInfo &setChain(SDValue InChain) {
+ Chain = InChain;
+ return *this;
+ }
+
+ CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
+ SDValue Target, ArgListTy *ArgsList,
+ unsigned FixedArgs = -1) {
+ RetTy = ResultType;
+ Callee = Target;
+ CallConv = CC;
+ Args = ArgsList;
+ NumFixedArgs =
+ (FixedArgs == static_cast<unsigned>(-1) ? Args->size() : FixedArgs);
+ return *this;
+ }
+
+ CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
+ SDValue Target, ArgListTy *ArgsList,
+ ImmutableCallSite &Call) {
+ RetTy = ResultType;
+
+ IsInReg = Call.paramHasAttr(0, Attribute::InReg);
+ DoesNotReturn = Call.doesNotReturn();
+ IsVarArg = FTy->isVarArg();
+ IsReturnValueUsed = !Call.getInstruction()->use_empty();
+ RetSExt = Call.paramHasAttr(0, Attribute::SExt);
+ RetZExt = Call.paramHasAttr(0, Attribute::ZExt);
+
+ Callee = Target;
+
+ CallConv = Call.getCallingConv();
+ NumFixedArgs = FTy->getNumParams();
+ Args = ArgsList;
+
+ CS = &Call;
- /// Constructs a call lowering context based on the ImmutableCallSite \p cs.
- CallLoweringInfo(SDValue chain, Type *retTy,
- FunctionType *FTy, bool isTailCall, SDValue callee,
- ArgListTy &args, SelectionDAG &dag, SDLoc dl,
- ImmutableCallSite &cs)
- : Chain(chain), RetTy(retTy), RetSExt(cs.paramHasAttr(0, Attribute::SExt)),
- RetZExt(cs.paramHasAttr(0, Attribute::ZExt)), IsVarArg(FTy->isVarArg()),
- IsInReg(cs.paramHasAttr(0, Attribute::InReg)),
- DoesNotReturn(cs.doesNotReturn()),
- IsReturnValueUsed(!cs.getInstruction()->use_empty()),
- IsTailCall(isTailCall), NumFixedArgs(FTy->getNumParams()),
- CallConv(cs.getCallingConv()), Callee(callee), Args(args), DAG(dag),
- DL(dl), CS(&cs) {}
-
- /// Constructs a call lowering context based on the provided call
- /// information.
- CallLoweringInfo(SDValue chain, Type *retTy, bool retSExt, bool retZExt,
- bool isVarArg, bool isInReg, unsigned numFixedArgs,
- CallingConv::ID callConv, bool isTailCall,
- bool doesNotReturn, bool isReturnValueUsed, SDValue callee,
- ArgListTy &args, SelectionDAG &dag, SDLoc dl)
- : Chain(chain), RetTy(retTy), RetSExt(retSExt), RetZExt(retZExt),
- IsVarArg(isVarArg), IsInReg(isInReg), DoesNotReturn(doesNotReturn),
- IsReturnValueUsed(isReturnValueUsed), IsTailCall(isTailCall),
- NumFixedArgs(numFixedArgs), CallConv(callConv), Callee(callee),
- Args(args), DAG(dag), DL(dl), CS(NULL) {}
+ return *this;
+ }
+
+ CallLoweringInfo &setInRegister(bool Value = true) {
+ IsInReg = Value;
+ return *this;
+ }
+
+ CallLoweringInfo &setNoReturn(bool Value = true) {
+ DoesNotReturn = Value;
+ return *this;
+ }
+
+ CallLoweringInfo &setVarArg(bool Value = true) {
+ IsVarArg = Value;
+ return *this;
+ }
+
+ CallLoweringInfo &setTailCall(bool Value = true) {
+ IsTailCall = Value;
+ return *this;
+ }
+
+ CallLoweringInfo &setDiscardResult(bool Value = true) {
+ IsReturnValueUsed = !Value;
+ return *this;
+ }
+
+ CallLoweringInfo &setSExtResult(bool Value = true) {
+ RetSExt = Value;
+ return *this;
+ }
+
+ CallLoweringInfo &setZExtResult(bool Value = true) {
+ RetZExt = Value;
+ return *this;
+ }
+
+ ArgListTy &getArgs() {
+ assert(Args && "Arguments must be set before accessing them");
+ return *Args;
+ }
};
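// Illustrative usage sketch (not from the patch): with the chained setters a
// caller builds the descriptor fluently instead of passing the many positional
// constructor arguments removed above. DAG, dl, Chain, RetTy, Callee, Args and
// TLI (the TargetLowering instance) stand for values the caller already has;
// the updated LowerCallTo entry point that consumes it is not shown in this
// hunk.
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(dl)
    .setChain(Chain)
    .setCallee(CallingConv::C, RetTy, Callee, &Args)
    .setTailCall(false)
    .setSExtResult(ShouldSignExtend)
    .setZExtResult(ShouldZeroExtend);
std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);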
/// This function lowers an abstract call to a function into an actual call.
@@ -2156,6 +2277,13 @@ public:
return "__clear_cache";
}
+ /// Return the register ID of the name passed in. Used by named register
+ /// global variables extension. There is no target-independent behaviour
+ /// so the default action is to bail.
+ virtual unsigned getRegisterByName(const char* RegName, EVT VT) const {
+ report_fatal_error("Named registers not implemented for this target");
+ }
+
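// Illustrative sketch (not from the patch): a typical override mapping the
// names accepted by the named-register global-variable extension onto
// physical registers; "MyTarget::SP" is a placeholder register enum value.
unsigned MyTargetLowering::getRegisterByName(const char *RegName, EVT VT) const {
  unsigned Reg = StringSwitch<unsigned>(RegName)
                     .Case("sp", MyTarget::SP)
                     .Default(0);
  if (Reg)
    return Reg;
  report_fatal_error("Invalid register name global variable");
}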
/// Return the type that should be used to zero or sign extend a
/// zeroext/signext integer argument or return value. FIXME: Most C calling
/// convention requires the return type to be promoted, but this is not true
@@ -2168,10 +2296,19 @@ public:
return VT.bitsLT(MinVT) ? MinVT : VT;
}
+ /// For some targets, an LLVM struct type must be broken down into multiple
+ /// simple types, but the calling convention specifies that the entire struct
+ /// must be passed in a block of consecutive registers.
+ virtual bool
+ functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv,
+ bool isVarArg) const {
+ return false;
+ }
+
/// Returns a 0 terminated array of registers that can be safely used as
/// scratch registers.
- virtual const uint16_t *getScratchRegisters(CallingConv::ID CC) const {
- return NULL;
+ virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
+ return nullptr;
}
/// This callback is used to prepare for a volatile or atomic load.
@@ -2232,7 +2369,7 @@ public:
/// target does not support "fast" ISel.
virtual FastISel *createFastISel(FunctionLoweringInfo &,
const TargetLibraryInfo *) const {
- return 0;
+ return nullptr;
}
@@ -2306,7 +2443,7 @@ public:
AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
: InlineAsm::ConstraintInfo(info),
ConstraintType(TargetLowering::C_Unknown),
- CallOperandVal(0), ConstraintVT(MVT::Other) {
+ CallOperandVal(nullptr), ConstraintVT(MVT::Other) {
}
};
@@ -2334,7 +2471,7 @@ public:
/// Op, otherwise an empty SDValue can be passed.
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
SDValue Op,
- SelectionDAG *DAG = 0) const;
+ SelectionDAG *DAG = nullptr) const;
/// Given a constraint, return the type of constraint it is for this target.
virtual ConstraintType getConstraintType(const std::string &Constraint) const;
@@ -2368,10 +2505,30 @@ public:
//
SDValue BuildExactSDIV(SDValue Op1, SDValue Op2, SDLoc dl,
SelectionDAG &DAG) const;
- SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
- std::vector<SDNode*> *Created) const;
- SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
- std::vector<SDNode*> *Created) const;
+ SDValue BuildSDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
+ bool IsAfterLegalization,
+ std::vector<SDNode *> *Created) const;
+ SDValue BuildUDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
+ bool IsAfterLegalization,
+ std::vector<SDNode *> *Created) const;
+
+ //===--------------------------------------------------------------------===//
+ // Legalization utility functions
+ //
+
+ /// Expand a MUL into two nodes. One that computes the high bits of
+ /// the result and one that computes the low bits.
+ /// \param HiLoVT The value type to use for the Lo and Hi nodes.
+ /// \param LL Low bits of the LHS of the MUL. You can use this parameter
+ /// if you want to control how low bits are extracted from the LHS.
+ /// \param LH High bits of the LHS of the MUL. See LL for meaning.
+ /// \param RL Low bits of the RHS of the MUL. See LL for meaning.
+ /// \param RH High bits of the RHS of the MUL. See LL for meaning.
+ /// \returns true if the node has been expanded, false if it has not.
+ bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
+ SelectionDAG &DAG, SDValue LL = SDValue(),
+ SDValue LH = SDValue(), SDValue RL = SDValue(),
+ SDValue RH = SDValue()) const;
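// Illustrative usage sketch (not from the patch): a typical call site in a
// target's ReplaceNodeResults when a wide multiply is illegal and has to be
// rebuilt from the legal half-width type; N, DAG and Results are assumed to
// be in scope.
SDValue Lo, Hi;
if (expandMUL(N, Lo, Hi, MVT::i32, DAG)) {
  // Reassemble the i64 result from the two i32 halves.
  Results.push_back(DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i64, Lo, Hi));
}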
//===--------------------------------------------------------------------===//
// Instruction Emitting Hooks
diff --git a/include/llvm/Target/TargetLoweringObjectFile.h b/include/llvm/Target/TargetLoweringObjectFile.h
index cdb7ea6..374a163 100644
--- a/include/llvm/Target/TargetLoweringObjectFile.h
+++ b/include/llvm/Target/TargetLoweringObjectFile.h
@@ -44,7 +44,7 @@ class TargetLoweringObjectFile : public MCObjectFileInfo {
public:
MCContext &getContext() const { return *Ctx; }
- TargetLoweringObjectFile() : MCObjectFileInfo(), Ctx(0), DL(0) {}
+ TargetLoweringObjectFile() : MCObjectFileInfo(), Ctx(nullptr), DL(nullptr) {}
virtual ~TargetLoweringObjectFile();
@@ -104,7 +104,7 @@ public:
virtual const MCSection *getSpecialCasedSectionGlobals(const GlobalValue *GV,
SectionKind Kind,
Mangler &Mang) const {
- return 0;
+ return nullptr;
}
/// Return an MCExpr to use for a reference to the specified global variable
@@ -130,14 +130,15 @@ public:
getTTypeReference(const MCSymbolRefExpr *Sym, unsigned Encoding,
MCStreamer &Streamer) const;
- virtual const MCSection *
- getStaticCtorSection(unsigned Priority = 65535) const {
- (void)Priority;
+ virtual const MCSection *getStaticCtorSection(unsigned Priority,
+ const MCSymbol *KeySym,
+ const MCSection *KeySec) const {
return StaticCtorSection;
}
- virtual const MCSection *
- getStaticDtorSection(unsigned Priority = 65535) const {
- (void)Priority;
+
+ virtual const MCSection *getStaticDtorSection(unsigned Priority,
+ const MCSymbol *KeySym,
+ const MCSection *KeySec) const {
return StaticDtorSection;
}
@@ -148,7 +149,7 @@ public:
virtual const MCExpr *
getExecutableRelativeSymbol(const ConstantExpr *CE, Mangler &Mang,
const TargetMachine &TM) const {
- return 0;
+ return nullptr;
}
/// \brief True if the section is atomized using the symbols in it.
diff --git a/include/llvm/Target/TargetMachine.h b/include/llvm/Target/TargetMachine.h
index ce3f866..b263c57 100644
--- a/include/llvm/Target/TargetMachine.h
+++ b/include/llvm/Target/TargetMachine.h
@@ -84,11 +84,6 @@ protected: // Can only create subclasses.
///
const MCAsmInfo *AsmInfo;
- unsigned MCRelaxAll : 1;
- unsigned MCNoExecStack : 1;
- unsigned MCSaveTempLabels : 1;
- unsigned MCUseCFI : 1;
- unsigned MCUseDwarfDirectory : 1;
unsigned RequireStructuredCFG : 1;
public:
@@ -102,7 +97,9 @@ public:
/// getSubtargetImpl - virtual method implemented by subclasses that returns
/// a reference to that target's TargetSubtargetInfo-derived member variable.
- virtual const TargetSubtargetInfo *getSubtargetImpl() const { return 0; }
+ virtual const TargetSubtargetInfo *getSubtargetImpl() const {
+ return nullptr;
+ }
mutable TargetOptions Options;
@@ -118,11 +115,15 @@ public:
//
// N.B. These objects may change during compilation. It's not safe to cache
// them between functions.
- virtual const TargetInstrInfo *getInstrInfo() const { return 0; }
- virtual const TargetFrameLowering *getFrameLowering() const { return 0; }
- virtual const TargetLowering *getTargetLowering() const { return 0; }
- virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const{ return 0; }
- virtual const DataLayout *getDataLayout() const { return 0; }
+ virtual const TargetInstrInfo *getInstrInfo() const { return nullptr; }
+ virtual const TargetFrameLowering *getFrameLowering() const {
+ return nullptr;
+ }
+ virtual const TargetLowering *getTargetLowering() const { return nullptr; }
+ virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const {
+ return nullptr;
+ }
+ virtual const DataLayout *getDataLayout() const { return nullptr; }
/// getMCAsmInfo - Return target specific asm information.
///
@@ -139,64 +140,28 @@ public:
/// not, return null. This is kept separate from RegInfo until RegInfo has
/// details of graph coloring register allocation removed from it.
///
- virtual const TargetRegisterInfo *getRegisterInfo() const { return 0; }
+ virtual const TargetRegisterInfo *getRegisterInfo() const { return nullptr; }
/// getIntrinsicInfo - If intrinsic information is available, return it. If
/// not, return null.
///
- virtual const TargetIntrinsicInfo *getIntrinsicInfo() const { return 0; }
+ virtual const TargetIntrinsicInfo *getIntrinsicInfo() const { return nullptr;}
/// getJITInfo - If this target supports a JIT, return information for it,
/// otherwise return null.
///
- virtual TargetJITInfo *getJITInfo() { return 0; }
+ virtual TargetJITInfo *getJITInfo() { return nullptr; }
/// getInstrItineraryData - Returns instruction itinerary data for the target
/// or specific subtarget.
///
virtual const InstrItineraryData *getInstrItineraryData() const {
- return 0;
+ return nullptr;
}
bool requiresStructuredCFG() const { return RequireStructuredCFG; }
void setRequiresStructuredCFG(bool Value) { RequireStructuredCFG = Value; }
- /// hasMCRelaxAll - Check whether all machine code instructions should be
- /// relaxed.
- bool hasMCRelaxAll() const { return MCRelaxAll; }
-
- /// setMCRelaxAll - Set whether all machine code instructions should be
- /// relaxed.
- void setMCRelaxAll(bool Value) { MCRelaxAll = Value; }
-
- /// hasMCSaveTempLabels - Check whether temporary labels will be preserved
- /// (i.e., not treated as temporary).
- bool hasMCSaveTempLabels() const { return MCSaveTempLabels; }
-
- /// setMCSaveTempLabels - Set whether temporary labels will be preserved
- /// (i.e., not treated as temporary).
- void setMCSaveTempLabels(bool Value) { MCSaveTempLabels = Value; }
-
- /// hasMCNoExecStack - Check whether an executable stack is not needed.
- bool hasMCNoExecStack() const { return MCNoExecStack; }
-
- /// setMCNoExecStack - Set whether an executabel stack is not needed.
- void setMCNoExecStack(bool Value) { MCNoExecStack = Value; }
-
- /// hasMCUseCFI - Check whether we should use dwarf's .cfi_* directives.
- bool hasMCUseCFI() const { return MCUseCFI; }
-
- /// setMCUseCFI - Set whether all we should use dwarf's .cfi_* directives.
- void setMCUseCFI(bool Value) { MCUseCFI = Value; }
-
- /// hasMCUseDwarfDirectory - Check whether we should use .file directives with
- /// explicit directories.
- bool hasMCUseDwarfDirectory() const { return MCUseDwarfDirectory; }
-
- /// setMCUseDwarfDirectory - Set whether all we should use .file directives
- /// with explicit directories.
- void setMCUseDwarfDirectory(bool Value) { MCUseDwarfDirectory = Value; }
-
/// getRelocationModel - Returns the code generation relocation model. The
/// choices are static, PIC, and dynamic-no-pic, and target default.
Reloc::Model getRelocationModel() const;
@@ -222,26 +187,26 @@ public:
/// getAsmVerbosityDefault - Returns the default value of asm verbosity.
///
- static bool getAsmVerbosityDefault();
+ bool getAsmVerbosityDefault() const;
/// setAsmVerbosityDefault - Set the default value of asm verbosity. Default
/// is false.
- static void setAsmVerbosityDefault(bool);
+ void setAsmVerbosityDefault(bool);
/// getDataSections - Return true if data objects should be emitted into their
  /// own section, corresponding to -fdata-sections.
- static bool getDataSections();
+ bool getDataSections() const;
/// getFunctionSections - Return true if functions should be emitted into
/// their own section, corresponding to -ffunction-sections.
- static bool getFunctionSections();
+ bool getFunctionSections() const;
  /// setDataSections - Set if the data are emitted into separate sections.
- static void setDataSections(bool);
+ void setDataSections(bool);
  /// setFunctionSections - Set if the functions are emitted into separate
/// sections.
- static void setFunctionSections(bool);
+ void setFunctionSections(bool);
/// \brief Register analysis passes for this target with a pass manager.
virtual void addAnalysisPasses(PassManagerBase &) {}
@@ -263,8 +228,8 @@ public:
formatted_raw_ostream &,
CodeGenFileType,
bool /*DisableVerify*/ = true,
- AnalysisID /*StartAfter*/ = 0,
- AnalysisID /*StopAfter*/ = 0) {
+ AnalysisID /*StartAfter*/ = nullptr,
+ AnalysisID /*StopAfter*/ = nullptr) {
return true;
}
@@ -323,8 +288,8 @@ public:
/// generation.
bool addPassesToEmitFile(PassManagerBase &PM, formatted_raw_ostream &Out,
CodeGenFileType FileType, bool DisableVerify = true,
- AnalysisID StartAfter = 0,
- AnalysisID StopAfter = 0) override;
+ AnalysisID StartAfter = nullptr,
+ AnalysisID StopAfter = nullptr) override;
/// addPassesToEmitMachineCode - Add passes to the specified pass manager to
/// get machine code emitted. This uses a JITCodeEmitter object to handle
diff --git a/include/llvm/Target/TargetOptions.h b/include/llvm/Target/TargetOptions.h
index 1f87343..636eaf5 100644
--- a/include/llvm/Target/TargetOptions.h
+++ b/include/llvm/Target/TargetOptions.h
@@ -15,6 +15,7 @@
#ifndef LLVM_TARGET_TARGETOPTIONS_H
#define LLVM_TARGET_TARGETOPTIONS_H
+#include "llvm/MC/MCTargetOptions.h"
#include <string>
namespace llvm {
@@ -49,9 +50,10 @@ namespace llvm {
JITEmitDebugInfoToDisk(false), GuaranteedTailCallOpt(false),
DisableTailCalls(false), StackAlignmentOverride(0),
EnableFastISel(false), PositionIndependentExecutable(false),
- EnableSegmentedStacks(false), UseInitArray(false),
- DisableIntegratedAS(false), CompressDebugSections(false),
- TrapFuncName(""), FloatABIType(FloatABI::Default),
+ UseInitArray(false), DisableIntegratedAS(false),
+ CompressDebugSections(false), FunctionSections(false),
+ DataSections(false), TrapUnreachable(false), TrapFuncName(""),
+ FloatABIType(FloatABI::Default),
AllowFPOpFusion(FPOpFusion::Standard) {}
/// PrintMachineCode - This flag is enabled when the -print-machineinstrs
@@ -152,8 +154,6 @@ namespace llvm {
/// if the relocation model is anything other than PIC.
unsigned PositionIndependentExecutable : 1;
- unsigned EnableSegmentedStacks : 1;
-
/// UseInitArray - Use .init_array instead of .ctors for static
/// constructors.
unsigned UseInitArray : 1;
@@ -164,6 +164,15 @@ namespace llvm {
/// Compress DWARF debug sections.
unsigned CompressDebugSections : 1;
+ /// Emit functions into separate sections.
+ unsigned FunctionSections : 1;
+
+ /// Emit data into separate sections.
+ unsigned DataSections : 1;
+
+ /// Emit target-specific trap instruction for 'unreachable' IR instructions.
+ unsigned TrapUnreachable : 1;
+
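// Illustrative usage sketch (not from the patch): these flags move the
// section/trap configuration onto TargetOptions, replacing the static
// TargetMachine setters removed elsewhere in this patch; the
// createTargetMachine call is abbreviated.
TargetOptions Options;
Options.FunctionSections = true; // -ffunction-sections
Options.DataSections = true;     // -fdata-sections
Options.TrapUnreachable = true;  // lower 'unreachable' to a trap instruction
// TargetMachine *TM = TheTarget->createTargetMachine(TripleStr, CPU, Features,
//                                                    Options, RM, CM, OptLevel);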
/// getTrapFunctionName - If this returns a non-empty string, this means
/// isel should lower Intrinsic::trap to a call to the specified function
/// name instead of an ISD::TRAP node.
@@ -195,6 +204,9 @@ namespace llvm {
/// via the llvm.fma.* intrinsic) will always be honored, regardless of
/// the value of this option.
FPOpFusion::FPOpFusionMode AllowFPOpFusion;
+
+ /// Machine level options.
+ MCTargetOptions MCOptions;
};
// Comparison operators:
@@ -217,11 +229,12 @@ inline bool operator==(const TargetOptions &LHS,
ARE_EQUAL(StackAlignmentOverride) &&
ARE_EQUAL(EnableFastISel) &&
ARE_EQUAL(PositionIndependentExecutable) &&
- ARE_EQUAL(EnableSegmentedStacks) &&
ARE_EQUAL(UseInitArray) &&
+ ARE_EQUAL(TrapUnreachable) &&
ARE_EQUAL(TrapFuncName) &&
ARE_EQUAL(FloatABIType) &&
- ARE_EQUAL(AllowFPOpFusion);
+ ARE_EQUAL(AllowFPOpFusion) &&
+ ARE_EQUAL(MCOptions);
#undef ARE_EQUAL
}
diff --git a/include/llvm/Target/TargetRegisterInfo.h b/include/llvm/Target/TargetRegisterInfo.h
index b0c21c1..a162297 100644
--- a/include/llvm/Target/TargetRegisterInfo.h
+++ b/include/llvm/Target/TargetRegisterInfo.h
@@ -174,7 +174,7 @@ public:
/// isASubClass - return true if this TargetRegisterClass is a subset
/// class of at least one other TargetRegisterClass.
bool isASubClass() const {
- return SuperClasses[0] != 0;
+ return SuperClasses[0] != nullptr;
}
/// getRawAllocationOrder - Returns the preferred order for allocating
@@ -317,7 +317,7 @@ public:
/// indicating if a register is allocatable or not. If a register class is
/// specified, returns the subset for the class.
BitVector getAllocatableSet(const MachineFunction &MF,
- const TargetRegisterClass *RC = NULL) const;
+ const TargetRegisterClass *RC = nullptr) const;
/// getCostPerUse - Return the additional cost of using this register instead
/// of other registers in its class.
@@ -420,8 +420,8 @@ public:
/// order of desired callee-save stack frame offset. The first register is
/// closest to the incoming stack pointer if stack grows down, and vice versa.
///
- virtual const MCPhysReg* getCalleeSavedRegs(const MachineFunction *MF = 0)
- const = 0;
+ virtual const MCPhysReg*
+ getCalleeSavedRegs(const MachineFunction *MF = nullptr) const = 0;
/// getCallPreservedMask - Return a mask of call-preserved registers for the
/// given calling convention on the current sub-target. The mask should
@@ -443,7 +443,7 @@ public:
///
virtual const uint32_t *getCallPreservedMask(CallingConv::ID) const {
// The default mask clobbers everything. All targets should override.
- return 0;
+ return nullptr;
}
/// getReservedRegs - Returns a bitset indexed by physical register number
@@ -651,7 +651,7 @@ public:
ArrayRef<MCPhysReg> Order,
SmallVectorImpl<MCPhysReg> &Hints,
const MachineFunction &MF,
- const VirtRegMap *VRM = 0) const;
+ const VirtRegMap *VRM = nullptr) const;
/// avoidWriteAfterWrite - Return true if the register allocator should avoid
/// writing a register from RC in two consecutive instructions.
@@ -805,7 +805,7 @@ public:
/// instruction. FIOperandNum is the FI operand number.
virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI,
int SPAdj, unsigned FIOperandNum,
- RegScavenger *RS = NULL) const = 0;
+ RegScavenger *RS = nullptr) const = 0;
//===--------------------------------------------------------------------===//
/// Debug information queries.
@@ -874,7 +874,7 @@ public:
Mask += RCMaskWords;
SubReg = *Idx++;
if (!SubReg)
- Idx = 0;
+ Idx = nullptr;
}
};
@@ -902,7 +902,7 @@ class PrintReg {
unsigned Reg;
unsigned SubIdx;
public:
- explicit PrintReg(unsigned reg, const TargetRegisterInfo *tri = 0,
+ explicit PrintReg(unsigned reg, const TargetRegisterInfo *tri = nullptr,
unsigned subidx = 0)
: TRI(tri), Reg(reg), SubIdx(subidx) {}
void print(raw_ostream&) const;
diff --git a/include/llvm/Target/TargetSchedule.td b/include/llvm/Target/TargetSchedule.td
index b4d0c44..e6eeb88 100644
--- a/include/llvm/Target/TargetSchedule.td
+++ b/include/llvm/Target/TargetSchedule.td
@@ -79,6 +79,8 @@ class SchedMachineModel {
int MinLatency = -1; // Determines which instructions are allowed in a group.
// (-1) inorder (0) ooo, (1): inorder +var latencies.
int MicroOpBufferSize = -1; // Max micro-ops that can be buffered.
+ int LoopMicroOpBufferSize = -1; // Max micro-ops that can be buffered for
+ // optimized loop dispatch/execution.
int LoadLatency = -1; // Cycles for loads to access the cache.
int HighLatency = -1; // Approximation of cycles for "high latency" ops.
int MispredictPenalty = -1; // Extra cycles for a mispredicted branch.
diff --git a/include/llvm/Target/TargetSubtargetInfo.h b/include/llvm/Target/TargetSubtargetInfo.h
index 1b2e06a..c0c342b 100644
--- a/include/llvm/Target/TargetSubtargetInfo.h
+++ b/include/llvm/Target/TargetSubtargetInfo.h
@@ -76,6 +76,11 @@ public:
MachineInstr *end,
unsigned NumRegionInstrs) const {}
+ // \brief Perform target specific adjustments to the latency of a schedule
+ // dependency.
+ virtual void adjustSchedDependency(SUnit *def, SUnit *use,
+ SDep& dep) const { }
+
// enablePostRAScheduler - If the target can benefit from post-regalloc
// scheduling and the specified optimization level meets the requirement
// return true to enable post-register-allocation scheduling. In
@@ -84,15 +89,14 @@ public:
virtual bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
AntiDepBreakMode& Mode,
RegClassVector& CriticalPathRCs) const;
- // adjustSchedDependency - Perform target specific adjustments to
- // the latency of a schedule dependency.
- virtual void adjustSchedDependency(SUnit *def, SUnit *use,
- SDep& dep) const { }
/// \brief Enable use of alias analysis during code generation (during MI
/// scheduling, DAGCombine, etc.).
virtual bool useAA() const;
+ /// \brief Enable the use of the early if conversion pass.
+ virtual bool enableEarlyIfConversion() const { return false; }
+
/// \brief Reset the features for the subtarget.
virtual void resetSubtargetFeatures(const MachineFunction *MF) { }
};
diff --git a/include/llvm/Transforms/IPO.h b/include/llvm/Transforms/IPO.h
index 334fb1c..ce1a7d6 100644
--- a/include/llvm/Transforms/IPO.h
+++ b/include/llvm/Transforms/IPO.h
@@ -58,21 +58,18 @@ ModulePass *createStripDeadDebugInfoPass();
///
ModulePass *createConstantMergePass();
-
//===----------------------------------------------------------------------===//
/// createGlobalOptimizerPass - This function returns a new pass that optimizes
/// non-address taken internal globals.
///
ModulePass *createGlobalOptimizerPass();
-
//===----------------------------------------------------------------------===//
/// createGlobalDCEPass - This transform is designed to eliminate unreachable
/// internal globals (functions or global variables)
///
ModulePass *createGlobalDCEPass();
-
//===----------------------------------------------------------------------===//
/// createGVExtractionPass - If deleteFn is true, this pass deletes
/// the specified global values. Otherwise, it deletes as much of the module as
diff --git a/include/llvm/Transforms/IPO/PassManagerBuilder.h b/include/llvm/Transforms/IPO/PassManagerBuilder.h
index 42b6b27..023de08 100644
--- a/include/llvm/Transforms/IPO/PassManagerBuilder.h
+++ b/include/llvm/Transforms/IPO/PassManagerBuilder.h
@@ -55,7 +55,6 @@ using legacy::FunctionPassManager;
/// ...
class PassManagerBuilder {
public:
-
/// Extensions are passed the builder itself (so they can see how it is
/// configured) as well as the pass manager to add stuff to.
typedef void (*ExtensionFn)(const PassManagerBuilder &Builder,
@@ -86,7 +85,12 @@ public:
/// EP_EnabledOnOptLevel0 - This extension point allows adding passes that
/// should not be disabled by O0 optimization level. The passes will be
/// inserted after the inlining pass.
- EP_EnabledOnOptLevel0
+ EP_EnabledOnOptLevel0,
+
+ /// EP_Peephole - This extension point allows adding passes that perform
+ /// peephole optimizations similar to the instruction combiner. These passes
+ /// will be inserted after each instance of the instruction combiner pass.
+ EP_Peephole,
};
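// Illustrative usage sketch (not from the patch): registering a client pass
// at the new EP_Peephole extension point; createMyPeepholePass stands in for
// a real pass factory.
static void addMyPeephole(const PassManagerBuilder &Builder,
                          PassManagerBase &PM) {
  PM.add(createMyPeepholePass()); // hypothetical pass constructor
}

void configurePassBuilder(PassManagerBuilder &PMB) {
  PMB.addExtension(PassManagerBuilder::EP_Peephole, addMyPeephole);
}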
/// The Optimization Level - Specify the basic optimization level.
@@ -106,6 +110,7 @@ public:
/// added to the per-module passes.
Pass *Inliner;
+ bool DisableTailCalls;
bool DisableUnitAtATime;
bool DisableUnrollLoops;
bool BBVectorize;
@@ -129,8 +134,8 @@ public:
private:
void addExtensionsToPM(ExtensionPointTy ETy, PassManagerBase &PM) const;
void addInitialAliasAnalysisPasses(PassManagerBase &PM) const;
-public:
+public:
/// populateFunctionPassManager - This fills in the function pass manager,
/// which is expected to be run on each function immediately as it is
/// generated. The idea is to reduce the size of the IR in memory.
diff --git a/include/llvm/Transforms/Instrumentation.h b/include/llvm/Transforms/Instrumentation.h
index b527546..61d5c26 100644
--- a/include/llvm/Transforms/Instrumentation.h
+++ b/include/llvm/Transforms/Instrumentation.h
@@ -79,8 +79,8 @@ FunctionPass *createThreadSanitizerPass(StringRef BlacklistFile = StringRef());
// Insert DataFlowSanitizer (dynamic data flow analysis) instrumentation
ModulePass *createDataFlowSanitizerPass(StringRef ABIListFile = StringRef(),
- void *(*getArgTLS)() = 0,
- void *(*getRetValTLS)() = 0);
+ void *(*getArgTLS)() = nullptr,
+ void *(*getRetValTLS)() = nullptr);
#if defined(__GNUC__) && defined(__linux__) && !defined(ANDROID)
inline ModulePass *createDataFlowSanitizerPassForJIT(StringRef ABIListFile =
diff --git a/include/llvm/Transforms/ObjCARC.h b/include/llvm/Transforms/ObjCARC.h
index b3c19c0..1897adc 100644
--- a/include/llvm/Transforms/ObjCARC.h
+++ b/include/llvm/Transforms/ObjCARC.h
@@ -46,4 +46,3 @@ Pass *createObjCARCOptPass();
} // End llvm namespace
#endif
-
diff --git a/include/llvm/Transforms/Scalar.h b/include/llvm/Transforms/Scalar.h
index 7267222..cf1d655 100644
--- a/include/llvm/Transforms/Scalar.h
+++ b/include/llvm/Transforms/Scalar.h
@@ -122,7 +122,7 @@ Pass *createLICMPass();
//
Pass *createLoopStrengthReducePass();
-Pass *createGlobalMergePass(const TargetMachine *TM = 0);
+Pass *createGlobalMergePass(const TargetMachine *TM = nullptr);
//===----------------------------------------------------------------------===//
//
@@ -155,14 +155,14 @@ Pass *createLoopRerollPass();
//
// LoopRotate - This pass is a simple loop rotating pass.
//
-Pass *createLoopRotatePass();
+Pass *createLoopRotatePass(int MaxHeaderSize = -1);
//===----------------------------------------------------------------------===//
//
// LoopIdiom - This pass recognizes and replaces idioms in loops.
//
Pass *createLoopIdiomPass();
-
+
//===----------------------------------------------------------------------===//
//
// PromoteMemoryToRegister - This pass is used to promote memory references to
@@ -201,7 +201,7 @@ FunctionPass *createReassociatePass();
// preds always go to some succ.
//
FunctionPass *createJumpThreadingPass();
-
+
//===----------------------------------------------------------------------===//
//
// CFGSimplification - Merge basic blocks, eliminate unreachable blocks,
@@ -284,10 +284,10 @@ extern char &LCSSAID;
// tree.
//
FunctionPass *createEarlyCSEPass();
-
+
//===----------------------------------------------------------------------===//
//
-// GVN - This pass performs global value numbering and redundant load
+// GVN - This pass performs global value numbering and redundant load
// elimination cotemporaneously.
//
FunctionPass *createGVNPass(bool NoLoads = false);
@@ -305,7 +305,7 @@ FunctionPass *createMemCpyOptPass();
// can prove are dead.
//
Pass *createLoopDeletionPass();
-
+
//===----------------------------------------------------------------------===//
//
// ConstantHoisting - This pass prepares a function for expensive constants.
@@ -318,7 +318,7 @@ FunctionPass *createConstantHoistingPass();
//
FunctionPass *createInstructionNamerPass();
extern char &InstructionNamerID;
-
+
//===----------------------------------------------------------------------===//
//
// Sink - Code Sinking
@@ -344,14 +344,12 @@ Pass *createCorrelatedValuePropagationPass();
FunctionPass *createInstructionSimplifierPass();
extern char &InstructionSimplifierID;
-
//===----------------------------------------------------------------------===//
//
// LowerExpectIntrinsics - Removes llvm.expect intrinsics and creates
// "block_weights" metadata.
FunctionPass *createLowerExpectIntrinsicPass();
-
//===----------------------------------------------------------------------===//
//
// PartiallyInlineLibCalls - Tries to inline the fast path of library
@@ -377,6 +375,12 @@ FunctionPass *createScalarizerPass();
// AddDiscriminators - Add DWARF path discriminators to the IR.
FunctionPass *createAddDiscriminatorsPass();
+//===----------------------------------------------------------------------===//
+//
+// SeparateConstOffsetFromGEP - Split GEPs for better CSE
+//
+FunctionPass *createSeparateConstOffsetFromGEPPass();
+
} // End llvm namespace
#endif
diff --git a/include/llvm/Transforms/Utils/BasicBlockUtils.h b/include/llvm/Transforms/Utils/BasicBlockUtils.h
index 4d5e305..7309f69 100644
--- a/include/llvm/Transforms/Utils/BasicBlockUtils.h
+++ b/include/llvm/Transforms/Utils/BasicBlockUtils.h
@@ -34,23 +34,22 @@ class TerminatorInst;
/// predecessors.
void DeleteDeadBlock(BasicBlock *BB);
-
/// FoldSingleEntryPHINodes - We know that BB has one predecessor. If there are
/// any single-entry PHI nodes in it, fold them away. This handles the case
/// when all entries to the PHI nodes in a block are guaranteed equal, such as
/// when the block has exactly one predecessor.
-void FoldSingleEntryPHINodes(BasicBlock *BB, Pass *P = 0);
+void FoldSingleEntryPHINodes(BasicBlock *BB, Pass *P = nullptr);
/// DeleteDeadPHIs - Examine each PHI in the given block and delete it if it
/// is dead. Also recursively delete any operands that become dead as
/// a result. This includes tracing the def-use list from the PHI to see if
/// it is ultimately unused or if it reaches an unused cycle. Return true
/// if any PHIs were deleted.
-bool DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI = 0);
+bool DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI = nullptr);
/// MergeBlockIntoPredecessor - Attempts to merge a block into its predecessor,
/// if possible. The return value indicates success or failure.
-bool MergeBlockIntoPredecessor(BasicBlock *BB, Pass *P = 0);
+bool MergeBlockIntoPredecessor(BasicBlock *BB, Pass *P = nullptr);
// ReplaceInstWithValue - Replace all uses of an instruction (specified by BI)
// with a value, then remove and delete the original instruction.
@@ -89,12 +88,13 @@ void ReplaceInstWithInst(Instruction *From, Instruction *To);
/// to.
///
BasicBlock *SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
- Pass *P = 0, bool MergeIdenticalEdges = false,
+ Pass *P = nullptr,
+ bool MergeIdenticalEdges = false,
bool DontDeleteUselessPHIs = false,
bool SplitLandingPads = false);
inline BasicBlock *SplitCriticalEdge(BasicBlock *BB, succ_iterator SI,
- Pass *P = 0) {
+ Pass *P = nullptr) {
return SplitCriticalEdge(BB->getTerminator(), SI.getSuccessorIndex(), P);
}
@@ -103,7 +103,8 @@ inline BasicBlock *SplitCriticalEdge(BasicBlock *BB, succ_iterator SI,
/// This updates all of the same analyses as the other SplitCriticalEdge
/// function. If P is specified, it updates the analyses
/// described above.
-inline bool SplitCriticalEdge(BasicBlock *Succ, pred_iterator PI, Pass *P = 0) {
+inline bool SplitCriticalEdge(BasicBlock *Succ, pred_iterator PI,
+ Pass *P = nullptr) {
bool MadeChange = false;
TerminatorInst *TI = (*PI)->getTerminator();
for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
@@ -117,7 +118,7 @@ inline bool SplitCriticalEdge(BasicBlock *Succ, pred_iterator PI, Pass *P = 0) {
/// an edge between the two blocks. If P is specified, it updates the analyses
/// described above.
inline BasicBlock *SplitCriticalEdge(BasicBlock *Src, BasicBlock *Dst,
- Pass *P = 0,
+ Pass *P = nullptr,
bool MergeIdenticalEdges = false,
bool DontDeleteUselessPHIs = false) {
TerminatorInst *TI = Src->getTerminator();
@@ -155,7 +156,7 @@ BasicBlock *SplitBlock(BasicBlock *Old, Instruction *SplitPt, Pass *P);
/// is an exit of a loop with other exits).
///
BasicBlock *SplitBlockPredecessors(BasicBlock *BB, ArrayRef<BasicBlock*> Preds,
- const char *Suffix, Pass *P = 0);
+ const char *Suffix, Pass *P = nullptr);
/// SplitLandingPadPredecessors - This method transforms the landing pad,
/// OrigBB, by introducing two new basic blocks into the function. One of those
@@ -203,8 +204,7 @@ ReturnInst *FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB,
/// Returns the NewBasicBlock's terminator.
TerminatorInst *SplitBlockAndInsertIfThen(Value *Cond, Instruction *SplitBefore,
bool Unreachable,
- MDNode *BranchWeights = 0);
-
+ MDNode *BranchWeights = nullptr);
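// Illustrative usage sketch (not from the patch): the common instrumentation
// pattern this helper serves: guard a slow path behind an unlikely branch.
// Cond, InsertPt, ReportFn and Ctx are assumed to be in scope.
MDNode *Weights = MDBuilder(Ctx).createBranchWeights(1, 100000);
TerminatorInst *Then =
    SplitBlockAndInsertIfThen(Cond, InsertPt, /*Unreachable=*/false, Weights);
IRBuilder<> IRB(Then);
IRB.CreateCall(ReportFn); // hypothetical reporting callee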
/// SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen,
/// but also creates the ElseBlock.
@@ -223,7 +223,7 @@ TerminatorInst *SplitBlockAndInsertIfThen(Value *Cond, Instruction *SplitBefore,
void SplitBlockAndInsertIfThenElse(Value *Cond, Instruction *SplitBefore,
TerminatorInst **ThenTerm,
TerminatorInst **ElseTerm,
- MDNode *BranchWeights = 0);
+ MDNode *BranchWeights = nullptr);
///
/// GetIfCondition - Check whether BB is the merge point of a if-region.
diff --git a/include/llvm/Transforms/Utils/BuildLibCalls.h b/include/llvm/Transforms/Utils/BuildLibCalls.h
index 0f39ada..1e407fb 100644
--- a/include/llvm/Transforms/Utils/BuildLibCalls.h
+++ b/include/llvm/Transforms/Utils/BuildLibCalls.h
@@ -21,7 +21,7 @@ namespace llvm {
class Value;
class DataLayout;
class TargetLibraryInfo;
-
+
/// CastToCStr - Return V if it is an i8*, otherwise cast it to i8*.
Value *CastToCStr(Value *V, IRBuilder<> &B);
@@ -124,6 +124,7 @@ namespace llvm {
virtual void replaceCall(Value *With) = 0;
virtual bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp,
bool isString) const = 0;
+
public:
virtual ~SimplifyFortifiedLibCalls();
bool fold(CallInst *CI, const DataLayout *TD, const TargetLibraryInfo *TLI);
diff --git a/include/llvm/Transforms/Utils/Cloning.h b/include/llvm/Transforms/Utils/Cloning.h
index 96c6508..bdf50dd 100644
--- a/include/llvm/Transforms/Utils/Cloning.h
+++ b/include/llvm/Transforms/Utils/Cloning.h
@@ -55,17 +55,16 @@ struct ClonedCodeInfo {
/// ContainsCalls - This is set to true if the cloned code contains a normal
/// call instruction.
bool ContainsCalls;
-
+
/// ContainsDynamicAllocas - This is set to true if the cloned code contains
/// a 'dynamic' alloca. Dynamic allocas are allocas that are either not in
/// the entry block or they are in the entry block but are not a constant
/// size.
bool ContainsDynamicAllocas;
-
+
ClonedCodeInfo() : ContainsCalls(false), ContainsDynamicAllocas(false) {}
};
-
/// CloneBasicBlock - Return a copy of the specified basic block, but without
/// embedding the block into a particular function. The block returned is an
/// exact copy of the specified basic block, without any remapping having been
@@ -96,8 +95,8 @@ struct ClonedCodeInfo {
///
BasicBlock *CloneBasicBlock(const BasicBlock *BB,
ValueToValueMapTy &VMap,
- const Twine &NameSuffix = "", Function *F = 0,
- ClonedCodeInfo *CodeInfo = 0);
+ const Twine &NameSuffix = "", Function *F = nullptr,
+ ClonedCodeInfo *CodeInfo = nullptr);
/// CloneFunction - Return a copy of the specified function, but without
/// embedding the function into another module. Also, any references specified
@@ -114,7 +113,7 @@ BasicBlock *CloneBasicBlock(const BasicBlock *BB,
Function *CloneFunction(const Function *F,
ValueToValueMapTy &VMap,
bool ModuleLevelChanges,
- ClonedCodeInfo *CodeInfo = 0);
+ ClonedCodeInfo *CodeInfo = nullptr);
/// Clone OldFunc into NewFunc, transforming the old arguments into references
/// to VMap values. Note that if NewFunc already has basic blocks, the ones
@@ -129,10 +128,10 @@ void CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
ValueToValueMapTy &VMap,
bool ModuleLevelChanges,
SmallVectorImpl<ReturnInst*> &Returns,
- const char *NameSuffix = "",
- ClonedCodeInfo *CodeInfo = 0,
- ValueMapTypeRemapper *TypeMapper = 0,
- ValueMaterializer *Materializer = 0);
+ const char *NameSuffix = "",
+ ClonedCodeInfo *CodeInfo = nullptr,
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr);
/// CloneAndPruneFunctionInto - This works exactly like CloneFunctionInto,
/// except that it does some simple constant prop and DCE on the fly. The
@@ -149,19 +148,18 @@ void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
ValueToValueMapTy &VMap,
bool ModuleLevelChanges,
SmallVectorImpl<ReturnInst*> &Returns,
- const char *NameSuffix = "",
- ClonedCodeInfo *CodeInfo = 0,
- const DataLayout *DL = 0,
- Instruction *TheCall = 0);
+ const char *NameSuffix = "",
+ ClonedCodeInfo *CodeInfo = nullptr,
+ const DataLayout *DL = nullptr,
+ Instruction *TheCall = nullptr);
-
/// InlineFunctionInfo - This class captures the data input to the
-/// InlineFunction call, and records the auxiliary results produced by it.
+/// InlineFunction call, and records the auxiliary results produced by it.
class InlineFunctionInfo {
public:
- explicit InlineFunctionInfo(CallGraph *cg = 0, const DataLayout *DL = 0)
+ explicit InlineFunctionInfo(CallGraph *cg = nullptr, const DataLayout *DL = nullptr)
: CG(cg), DL(DL) {}
-
+
/// CG - If non-null, InlineFunction will update the callgraph to reflect the
/// changes it makes.
CallGraph *CG;
@@ -174,13 +172,13 @@ public:
/// InlinedCalls - InlineFunction fills this in with callsites that were
/// inlined from the callee. This is only filled in if CG is non-null.
SmallVector<WeakVH, 8> InlinedCalls;
-
+
void reset() {
StaticAllocas.clear();
InlinedCalls.clear();
}
};
-
+
/// InlineFunction - This function inlines the called function into the basic
/// block of the caller. This returns false if it is not possible to inline
/// this call. The program is still in a well defined state if this occurs
diff --git a/include/llvm/Transforms/Utils/CmpInstAnalysis.h b/include/llvm/Transforms/Utils/CmpInstAnalysis.h
index 22469e0..73c15e4 100644
--- a/include/llvm/Transforms/Utils/CmpInstAnalysis.h
+++ b/include/llvm/Transforms/Utils/CmpInstAnalysis.h
@@ -63,4 +63,3 @@ namespace llvm {
} // end namespace llvm
#endif
-
diff --git a/include/llvm/Transforms/Utils/CodeExtractor.h b/include/llvm/Transforms/Utils/CodeExtractor.h
index 1122678..6b41e82 100644
--- a/include/llvm/Transforms/Utils/CodeExtractor.h
+++ b/include/llvm/Transforms/Utils/CodeExtractor.h
@@ -66,7 +66,7 @@ namespace llvm {
/// dominates the rest, prepare a code extractor object for pulling this
/// sequence out into its new function. When a DominatorTree is also given,
/// extra checking and transformations are enabled.
- CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT = 0,
+ CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT = nullptr,
bool AggregateArgs = false);
/// \brief Create a code extractor for a loop body.
@@ -120,7 +120,6 @@ namespace llvm {
BasicBlock *newHeader,
ValueSet &inputs,
ValueSet &outputs);
-
};
}
diff --git a/include/llvm/Transforms/Utils/CtorUtils.h b/include/llvm/Transforms/Utils/CtorUtils.h
new file mode 100644
index 0000000..81e7b95
--- /dev/null
+++ b/include/llvm/Transforms/Utils/CtorUtils.h
@@ -0,0 +1,32 @@
+//===- CtorUtils.h - Helpers for working with global_ctors ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines functions that are used to process llvm.global_ctors.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CTOR_UTILS_H
+#define LLVM_TRANSFORMS_UTILS_CTOR_UTILS_H
+
+#include "llvm/ADT/STLExtras.h"
+
+namespace llvm {
+
+class GlobalVariable;
+class Function;
+class Module;
+
+/// Call "ShouldRemove" for every entry in M's global_ctor list and remove the
+/// entries for which it returns true. Return true if anything changed.
+bool optimizeGlobalCtorsList(Module &M,
+ function_ref<bool(Function *)> ShouldRemove);
+
+} // End llvm namespace
+
+#endif
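// Illustrative usage sketch (not from the patch): removing llvm.global_ctors
// entries that satisfy a caller-supplied predicate; isRemovableCtor is a
// placeholder for that predicate.
static bool pruneCtors(Module &M) {
  return optimizeGlobalCtorsList(M, [](Function *F) {
    return F && isRemovableCtor(F); // hypothetical helper
  });
}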
diff --git a/include/llvm/Transforms/Utils/IntegerDivision.h b/include/llvm/Transforms/Utils/IntegerDivision.h
index 55e8b66..0ec3321 100644
--- a/include/llvm/Transforms/Utils/IntegerDivision.h
+++ b/include/llvm/Transforms/Utils/IntegerDivision.h
@@ -55,16 +55,16 @@ namespace llvm {
/// @brief Replace Rem with generated code.
bool expandRemainderUpTo64Bits(BinaryOperator *Rem);
- /// Generate code to divide two integers, replacing Div with the generated
+ /// Generate code to divide two integers, replacing Div with the generated
/// code. Uses ExpandDivision with a 32bit Div which makes it useful for
/// targets with little or no support for less than 32 bit arithmetic.
- ///
+ ///
  /// @brief Replace Div with generated code.
bool expandDivisionUpTo32Bits(BinaryOperator *Div);
- /// Generate code to divide two integers, replacing Div with the generated
+ /// Generate code to divide two integers, replacing Div with the generated
/// code. Uses ExpandDivision with a 64bit Div.
- ///
+ ///
  /// @brief Replace Div with generated code.
bool expandDivisionUpTo64Bits(BinaryOperator *Div);
diff --git a/include/llvm/Transforms/Utils/Local.h b/include/llvm/Transforms/Utils/Local.h
index c68fd06..6f64269 100644
--- a/include/llvm/Transforms/Utils/Local.h
+++ b/include/llvm/Transforms/Utils/Local.h
@@ -55,7 +55,7 @@ template<typename T> class SmallVectorImpl;
/// conditions and indirectbr addresses this might make dead if
/// DeleteDeadConditions is true.
bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false,
- const TargetLibraryInfo *TLI = 0);
+ const TargetLibraryInfo *TLI = nullptr);
//===----------------------------------------------------------------------===//
// Local dead code elimination.
@@ -64,30 +64,31 @@ bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false,
/// isInstructionTriviallyDead - Return true if the result produced by the
/// instruction is not used, and the instruction has no side effects.
///
-bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=0);
+bool isInstructionTriviallyDead(Instruction *I,
+ const TargetLibraryInfo *TLI = nullptr);
/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
/// trivially dead instruction, delete it. If that makes any of its operands
/// trivially dead, delete them too, recursively. Return true if any
/// instructions were deleted.
bool RecursivelyDeleteTriviallyDeadInstructions(Value *V,
- const TargetLibraryInfo *TLI=0);
+ const TargetLibraryInfo *TLI = nullptr);
/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
/// dead PHI node, due to being a def-use chain of single-use nodes that
/// either forms a cycle or is terminated by a trivially dead instruction,
/// delete it. If that makes any of its operands trivially dead, delete them
/// too, recursively. Return true if a change was made.
-bool RecursivelyDeleteDeadPHINode(PHINode *PN, const TargetLibraryInfo *TLI=0);
-
+bool RecursivelyDeleteDeadPHINode(PHINode *PN,
+ const TargetLibraryInfo *TLI = nullptr);
/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
/// simplify any instructions in it and recursively delete dead instructions.
///
/// This returns true if it changed the code, note that it can delete
/// instructions in other blocks as well in this block.
-bool SimplifyInstructionsInBlock(BasicBlock *BB, const DataLayout *TD = 0,
- const TargetLibraryInfo *TLI = 0);
+bool SimplifyInstructionsInBlock(BasicBlock *BB, const DataLayout *TD = nullptr,
+ const TargetLibraryInfo *TLI = nullptr);
//===----------------------------------------------------------------------===//
// Control Flow Graph Restructuring.
@@ -105,16 +106,14 @@ bool SimplifyInstructionsInBlock(BasicBlock *BB, const DataLayout *TD = 0,
/// .. and delete the predecessor corresponding to the '1', this will attempt to
/// recursively fold the 'and' to 0.
void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
- DataLayout *TD = 0);
-
+ DataLayout *TD = nullptr);
/// MergeBasicBlockIntoOnlyPred - BB is a block with one predecessor and its
/// predecessor is known to have one successor (BB!). Eliminate the edge
/// between them, moving the instructions in the predecessor into BB. This
/// deletes the predecessor block.
///
-void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, Pass *P = 0);
-
+void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, Pass *P = nullptr);
/// TryToSimplifyUncondBranchFromEmptyBlock - BB is known to contain an
/// unconditional branch, and contains no instructions other than PHI nodes,
@@ -137,13 +136,13 @@ bool EliminateDuplicatePHINodes(BasicBlock *BB);
/// the basic block that was pointed to.
///
bool SimplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI,
- const DataLayout *TD = 0);
+ const DataLayout *TD = nullptr);
/// FlattenCFG - This function is used to flatten a CFG. For
/// example, it uses parallel-and and parallel-or mode to collapse
/// if-conditions and merge if-regions with identical statements.
///
-bool FlattenCFG(BasicBlock *BB, AliasAnalysis *AA = 0);
+bool FlattenCFG(BasicBlock *BB, AliasAnalysis *AA = nullptr);
/// FoldBranchToCommonDest - If this basic block is ONLY a setcc and a branch,
/// and if a predecessor branches to us and one of our successors, fold the
@@ -159,22 +158,23 @@ bool FoldBranchToCommonDest(BranchInst *BI);
///
AllocaInst *DemoteRegToStack(Instruction &X,
bool VolatileLoads = false,
- Instruction *AllocaPoint = 0);
+ Instruction *AllocaPoint = nullptr);
/// DemotePHIToStack - This function takes a virtual register computed by a phi
/// node and replaces it with a slot in the stack frame, allocated via alloca.
/// The phi node is deleted and it returns the pointer to the alloca inserted.
-AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = 0);
+AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = nullptr);
/// getOrEnforceKnownAlignment - If the specified pointer has an alignment that
/// we can determine, return it, otherwise return 0. If PrefAlign is specified,
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
- const DataLayout *TD = 0);
+ const DataLayout *TD = nullptr);
/// getKnownAlignment - Try to infer an alignment for the specified pointer.
-static inline unsigned getKnownAlignment(Value *V, const DataLayout *TD = 0) {
+static inline unsigned getKnownAlignment(Value *V,
+ const DataLayout *TD = nullptr) {
return getOrEnforceKnownAlignment(V, 0, TD);
}
diff --git a/include/llvm/Transforms/Utils/LoopUtils.h b/include/llvm/Transforms/Utils/LoopUtils.h
index 64e18ca1..ee26d83 100644
--- a/include/llvm/Transforms/Utils/LoopUtils.h
+++ b/include/llvm/Transforms/Utils/LoopUtils.h
@@ -32,7 +32,7 @@ BasicBlock *InsertPreheaderForLoop(Loop *L, Pass *P);
/// will optionally update \c AliasAnalysis and \c ScalarEvolution analyses if
/// passed into it.
bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, Pass *PP,
- AliasAnalysis *AA = 0, ScalarEvolution *SE = 0);
+ AliasAnalysis *AA = nullptr, ScalarEvolution *SE = nullptr);
/// \brief Put loop into LCSSA form.
///
@@ -45,7 +45,7 @@ bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, Pass *PP,
/// If ScalarEvolution is passed in, it will be preserved.
///
/// Returns true if any modifications are made to the loop.
-bool formLCSSA(Loop &L, DominatorTree &DT, ScalarEvolution *SE = 0);
+bool formLCSSA(Loop &L, DominatorTree &DT, ScalarEvolution *SE = nullptr);
/// \brief Put a loop nest into LCSSA form.
///
@@ -56,8 +56,8 @@ bool formLCSSA(Loop &L, DominatorTree &DT, ScalarEvolution *SE = 0);
/// If ScalarEvolution is passed in, it will be preserved.
///
/// Returns true if any modifications are made to the loop.
-bool formLCSSARecursively(Loop &L, DominatorTree &DT, ScalarEvolution *SE = 0);
-
+bool formLCSSARecursively(Loop &L, DominatorTree &DT,
+ ScalarEvolution *SE = nullptr);
}
#endif
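
A minimal sketch of how a loop pass of this vintage might drive the utilities above, leaving the now-nullptr analysis arguments defaulted when it does not preserve them; the helper name is hypothetical:

    #include "llvm/Transforms/Utils/LoopUtils.h"
    using namespace llvm;

    // Canonicalize a loop: loop-simplify form first, then LCSSA for the nest.
    static bool canonicalizeLoop(Loop *L, DominatorTree &DT, LoopInfo &LI,
                                 Pass *P) {
      bool Changed = simplifyLoop(L, &DT, &LI, P); // AA and SE default to nullptr
      Changed |= formLCSSARecursively(*L, DT);     // SE defaults to nullptr
      return Changed;
    }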
diff --git a/include/llvm/Transforms/Utils/PromoteMemToReg.h b/include/llvm/Transforms/Utils/PromoteMemToReg.h
index 22f46e5..c83fedb 100644
--- a/include/llvm/Transforms/Utils/PromoteMemToReg.h
+++ b/include/llvm/Transforms/Utils/PromoteMemToReg.h
@@ -41,7 +41,7 @@ bool isAllocaPromotable(const AllocaInst *AI);
/// If AST is specified, the specified tracker is updated to reflect changes
/// made to the IR.
void PromoteMemToReg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
- AliasSetTracker *AST = 0);
+ AliasSetTracker *AST = nullptr);
} // End llvm namespace
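
A small illustrative sketch (the helper name is hypothetical) of collecting promotable entry-block allocas and promoting them, with the AliasSetTracker argument left at its new nullptr default:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/Dominators.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/Transforms/Utils/PromoteMemToReg.h"
    using namespace llvm;

    static void promoteEntryAllocas(Function &F, DominatorTree &DT) {
      SmallVector<AllocaInst *, 8> Allocas;
      for (Instruction &I : F.getEntryBlock())
        if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
          if (isAllocaPromotable(AI))
            Allocas.push_back(AI);
      if (!Allocas.empty())
        PromoteMemToReg(Allocas, DT); // AST defaults to nullptr
    }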
diff --git a/include/llvm/Transforms/Utils/SSAUpdater.h b/include/llvm/Transforms/Utils/SSAUpdater.h
index 0c0e5de..7874a5f 100644
--- a/include/llvm/Transforms/Utils/SSAUpdater.h
+++ b/include/llvm/Transforms/Utils/SSAUpdater.h
@@ -56,7 +56,7 @@ private:
public:
/// If InsertedPHIs is specified, it will be filled
/// in with all PHI Nodes created by rewriting.
- explicit SSAUpdater(SmallVectorImpl<PHINode*> *InsertedPHIs = 0);
+ explicit SSAUpdater(SmallVectorImpl<PHINode*> *InsertedPHIs = nullptr);
~SSAUpdater();
/// \brief Reset this object to get ready for a new set of SSA updates with
@@ -133,31 +133,31 @@ private:
class LoadAndStorePromoter {
protected:
SSAUpdater &SSA;
+
public:
LoadAndStorePromoter(const SmallVectorImpl<Instruction*> &Insts,
SSAUpdater &S, StringRef Name = StringRef());
virtual ~LoadAndStorePromoter() {}
-
+
/// \brief This does the promotion.
///
/// Insts is a list of loads and stores to promote, and Name is the basename
/// for the PHIs to insert. After this is complete, the loads and stores are
/// removed from the code.
void run(const SmallVectorImpl<Instruction*> &Insts) const;
-
-
+
/// \brief Return true if the specified instruction is in the Inst list.
///
/// The Insts list is the one passed into the constructor. Clients should
/// implement this with a more efficient version if possible.
virtual bool isInstInList(Instruction *I,
const SmallVectorImpl<Instruction*> &Insts) const;
-
+
/// \brief This hook is invoked after all the stores are found and inserted as
/// available values.
virtual void doExtraRewritesBeforeFinalDeletion() const {
}
-
+
/// \brief Clients can choose to implement this to get notified right before
/// a load is RAUW'd with another value.
virtual void replaceLoadWithValue(LoadInst *LI, Value *V) const {
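
For context, a compact sketch of typical SSAUpdater use; the collected-PHIs vector is optional and can be omitted thanks to the nullptr default. The function and variable names are hypothetical:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/BasicBlock.h"
    #include "llvm/Transforms/Utils/SSAUpdater.h"
    using namespace llvm;

    // Compute the value of DefV available in UseBB, inserting PHIs as needed.
    static Value *availableValueIn(BasicBlock *UseBB, Value *DefV,
                                   BasicBlock *DefBB) {
      SmallVector<PHINode *, 4> NewPHIs;
      SSAUpdater SSA(&NewPHIs); // or simply: SSAUpdater SSA;
      SSA.Initialize(DefV->getType(), DefV->getName());
      SSA.AddAvailableValue(DefBB, DefV);
      return SSA.GetValueInMiddleOfBlock(UseBB);
    }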
diff --git a/include/llvm/Transforms/Utils/SSAUpdaterImpl.h b/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
index 0f3da16..ed0841c 100644
--- a/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
+++ b/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
@@ -23,6 +23,8 @@
namespace llvm {
+#define DEBUG_TYPE "ssaupdater"
+
class CastInst;
class PHINode;
template<typename T> class SSAUpdaterTraits;
@@ -52,8 +54,8 @@ private:
PhiT *PHITag; // Marker for existing PHIs that match.
BBInfo(BlkT *ThisBB, ValT V)
- : BB(ThisBB), AvailableVal(V), DefBB(V ? this : 0), BlkNum(0), IDom(0),
- NumPreds(0), Preds(0), PHITag(0) { }
+ : BB(ThisBB), AvailableVal(V), DefBB(V ? this : nullptr), BlkNum(0),
+ IDom(nullptr), NumPreds(0), Preds(nullptr), PHITag(nullptr) {}
};
typedef DenseMap<BlkT*, ValT> AvailableValsTy;
@@ -115,7 +117,7 @@ public:
Traits::FindPredecessorBlocks(Info->BB, &Preds);
Info->NumPreds = Preds.size();
if (Info->NumPreds == 0)
- Info->Preds = 0;
+ Info->Preds = nullptr;
else
Info->Preds = static_cast<BBInfo**>
(Allocator.Allocate(Info->NumPreds * sizeof(BBInfo*),
@@ -148,7 +150,7 @@ public:
// Now that we know what blocks are backwards-reachable from the starting
// block, do a forward depth-first traversal to assign postorder numbers
// to those blocks.
- BBInfo *PseudoEntry = new (Allocator) BBInfo(0, 0);
+ BBInfo *PseudoEntry = new (Allocator) BBInfo(nullptr, 0);
unsigned BlkNum = 1;
// Initialize the worklist with the roots from the backward traversal.
@@ -231,7 +233,7 @@ public:
for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
E = BlockList->rend(); I != E; ++I) {
BBInfo *Info = *I;
- BBInfo *NewIDom = 0;
+ BBInfo *NewIDom = nullptr;
// Iterate through the block's predecessors.
for (unsigned p = 0; p != Info->NumPreds; ++p) {
@@ -386,7 +388,7 @@ public:
// Match failed: clear all the PHITag values.
for (typename BlockListTy::iterator I = BlockList->begin(),
E = BlockList->end(); I != E; ++I)
- (*I)->PHITag = 0;
+ (*I)->PHITag = nullptr;
}
}
@@ -451,6 +453,8 @@ public:
}
};
+#undef DEBUG_TYPE // "ssaupdater"
+
} // End llvm namespace
#endif
diff --git a/include/llvm/Transforms/Utils/SimplifyIndVar.h b/include/llvm/Transforms/Utils/SimplifyIndVar.h
index dedeca3..dcb1d67 100644
--- a/include/llvm/Transforms/Utils/SimplifyIndVar.h
+++ b/include/llvm/Transforms/Utils/SimplifyIndVar.h
@@ -37,8 +37,9 @@ protected:
bool ShouldSplitOverflowIntrinsics;
virtual void anchor();
+
public:
- IVVisitor(): DT(NULL), ShouldSplitOverflowIntrinsics(false) {}
+ IVVisitor(): DT(nullptr), ShouldSplitOverflowIntrinsics(false) {}
virtual ~IVVisitor() {}
const DominatorTree *getDomTree() const { return DT; }
@@ -57,7 +58,7 @@ public:
/// simplifyUsersOfIV - Simplify instructions that use this induction variable
/// by using ScalarEvolution to analyze the IV's recurrence.
bool simplifyUsersOfIV(PHINode *CurrIV, ScalarEvolution *SE, LPPassManager *LPM,
- SmallVectorImpl<WeakVH> &Dead, IVVisitor *V = NULL);
+ SmallVectorImpl<WeakVH> &Dead, IVVisitor *V = nullptr);
/// SimplifyLoopIVs - Simplify users of induction variables within this
/// loop. This does not actually change or add IVs.
diff --git a/include/llvm/Transforms/Utils/SimplifyLibCalls.h b/include/llvm/Transforms/Utils/SimplifyLibCalls.h
index 6bb81be..a2a5f9a 100644
--- a/include/llvm/Transforms/Utils/SimplifyLibCalls.h
+++ b/include/llvm/Transforms/Utils/SimplifyLibCalls.h
@@ -30,6 +30,7 @@ namespace llvm {
/// Impl - A pointer to the actual implementation of the library call
/// simplifier.
LibCallSimplifierImpl *Impl;
+
public:
LibCallSimplifier(const DataLayout *TD, const TargetLibraryInfo *TLI,
bool UnsafeFPShrink);
diff --git a/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h b/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
index 79a70cf..7ac2572 100644
--- a/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
+++ b/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
@@ -24,10 +24,11 @@ namespace llvm {
struct UnifyFunctionExitNodes : public FunctionPass {
BasicBlock *ReturnBlock, *UnwindBlock, *UnreachableBlock;
+
public:
static char ID; // Pass identification, replacement for typeid
UnifyFunctionExitNodes() : FunctionPass(ID),
- ReturnBlock(0), UnwindBlock(0) {
+ ReturnBlock(nullptr), UnwindBlock(nullptr) {
initializeUnifyFunctionExitNodesPass(*PassRegistry::getPassRegistry());
}
diff --git a/include/llvm/Transforms/Utils/UnrollLoop.h b/include/llvm/Transforms/Utils/UnrollLoop.h
index 0bbd572..aaadd7d 100644
--- a/include/llvm/Transforms/Utils/UnrollLoop.h
+++ b/include/llvm/Transforms/Utils/UnrollLoop.h
@@ -29,7 +29,6 @@ bool UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool AllowRuntime,
bool UnrollRuntimeLoopProlog(Loop *L, unsigned Count, LoopInfo *LI,
LPPassManager* LPM);
-
}
#endif
diff --git a/include/llvm/Transforms/Utils/ValueMapper.h b/include/llvm/Transforms/Utils/ValueMapper.h
index e96610e..5774763 100644
--- a/include/llvm/Transforms/Utils/ValueMapper.h
+++ b/include/llvm/Transforms/Utils/ValueMapper.h
@@ -28,7 +28,7 @@ namespace llvm {
virtual void anchor(); // Out of line method.
public:
virtual ~ValueMapTypeRemapper() {}
-
+
/// remapType - The client should implement this method if they want to
/// remap types while mapping values.
virtual Type *remapType(Type *SrcTy) = 0;
@@ -46,53 +46,52 @@ namespace llvm {
/// lazily.
virtual Value *materializeValueFor(Value *V) = 0;
};
-
+
/// RemapFlags - These are flags that the value mapping APIs allow.
enum RemapFlags {
RF_None = 0,
-
+
/// RF_NoModuleLevelChanges - If this flag is set, the remapper knows that
/// only local values within a function (such as an instruction or argument)
/// are mapped, not global values like functions and global metadata.
RF_NoModuleLevelChanges = 1,
-
+
/// RF_IgnoreMissingEntries - If this flag is set, the remapper ignores
/// entries that are not in the value map. If it is unset, it aborts if an
/// operand is asked to be remapped which doesn't exist in the mapping.
RF_IgnoreMissingEntries = 2
};
-
+
static inline RemapFlags operator|(RemapFlags LHS, RemapFlags RHS) {
return RemapFlags(unsigned(LHS)|unsigned(RHS));
}
-
+
Value *MapValue(const Value *V, ValueToValueMapTy &VM,
RemapFlags Flags = RF_None,
- ValueMapTypeRemapper *TypeMapper = 0,
- ValueMaterializer *Materializer = 0);
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr);
void RemapInstruction(Instruction *I, ValueToValueMapTy &VM,
RemapFlags Flags = RF_None,
- ValueMapTypeRemapper *TypeMapper = 0,
- ValueMaterializer *Materializer = 0);
-
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr);
+
/// MapValue - provide versions that preserve type safety for MDNode and
/// Constants.
inline MDNode *MapValue(const MDNode *V, ValueToValueMapTy &VM,
RemapFlags Flags = RF_None,
- ValueMapTypeRemapper *TypeMapper = 0,
- ValueMaterializer *Materializer = 0) {
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr) {
return cast<MDNode>(MapValue((const Value*)V, VM, Flags, TypeMapper,
Materializer));
}
inline Constant *MapValue(const Constant *V, ValueToValueMapTy &VM,
RemapFlags Flags = RF_None,
- ValueMapTypeRemapper *TypeMapper = 0,
- ValueMaterializer *Materializer = 0) {
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr) {
return cast<Constant>(MapValue((const Value*)V, VM, Flags, TypeMapper,
Materializer));
}
-
} // End llvm namespace
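
A brief sketch of the common call pattern for these entry points after the change, with TypeMapper and Materializer left at their nullptr defaults; the wrapper name is hypothetical:

    #include "llvm/IR/Instruction.h"
    #include "llvm/Transforms/Utils/ValueMapper.h"
    using namespace llvm;

    // Remap a cloned instruction's operands through VMap, tolerating operands
    // that were never cloned (e.g. global values).
    static void remapClone(Instruction *Clone, ValueToValueMapTy &VMap) {
      RemapInstruction(Clone, VMap,
                       RF_NoModuleLevelChanges | RF_IgnoreMissingEntries);
    }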
diff --git a/include/llvm/Transforms/Utils/VectorUtils.h b/include/llvm/Transforms/Utils/VectorUtils.h
new file mode 100644
index 0000000..e1d6c56
--- /dev/null
+++ b/include/llvm/Transforms/Utils/VectorUtils.h
@@ -0,0 +1,180 @@
+//===- llvm/Transforms/Utils/VectorUtils.h - Vector utilities -*- C++ -*-=====//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines some vectorizer utilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_VECTORUTILS_H
+#define LLVM_TRANSFORMS_UTILS_VECTORUTILS_H
+
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/Target/TargetLibraryInfo.h"
+
+namespace llvm {
+
+/// \brief Identify if the intrinsic is trivially vectorizable.
+///
+/// This method returns true if the intrinsic's argument types are all
+/// scalars for the scalar form of the intrinsic and all vectors for
+/// the vector form of the intrinsic.
+static inline bool isTriviallyVectorizable(Intrinsic::ID ID) {
+ switch (ID) {
+ case Intrinsic::sqrt:
+ case Intrinsic::sin:
+ case Intrinsic::cos:
+ case Intrinsic::exp:
+ case Intrinsic::exp2:
+ case Intrinsic::log:
+ case Intrinsic::log10:
+ case Intrinsic::log2:
+ case Intrinsic::fabs:
+ case Intrinsic::copysign:
+ case Intrinsic::floor:
+ case Intrinsic::ceil:
+ case Intrinsic::trunc:
+ case Intrinsic::rint:
+ case Intrinsic::nearbyint:
+ case Intrinsic::round:
+ case Intrinsic::bswap:
+ case Intrinsic::ctpop:
+ case Intrinsic::pow:
+ case Intrinsic::fma:
+ case Intrinsic::fmuladd:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static Intrinsic::ID checkUnaryFloatSignature(const CallInst &I,
+ Intrinsic::ID ValidIntrinsicID) {
+ if (I.getNumArgOperands() != 1 ||
+ !I.getArgOperand(0)->getType()->isFloatingPointTy() ||
+ I.getType() != I.getArgOperand(0)->getType() ||
+ !I.onlyReadsMemory())
+ return Intrinsic::not_intrinsic;
+
+ return ValidIntrinsicID;
+}
+
+static Intrinsic::ID checkBinaryFloatSignature(const CallInst &I,
+ Intrinsic::ID ValidIntrinsicID) {
+ if (I.getNumArgOperands() != 2 ||
+ !I.getArgOperand(0)->getType()->isFloatingPointTy() ||
+ !I.getArgOperand(1)->getType()->isFloatingPointTy() ||
+ I.getType() != I.getArgOperand(0)->getType() ||
+ I.getType() != I.getArgOperand(1)->getType() ||
+ !I.onlyReadsMemory())
+ return Intrinsic::not_intrinsic;
+
+ return ValidIntrinsicID;
+}
+
+static Intrinsic::ID
+getIntrinsicIDForCall(CallInst *CI, const TargetLibraryInfo *TLI) {
+ // If we have an intrinsic call, check if it is trivially vectorizable.
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI)) {
+ Intrinsic::ID ID = II->getIntrinsicID();
+ if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
+ ID == Intrinsic::lifetime_end)
+ return ID;
+ else
+ return Intrinsic::not_intrinsic;
+ }
+
+ if (!TLI)
+ return Intrinsic::not_intrinsic;
+
+ LibFunc::Func Func;
+ Function *F = CI->getCalledFunction();
+  // We're going to make assumptions about the semantics of the functions, so
+  // check that the target knows the function is available in this environment
+  // and that it does not have local linkage.
+ if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(F->getName(), Func))
+ return Intrinsic::not_intrinsic;
+
+ // Otherwise check if we have a call to a function that can be turned into a
+ // vector intrinsic.
+ switch (Func) {
+ default:
+ break;
+ case LibFunc::sin:
+ case LibFunc::sinf:
+ case LibFunc::sinl:
+ return checkUnaryFloatSignature(*CI, Intrinsic::sin);
+ case LibFunc::cos:
+ case LibFunc::cosf:
+ case LibFunc::cosl:
+ return checkUnaryFloatSignature(*CI, Intrinsic::cos);
+ case LibFunc::exp:
+ case LibFunc::expf:
+ case LibFunc::expl:
+ return checkUnaryFloatSignature(*CI, Intrinsic::exp);
+ case LibFunc::exp2:
+ case LibFunc::exp2f:
+ case LibFunc::exp2l:
+ return checkUnaryFloatSignature(*CI, Intrinsic::exp2);
+ case LibFunc::log:
+ case LibFunc::logf:
+ case LibFunc::logl:
+ return checkUnaryFloatSignature(*CI, Intrinsic::log);
+ case LibFunc::log10:
+ case LibFunc::log10f:
+ case LibFunc::log10l:
+ return checkUnaryFloatSignature(*CI, Intrinsic::log10);
+ case LibFunc::log2:
+ case LibFunc::log2f:
+ case LibFunc::log2l:
+ return checkUnaryFloatSignature(*CI, Intrinsic::log2);
+ case LibFunc::fabs:
+ case LibFunc::fabsf:
+ case LibFunc::fabsl:
+ return checkUnaryFloatSignature(*CI, Intrinsic::fabs);
+ case LibFunc::copysign:
+ case LibFunc::copysignf:
+ case LibFunc::copysignl:
+ return checkBinaryFloatSignature(*CI, Intrinsic::copysign);
+ case LibFunc::floor:
+ case LibFunc::floorf:
+ case LibFunc::floorl:
+ return checkUnaryFloatSignature(*CI, Intrinsic::floor);
+ case LibFunc::ceil:
+ case LibFunc::ceilf:
+ case LibFunc::ceill:
+ return checkUnaryFloatSignature(*CI, Intrinsic::ceil);
+ case LibFunc::trunc:
+ case LibFunc::truncf:
+ case LibFunc::truncl:
+ return checkUnaryFloatSignature(*CI, Intrinsic::trunc);
+ case LibFunc::rint:
+ case LibFunc::rintf:
+ case LibFunc::rintl:
+ return checkUnaryFloatSignature(*CI, Intrinsic::rint);
+ case LibFunc::nearbyint:
+ case LibFunc::nearbyintf:
+ case LibFunc::nearbyintl:
+ return checkUnaryFloatSignature(*CI, Intrinsic::nearbyint);
+ case LibFunc::round:
+ case LibFunc::roundf:
+ case LibFunc::roundl:
+ return checkUnaryFloatSignature(*CI, Intrinsic::round);
+ case LibFunc::pow:
+ case LibFunc::powf:
+ case LibFunc::powl:
+ return checkBinaryFloatSignature(*CI, Intrinsic::pow);
+ }
+
+ return Intrinsic::not_intrinsic;
+}
+
+} // llvm namespace
+
+#endif
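
A short sketch of how a vectorizer client might consume getIntrinsicIDForCall from the new header; the wrapper below and its policy toward lifetime markers are illustrative, not part of the patch:

    #include "llvm/IR/Instructions.h"
    #include "llvm/Transforms/Utils/VectorUtils.h"
    using namespace llvm;

    // Decide whether a call instruction maps to an intrinsic we can widen.
    static bool callIsWidenable(CallInst *CI, const TargetLibraryInfo *TLI) {
      Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
      // The helper also reports lifetime markers; handle those separately
      // rather than widening them.
      return ID != Intrinsic::not_intrinsic && ID != Intrinsic::lifetime_start &&
             ID != Intrinsic::lifetime_end;
    }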
diff --git a/include/llvm/Transforms/Vectorize.h b/include/llvm/Transforms/Vectorize.h
index e93b39a..aec3993 100644
--- a/include/llvm/Transforms/Vectorize.h
+++ b/include/llvm/Transforms/Vectorize.h
@@ -47,6 +47,9 @@ struct VectorizeConfig {
/// @brief Vectorize floating-point math intrinsics.
bool VectorizeMath;
+ /// @brief Vectorize bit intrinsics.
+ bool VectorizeBitManipulations;
+
/// @brief Vectorize the fused-multiply-add intrinsic.
bool VectorizeFMA;
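
An illustrative sketch of opting in to the new field when building a BB vectorizer from an explicit configuration, assuming the createBBVectorizePass entry point declared in this header:

    #include "llvm/Transforms/Vectorize.h"
    using namespace llvm;

    static BasicBlockPass *makeBBVectorizer() {
      VectorizeConfig C;                  // starts from the command-line defaults
      C.VectorizeBitManipulations = true; // the newly added category
      C.VectorizeMath = true;
      return createBBVectorizePass(C);
    }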
diff --git a/include/llvm/module.modulemap b/include/llvm/module.modulemap
new file mode 100644
index 0000000..1790a72
--- /dev/null
+++ b/include/llvm/module.modulemap
@@ -0,0 +1,177 @@
+module LLVM_Analysis {
+ requires cplusplus
+ umbrella "Analysis"
+ module * { export * }
+ exclude header "Analysis/BlockFrequencyInfoImpl.h"
+}
+
+module LLVM_AsmParser { requires cplusplus umbrella "AsmParser" module * { export * } }
+
+// A module covering CodeGen/ and Target/. These are intertwined
+// and codependent, and thus notionally form a single module.
+module LLVM_Backend {
+ requires cplusplus
+
+ module CodeGen {
+ umbrella "CodeGen"
+ module * { export * }
+
+ // FIXME: Why is this excluded?
+ exclude header "CodeGen/MachineValueType.h"
+
+ // Exclude these; they're intended to be included into only a single
+ // translation unit (or none) and aren't part of this module.
+ exclude header "CodeGen/CommandFlags.h"
+ exclude header "CodeGen/LinkAllAsmWriterComponents.h"
+ exclude header "CodeGen/LinkAllCodegenComponents.h"
+ }
+
+ module Target {
+ umbrella "Target"
+ module * { export * }
+ }
+
+ // FIXME: Where should this go?
+ module Analysis_BlockFrequencyInfoImpl {
+ header "Analysis/BlockFrequencyInfoImpl.h"
+ export *
+ }
+}
+
+module LLVM_Bitcode { requires cplusplus umbrella "Bitcode" module * { export * } }
+module LLVM_Config { requires cplusplus umbrella "Config" module * { export * } }
+module LLVM_DebugInfo { requires cplusplus umbrella "DebugInfo" module * { export * } }
+module LLVM_ExecutionEngine {
+ requires cplusplus
+
+ umbrella "ExecutionEngine"
+ module * { export * }
+
+ // Exclude this; it's an optional component of the ExecutionEngine.
+ exclude header "ExecutionEngine/OProfileWrapper.h"
+
+ // Exclude these; they're intended to be included into only a single
+ // translation unit (or none) and aren't part of this module.
+ exclude header "ExecutionEngine/JIT.h"
+ exclude header "ExecutionEngine/MCJIT.h"
+ exclude header "ExecutionEngine/Interpreter.h"
+}
+
+module LLVM_IR {
+ requires cplusplus
+
+ // FIXME: Is this the right place for these?
+ module Pass { header "Pass.h" export * }
+ module PassSupport { header "PassSupport.h" export * }
+ module PassAnalysisSupport { header "PassAnalysisSupport.h" export * }
+ module PassRegistry { header "PassRegistry.h" export * }
+ module InitializePasses { header "InitializePasses.h" export * }
+
+ umbrella "IR"
+ module * { export * }
+
+ // We cannot have llvm/PassManager.h and llvm/IR/PassManager.h in the same TU,
+ // so we can't include llvm/IR/PassManager.h in the IR module.
+ exclude header "IR/PassManager.h"
+ exclude header "IR/LegacyPassManager.h"
+
+ // Exclude this; it's intended for (repeated) textual inclusion.
+ exclude header "IR/Instruction.def"
+}
+
+module LLVM_LegacyPassManager {
+ requires cplusplus
+ module CompatInterface { header "PassManager.h" export * }
+ module Implementation { header "IR/LegacyPassManager.h" export * }
+}
+
+module LLVM_IR_PassManager {
+ requires cplusplus
+ // FIXME PR19358: This doesn't work! conflict LLVM_LegacyPassManager, "cannot use legacy pass manager and new pass manager in same file"
+ header "IR/PassManager.h"
+ export *
+}
+
+module LLVM_IRReader { requires cplusplus umbrella "IRReader" module * { export * } }
+module LLVM_LineEditor { requires cplusplus umbrella "LineEditor" module * { export * } }
+module LLVM_LTO { requires cplusplus umbrella "LTO" module * { export * } }
+
+module LLVM_MC {
+ requires cplusplus
+
+ // FIXME: Mislayered?
+ module Support_TargetRegistry {
+ header "Support/TargetRegistry.h"
+ export *
+ }
+
+ umbrella "MC"
+ module * { export * }
+
+ // Exclude this; it's fundamentally non-modular.
+ exclude header "MC/MCTargetOptionsCommandFlags.h"
+}
+
+module LLVM_Object { requires cplusplus umbrella "Object" module * { export * } }
+module LLVM_Option { requires cplusplus umbrella "Option" module * { export * } }
+module LLVM_TableGen { requires cplusplus umbrella "TableGen" module * { export * } }
+
+module LLVM_Transforms {
+ requires cplusplus
+ umbrella "Transforms"
+ module * { export * }
+
+ // FIXME: Excluded because it does bad things with the legacy pass manager.
+ exclude header "Transforms/IPO/PassManagerBuilder.h"
+}
+
+// A module covering ADT/ and Support/. These are intertwined and
+// codependent, and notionally form a single module.
+module LLVM_Utils {
+ module ADT {
+ requires cplusplus
+
+ umbrella "ADT"
+ module * { export * }
+ }
+
+ module Support {
+ requires cplusplus
+
+ umbrella "Support"
+ module * { export * }
+
+ // Exclude this; it's only included on Solaris.
+ exclude header "Support/Solaris.h"
+
+ // Exclude this; it's only included on AIX and fundamentally non-modular.
+ exclude header "Support/AIXDataTypesFix.h"
+
+ // Exclude this; it's fundamentally non-modular.
+ exclude header "Support/Debug.h"
+
+ // Exclude this; it's fundamentally non-modular.
+ exclude header "Support/PluginLoader.h"
+
+ // Exclude this; it's a weirdly-factored part of llvm-gcov and conflicts
+ // with the Analysis module (which also defines an llvm::GCOVOptions).
+ exclude header "Support/GCOV.h"
+
+ // FIXME: Mislayered?
+ exclude header "Support/TargetRegistry.h"
+ }
+}
+
+module LLVM_CodeGen_MachineValueType {
+ requires cplusplus
+ header "CodeGen/MachineValueType.h"
+ export *
+}
+
+// This is used for a $src == $build compilation. Otherwise we use
+// LLVM_Support_DataTypes_Build, defined in a module map that is
+// copied into the build area.
+module LLVM_Support_DataTypes_Src {
+ header "llvm/Support/DataTypes.h"
+ export *
+}
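
For orientation, a tiny hypothetical client translation unit: under a clang modules build driven by this map, ordinary #includes are mapped to the top-level modules declared above (LLVM_IR here), while excluded headers such as Support/Debug.h continue to be included textually. Nothing modules-specific appears in client code:

    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    // Builds an empty module; purely illustrative.
    static Module *makeEmptyModule(LLVMContext &Ctx) {
      return new Module("example", Ctx);
    }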
diff --git a/include/llvm/module.modulemap.build b/include/llvm/module.modulemap.build
new file mode 100644
index 0000000..7150fe9
--- /dev/null
+++ b/include/llvm/module.modulemap.build
@@ -0,0 +1,5 @@
+// This is copied into the build area for a $src != $build compilation.
+module LLVM_Support_DataTypes {
+ header "Support/DataTypes.h"
+ export *
+}