From 0cd7ec2dcd8d7ba30bf3ca420b40dac52849876c Mon Sep 17 00:00:00 2001
From: Brian Carlstrom
Date: Wed, 17 Jul 2013 23:40:20 -0700
Subject: Fix cpplint whitespace/blank_line issues

Change-Id: Ice937e95e23dd622c17054551d4ae4cebd0ef8a2
---
 Android.mk                                       |  6 +-
 compiler/dex/arena_allocator.h                   |  3 -
 compiler/dex/arena_bit_vector.h                  |  1 -
 compiler/dex/backend.h                           |  2 -
 compiler/dex/dataflow_iterator.h                 |  3 -
 compiler/dex/growable_array.h                    |  1 -
 compiler/dex/local_value_numbering.cc            |  1 -
 compiler/dex/local_value_numbering.h             |  1 -
 compiler/dex/mir_graph.cc                        |  1 -
 compiler/dex/mir_graph.h                         |  1 -
 compiler/dex/portable/mir_to_gbc.cc              |  6 +-
 compiler/dex/portable/mir_to_gbc.h               |  1 -
 compiler/dex/quick/arm/assemble_arm.cc           |  1 -
 compiler/dex/quick/codegen_util.cc               |  3 -
 compiler/dex/quick/gen_invoke.cc                 |  1 -
 compiler/dex/quick/local_optimizations.cc        | 45 +++++++----
 compiler/dex/quick/mips/codegen_mips.h           |  2 -
 compiler/dex/quick/mir_to_lir.h                  |  1 -
 compiler/dex/quick/x86/codegen_x86.h             |  1 -
 compiler/dex/ssa_transformation.cc               | 97 ++++++++++++++++--------
 compiler/driver/compiler_driver.cc               |  2 +-
 compiler/elf_writer_mclinker.cc                  |  1 -
 compiler/elf_writer_mclinker.h                   |  1 -
 compiler/elf_writer_test.cc                      |  1 -
 compiler/jni/portable/jni_compiler.cc            |  9 +--
 compiler/jni/quick/x86/calling_convention_x86.cc |  1 -
 compiler/llvm/gbc_expander.cc                    |  5 --
 compiler/llvm/ir_builder.h                       |  2 -
 compiler/llvm/llvm_compilation_unit.cc           |  1 -
 compiler/oat_writer.cc                           |  1 -
 dex2oat/dex2oat.cc                               |  1 -
 runtime/atomic_integer.h                         |  3 +-
 runtime/barrier.cc                               |  2 +-
 runtime/barrier_test.cc                          |  9 +--
 runtime/base/histogram-inl.h                     |  1 -
 runtime/base/histogram.h                         |  1 -
 runtime/base/timing_logger.h                     |  3 -
 runtime/debugger.cc                              |  2 -
 runtime/dex_method_iterator.h                    |  1 -
 runtime/gc/accounting/heap_bitmap-inl.h          |  1 -
 runtime/gc/accounting/heap_bitmap.h              |  1 -
 runtime/gc/accounting/space_bitmap.cc            |  4 +-
 runtime/gc/accounting/space_bitmap.h             |  1 +
 runtime/gc/collector/garbage_collector.h         |  1 -
 runtime/gc/space/image_space.h                   |  1 -
 runtime/gc/space/large_object_space.cc           |  8 +-
 runtime/gc/space/large_object_space.h            |  2 +-
 runtime/image_test.cc                            |  1 -
 runtime/interpreter/interpreter.cc               |  1 -
 runtime/jdwp/jdwp_handler.cc                     |  1 -
 runtime/mirror/abstract_method.h                 |  8 +-
 runtime/mirror/class.cc                          |  4 +-
 runtime/oat/runtime/argument_visitor.h           |  3 +-
 runtime/oat_file.cc                              |  1 -
 runtime/runtime_support_llvm.cc                  |  2 -
 runtime/runtime_support_llvm.h                   |  3 -
 runtime/stack.h                                  |  3 +-
 runtime/thread.cc                                |  4 +-
 runtime/thread_pool.cc                           | 13 +---
 runtime/thread_pool.h                            |  4 +-
 runtime/thread_pool_test.cc                      |  4 +-
 runtime/trace.h                                  |  1 +
 runtime/verifier/method_verifier.cc              |  1 -
 runtime/verifier/reg_type.h                      |  4 +
 runtime/verifier/reg_type_test.cc                |  4 +-
 runtime/verifier/register_line.cc                |  1 -
 test/ReferenceMap/stack_walk_refmap_jni.cc       |  2 +-
 test/StackWalk/stack_walk_jni.cc                 |  2 +-
 68 files changed, 134 insertions(+), 177 deletions(-)

diff --git a/Android.mk b/Android.mk
index 27bd894..971eb2f 100644
--- a/Android.mk
+++ b/Android.mk
@@ -334,15 +334,15 @@ endif
 .PHONY: cpplint-art
 cpplint-art:
 	./art/tools/cpplint.py \
-	    --filter=-,+build/header_guard,+whitespace/braces,+whitespace/comma,+runtime/explicit,+whitespace/newline \
-	    $(shell find art -name *.h -o -name *$(ART_CPP_EXTENSION))
+	    --filter=-,+build/header_guard,+whitespace/braces,+whitespace/comma,+runtime/explicit,+whitespace/newline,+whitespace/parens \
+	    $(shell find art -name *.h -o -name *$(ART_CPP_EXTENSION) | grep -v art/compiler/llvm/generated/)
 
 # "mm cpplint-art-aspirational" to see warnings we would like to fix
 .PHONY: cpplint-art-aspirational
 cpplint-art-aspirational:
 	./art/tools/cpplint.py \
 	    --filter=-whitespace/comments,-whitespace/line_length,-build/include,-readability/function,-readability/streams,-readability/todo,-runtime/references \
-	    $(shell find art -name *.h -o -name *$(ART_CPP_EXTENSION))
+	    $(shell find art -name *.h -o -name *$(ART_CPP_EXTENSION) | grep -v art/compiler/llvm/generated/)
 
 ########################################################################
 # targets to switch back and forth from libdvm to libart
diff --git a/compiler/dex/arena_allocator.h b/compiler/dex/arena_allocator.h
index 0ad859e..cd2141a 100644
--- a/compiler/dex/arena_allocator.h
+++ b/compiler/dex/arena_allocator.h
@@ -28,7 +28,6 @@ namespace art {
 
 class ArenaAllocator {
  public:
-
   // Type of allocation for memory tuning.
   enum ArenaAllocKind {
     kAllocMisc,
@@ -57,7 +56,6 @@ class ArenaAllocator {
   void DumpMemStats(std::ostream& os) const;
 
  private:
-
   // Variable-length allocation block.
   struct ArenaMemBlock {
     size_t block_size;
@@ -77,7 +75,6 @@ class ArenaAllocator {
   uint32_t alloc_stats_[kNumAllocKinds];  // Bytes used by various allocation kinds.
   uint32_t lost_bytes_;  // Lost memory at end of too-small region
   uint32_t num_allocations_;
-
 };  // ArenaAllocator
 
diff --git a/compiler/dex/arena_bit_vector.h b/compiler/dex/arena_bit_vector.h
index 7e5c436..de30859 100644
--- a/compiler/dex/arena_bit_vector.h
+++ b/compiler/dex/arena_bit_vector.h
@@ -30,7 +30,6 @@ namespace art {
  */
 class ArenaBitVector {
   public:
-
     class Iterator {
       public:
         explicit Iterator(ArenaBitVector* bit_vector)
diff --git a/compiler/dex/backend.h b/compiler/dex/backend.h
index 7fa8e99..acfec42 100644
--- a/compiler/dex/backend.h
+++ b/compiler/dex/backend.h
@@ -23,7 +23,6 @@ namespace art {
 
 class Backend {
-
   public:
     virtual ~Backend() {};
     virtual void Materialize() = 0;
 
@@ -32,7 +31,6 @@ class Backend {
   protected:
     explicit Backend(ArenaAllocator* arena) : arena_(arena) {};
     ArenaAllocator* const arena_;
-
 };  // Class Backend
 
 }  // namespace art
diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h
index 1946869..e427862 100644
--- a/compiler/dex/dataflow_iterator.h
+++ b/compiler/dex/dataflow_iterator.h
@@ -41,7 +41,6 @@ namespace art {
    */
   class DataflowIterator {
     public:
-
       virtual ~DataflowIterator() {}
 
       // Return the next BasicBlock* to visit.
@@ -81,7 +80,6 @@ namespace art {
       GrowableArray* block_id_list_;
       int idx_;
       bool changed_;
-
   };  // DataflowIterator
 
   class ReachableNodesIterator : public DataflowIterator {
@@ -106,7 +104,6 @@ namespace art {
 
   class PostOrderDfsIterator : public DataflowIterator {
     public:
-
       PostOrderDfsIterator(MIRGraph* mir_graph, bool is_iterative)
           : DataflowIterator(mir_graph, is_iterative, 0,
                              mir_graph->GetNumReachableBlocks(), false) {
diff --git a/compiler/dex/growable_array.h b/compiler/dex/growable_array.h
index 6d26bc2..3bfbcd4 100644
--- a/compiler/dex/growable_array.h
+++ b/compiler/dex/growable_array.h
@@ -46,7 +46,6 @@ enum OatListKind {
 template
 class GrowableArray {
   public:
-
     class Iterator {
       public:
         explicit Iterator(GrowableArray* g_list)
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index b783f3e..35d2923 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -509,7 +509,6 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
         AdvanceMemoryVersion(NO_VALUE, field_ref);
       }
       break;
-
   }
   return res;
 }
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index 09ed7ae..d29600a 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -135,7 +135,6 @@ class LocalValueNumbering {
   ValueMap value_map_;
   MemoryVersionMap memory_version_map_;
   std::set null_checked_;
-
 };
 
 }  // namespace art
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index a9af477..0b3fa46 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -804,7 +804,6 @@ void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks) {
       if (bb->successor_block_list.block_list_type == kPackedSwitch ||
           bb->successor_block_list.block_list_type == kSparseSwitch) {
-
         GrowableArray::Iterator iter(bb->successor_block_list.blocks);
 
         succ_id = 0;
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index f86e130..f6011e0 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -553,7 +553,6 @@ class MIRGraph {
   static const char* extended_mir_op_names_[kMirOpLast - kMirOpFirst];
 
  private:
-
   int FindCommonParent(int block1, int block2);
   void ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1,
                          const ArenaBitVector* src2);
diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc
index 4317d1e..cfd3daf 100644
--- a/compiler/dex/portable/mir_to_gbc.cc
+++ b/compiler/dex/portable/mir_to_gbc.cc
@@ -74,7 +74,6 @@ void MirConverter::DefineValueOnly(::llvm::Value* val, int s_reg) {
   ::llvm::Instruction* inst = ::llvm::dyn_cast< ::llvm::Instruction>(placeholder);
   DCHECK(inst != NULL);
   inst->eraseFromParent();
-
 }
 
 void MirConverter::DefineValue(::llvm::Value* val, int s_reg) {
@@ -1580,8 +1579,7 @@ void MirConverter::HandlePhiNodes(BasicBlock* bb, ::llvm::BasicBlock* llvm_bb) {
 
 /* Extended MIR instructions like PHI */
 void MirConverter::ConvertExtendedMIR(BasicBlock* bb, MIR* mir,
-    ::llvm::BasicBlock* llvm_bb) {
-
+                                      ::llvm::BasicBlock* llvm_bb) {
   switch (static_cast(mir->dalvikInsn.opcode)) {
     case kMirOpPhi: {
       // The llvm Phi node already emitted - just DefineValue() here.
@@ -1706,7 +1704,6 @@ bool MirConverter::BlockBitcodeConversion(BasicBlock* bb) {
   HandlePhiNodes(bb, llvm_bb);
 
   for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
-
     SetDexOffset(mir->offset);
 
     int opcode = mir->dalvikInsn.opcode;
@@ -1795,7 +1792,6 @@ char RemapShorty(char shorty_type) {
 }
 
 ::llvm::FunctionType* MirConverter::GetFunctionType() {
-
   // Get return type
   ::llvm::Type* ret_type = irb_->getJType(RemapShorty(cu_->shorty[0]));
 
diff --git a/compiler/dex/portable/mir_to_gbc.h b/compiler/dex/portable/mir_to_gbc.h
index 2786314..2b681f6 100644
--- a/compiler/dex/portable/mir_to_gbc.h
+++ b/compiler/dex/portable/mir_to_gbc.h
@@ -41,7 +41,6 @@ Backend* PortableCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_gr
                                llvm::LlvmCompilationUnit* const llvm_compilation_unit);
 
 class MirConverter : public Backend {
-
   public:
     // TODO: flesh out and integrate into new world order.
     MirConverter(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena,
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index f4aa1f3..0649c9f 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -1007,7 +1007,6 @@ AssemblerStatus ArmMir2Lir::AssembleInstructions(uintptr_t start_addr) {
   AssemblerStatus res = kSuccess;  // Assume success
 
   for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
-
     if (lir->opcode < 0) {
       /* 1 means padding is needed */
       if ((lir->opcode == kPseudoPseudoAlign4) && (lir->operands[0] == 1)) {
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index e169dc8..8698b1f 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -969,7 +969,6 @@ void Mir2Lir::Materialize() {
 
   /* Method is not empty */
   if (first_lir_insn_) {
-
     // mark the targets of switch statement case labels
     ProcessSwitchTables();
@@ -979,9 +978,7 @@
     if (cu_->verbose) {
       CodegenDump();
     }
-
   }
-
 }
 
 CompiledMethod* Mir2Lir::GetCompiledMethod() {
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 14e395c..fd8f86b 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -736,7 +736,6 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
                                 const MethodReference& target_method,
                                 uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
                                 InvokeType type, bool skip_this) {
-
   // If we can treat it as non-range (Jumbo ops will use range form)
   if (info->num_arg_words <= 5)
     return GenDalvikArgsNoRange(info, call_state, pcrLabel,
diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc
index eb27bf8..2e9c845 100644
--- a/compiler/dex/quick/local_optimizations.cc
+++ b/compiler/dex/quick/local_optimizations.cc
@@ -73,11 +73,14 @@ void Mir2Lir::ConvertMemOpIntoMove(LIR* orig_lir, int dest, int src) {
 void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
   LIR* this_lir;
 
-  if (head_lir == tail_lir) return;
+  if (head_lir == tail_lir) {
+    return;
+  }
 
   for (this_lir = PREV_LIR(tail_lir); this_lir != head_lir; this_lir = PREV_LIR(this_lir)) {
-
-    if (is_pseudo_opcode(this_lir->opcode)) continue;
+    if (is_pseudo_opcode(this_lir->opcode)) {
+      continue;
+    }
 
     int sink_distance = 0;
 
@@ -110,7 +113,9 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
      * Currently only eliminate redundant ld/st for constant and Dalvik
      * register accesses.
      */
-    if (!(this_mem_mask & (ENCODE_LITERAL | ENCODE_DALVIK_REG))) continue;
+    if (!(this_mem_mask & (ENCODE_LITERAL | ENCODE_DALVIK_REG))) {
+      continue;
+    }
 
     uint64_t stop_def_reg_mask = this_lir->def_mask & ~ENCODE_MEM;
     uint64_t stop_use_reg_mask;
@@ -127,12 +132,13 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
     }
 
     for (check_lir = NEXT_LIR(this_lir); check_lir != tail_lir; check_lir = NEXT_LIR(check_lir)) {
-
       /*
        * Skip already dead instructions (whose dataflow information is
        * outdated and misleading).
        */
-      if (check_lir->flags.is_nop || is_pseudo_opcode(check_lir->opcode)) continue;
+      if (check_lir->flags.is_nop || is_pseudo_opcode(check_lir->opcode)) {
+        continue;
+      }
 
       uint64_t check_mem_mask = (check_lir->use_mask | check_lir->def_mask) & ENCODE_MEM;
       uint64_t alias_condition = this_mem_mask & check_mem_mask;
@@ -274,12 +280,15 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) {
   LIR* prev_inst_list[MAX_HOIST_DISTANCE];
 
   /* Empty block */
-  if (head_lir == tail_lir) return;
+  if (head_lir == tail_lir) {
+    return;
+  }
 
   /* Start from the second instruction */
   for (this_lir = NEXT_LIR(head_lir); this_lir != tail_lir; this_lir = NEXT_LIR(this_lir)) {
-
-    if (is_pseudo_opcode(this_lir->opcode)) continue;
+    if (is_pseudo_opcode(this_lir->opcode)) {
+      continue;
+    }
 
     uint64_t target_flags = GetTargetInstFlags(this_lir->opcode);
     /* Skip non-interesting instructions */
@@ -312,12 +321,13 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) {
 
     /* Try to hoist the load to a good spot */
     for (check_lir = PREV_LIR(this_lir); check_lir != head_lir; check_lir = PREV_LIR(check_lir)) {
-
       /*
        * Skip already dead instructions (whose dataflow information is
        * outdated and misleading).
       */
-      if (check_lir->flags.is_nop) continue;
+      if (check_lir->flags.is_nop) {
+        continue;
+      }
 
       uint64_t check_mem_mask = check_lir->def_mask & ENCODE_MEM;
       uint64_t alias_condition = stop_use_all_mask & check_mem_mask;
@@ -355,7 +365,9 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) {
        */
       if (stop_here || !is_pseudo_opcode(check_lir->opcode)) {
         prev_inst_list[next_slot++] = check_lir;
-        if (next_slot == MAX_HOIST_DISTANCE) break;
+        if (next_slot == MAX_HOIST_DISTANCE) {
+          break;
+        }
       }
 
     /* Found a new place to put the load - move it here */
@@ -400,12 +412,16 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) {
          * If the first instruction is a load, don't hoist anything
          * above it since it is unlikely to be beneficial.
          */
-        if (GetTargetInstFlags(cur_lir->opcode) & IS_LOAD) continue;
+        if (GetTargetInstFlags(cur_lir->opcode) & IS_LOAD) {
+          continue;
+        }
         /*
          * If the remaining number of slots is less than LD_LATENCY,
          * insert the hoisted load here.
          */
-        if (slot < LD_LATENCY) break;
+        if (slot < LD_LATENCY) {
+          break;
+        }
       }
       // Don't look across a barrier label
@@ -461,7 +477,6 @@ void Mir2Lir::RemoveRedundantBranches() {
   LIR* this_lir;
 
   for (this_lir = first_lir_insn_; this_lir != last_lir_insn_; this_lir = NEXT_LIR(this_lir)) {
-
     /* Branch to the next instruction */
     if (IsUnconditionalBranch(this_lir)) {
       LIR* next_lir = this_lir;
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 376ad7f..802ff62 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -24,7 +24,6 @@ namespace art {
 
 class MipsMir2Lir : public Mir2Lir {
   public:
-
     MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
 
     // Required for target - codegen utilities.
@@ -175,7 +174,6 @@ class MipsMir2Lir : public Mir2Lir {
 
   private:
     void ConvertShortToLongBranch(LIR* lir);
-
 };
 
 }  // namespace art
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index abb687c..41e5a2d 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -166,7 +166,6 @@ Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
 #define is_pseudo_opcode(opcode) (static_cast(opcode) < 0)
 
 class Mir2Lir : public Backend {
-
   public:
     struct SwitchTable {
       int offset;
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 4fa9dfb..edb5ae5 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -24,7 +24,6 @@ namespace art {
 
 class X86Mir2Lir : public Mir2Lir {
   public:
-
     X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
 
     // Required for target - codegen helpers.
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index ccd2454..3a0cbcc 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -46,9 +46,13 @@ BasicBlock* MIRGraph::NextUnvisitedSuccessor(BasicBlock* bb) {
       GrowableArray::Iterator iterator(bb->successor_block_list.blocks);
       while (true) {
         SuccessorBlockInfo *sbi = iterator.Next();
-        if (sbi == NULL) break;
+        if (sbi == NULL) {
+          break;
+        }
         res = NeedsVisit(sbi->block);
-        if (res != NULL) break;
+        if (res != NULL) {
+          break;
+        }
       }
     }
   }
@@ -112,12 +116,16 @@ void MIRGraph::ComputeDFSOrders() {
  * register idx is defined in BasicBlock bb.
  */
 bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb) {
-  if (bb->data_flow_info == NULL) return false;
+  if (bb->data_flow_info == NULL) {
+    return false;
+  }
 
   ArenaBitVector::Iterator iterator(bb->data_flow_info->def_v);
   while (true) {
     int idx = iterator.Next();
-    if (idx == -1) break;
+    if (idx == -1) {
+      break;
+    }
     /* Block bb defines register idx */
     def_block_matrix_[idx]->SetBit(bb->id);
   }
@@ -222,7 +230,9 @@ bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) {
     GrowableArray::Iterator iterator(bb->successor_block_list.blocks);
     while (true) {
       SuccessorBlockInfo *successor_block_info = iterator.Next();
-      if (successor_block_info == NULL) break;
+      if (successor_block_info == NULL) {
+        break;
+      }
       BasicBlock* succ_bb = successor_block_info->block;
       CheckForDominanceFrontier(bb, succ_bb);
     }
@@ -233,13 +243,17 @@ bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) {
   while (true) {
     //TUNING: hot call to BitVectorIteratorNext
     int dominated_idx = bv_iterator.Next();
-    if (dominated_idx == -1) break;
+    if (dominated_idx == -1) {
+      break;
+    }
     BasicBlock* dominated_bb = GetBasicBlock(dominated_idx);
     ArenaBitVector::Iterator df_iterator(dominated_bb->dom_frontier);
     while (true) {
       //TUNING: hot call to BitVectorIteratorNext
       int df_up_idx = df_iterator.Next();
-      if (df_up_idx == -1) break;
+      if (df_up_idx == -1) {
+        break;
+      }
       BasicBlock* df_up_block = GetBasicBlock(df_up_idx);
       CheckForDominanceFrontier(bb, df_up_block);
     }
@@ -313,7 +327,9 @@ bool MIRGraph::ComputeblockIDom(BasicBlock* bb) {
   /* Scan the rest of the predecessors */
   while (true) {
     BasicBlock* pred_bb = iter.Next();
-    if (!pred_bb) break;
+    if (!pred_bb) {
+      break;
+    }
     if (i_dom_list_[pred_bb->dfs_id] == NOTVISITED) {
       continue;
     } else {
@@ -443,7 +459,9 @@ void MIRGraph::ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src
 bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) {
   ArenaBitVector* temp_dalvik_register_v =
       temp_dalvik_register_v_;
-  if (bb->data_flow_info == NULL) return false;
+  if (bb->data_flow_info == NULL) {
+    return false;
+  }
   temp_dalvik_register_v->Copy(bb->data_flow_info->live_in_v);
   if (bb->taken && bb->taken->data_flow_info)
     ComputeSuccLineIn(temp_dalvik_register_v, bb->taken->data_flow_info->live_in_v,
@@ -455,7 +473,9 @@ bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) {
     GrowableArray::Iterator iterator(bb->successor_block_list.blocks);
     while (true) {
       SuccessorBlockInfo *successor_block_info = iterator.Next();
-      if (successor_block_info == NULL) break;
+      if (successor_block_info == NULL) {
+        break;
+      }
       BasicBlock* succ_bb = successor_block_info->block;
       if (succ_bb->data_flow_info) {
         ComputeSuccLineIn(temp_dalvik_register_v, succ_bb->data_flow_info->live_in_v,
@@ -504,25 +524,27 @@ void MIRGraph::InsertPhiNodes() {
 
     while (true) {
       int idx = iterator.Next();
-      if (idx == -1) break;
-        BasicBlock* def_bb = GetBasicBlock(idx);
+      if (idx == -1) {
+        break;
+      }
+      BasicBlock* def_bb = GetBasicBlock(idx);
 
-        /* Merge the dominance frontier to tmp_blocks */
-        //TUNING: hot call to Union().
-        if (def_bb->dom_frontier != NULL) {
-          tmp_blocks->Union(def_bb->dom_frontier);
-        }
+      /* Merge the dominance frontier to tmp_blocks */
+      //TUNING: hot call to Union().
+      if (def_bb->dom_frontier != NULL) {
+        tmp_blocks->Union(def_bb->dom_frontier);
       }
-      if (!phi_blocks->Equal(tmp_blocks)) {
-        change = true;
-        phi_blocks->Copy(tmp_blocks);
-
-        /*
-         * Iterate through the original blocks plus the new ones in
-         * the dominance frontier.
-         */
-        input_blocks->Copy(phi_blocks);
-        input_blocks->Union(def_block_matrix_[dalvik_reg]);
+    }
+    if (!phi_blocks->Equal(tmp_blocks)) {
+      change = true;
+      phi_blocks->Copy(tmp_blocks);
+
+      /*
+       * Iterate through the original blocks plus the new ones in
+       * the dominance frontier.
+       */
+      input_blocks->Copy(phi_blocks);
+      input_blocks->Union(def_block_matrix_[dalvik_reg]);
     }
   } while (change);
@@ -533,10 +555,14 @@ void MIRGraph::InsertPhiNodes() {
   ArenaBitVector::Iterator iterator(phi_blocks);
   while (true) {
     int idx = iterator.Next();
-    if (idx == -1) break;
+    if (idx == -1) {
+      break;
+    }
     BasicBlock* phi_bb = GetBasicBlock(idx);
     /* Variable will be clobbered before being used - no need for phi */
-    if (!phi_bb->data_flow_info->live_in_v->IsBitSet(dalvik_reg)) continue;
+    if (!phi_bb->data_flow_info->live_in_v->IsBitSet(dalvik_reg)) {
+      continue;
+    }
     MIR *phi = static_cast(arena_->NewMem(sizeof(MIR), true,
                                           ArenaAllocator::kAllocDFInfo));
     phi->dalvikInsn.opcode = static_cast(kMirOpPhi);
@@ -572,7 +598,9 @@ bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) {
   GrowableArray::Iterator iter(bb->predecessors);
   while (true) {
     BasicBlock* pred_bb = iter.Next();
-    if (!pred_bb) break;
+    if (!pred_bb) {
+      break;
+    }
     int ssa_reg = pred_bb->data_flow_info->vreg_to_ssa_map[v_reg];
     uses.push_back(ssa_reg);
     incoming_arc.push_back(pred_bb->id);
@@ -605,8 +633,9 @@ bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) {
 }
 
 void MIRGraph::DoDFSPreOrderSSARename(BasicBlock* block) {
-
-  if (block->visited || block->hidden) return;
+  if (block->visited || block->hidden) {
+    return;
+  }
   block->visited = true;
 
   /* Process this block */
@@ -632,7 +661,9 @@ void MIRGraph::DoDFSPreOrderSSARename(BasicBlock* block) {
   GrowableArray::Iterator iterator(block->successor_block_list.blocks);
   while (true) {
     SuccessorBlockInfo *successor_block_info = iterator.Next();
-    if (successor_block_info == NULL) break;
+    if (successor_block_info == NULL) {
+      break;
+    }
     BasicBlock* succ_bb = successor_block_info->block;
     DoDFSPreOrderSSARename(succ_bb);
     /* Restore SSA map snapshot */
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index d1d21b1..f1082db 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1402,7 +1402,6 @@ class ParallelCompilationManager {
   }
 
  private:
-
   class ForAllClosure : public Task {
    public:
     ForAllClosure(ParallelCompilationManager* manager, size_t begin, size_t end, Callback* callback,
@@ -1423,6 +1422,7 @@ class ParallelCompilationManager {
     virtual void Finalize() {
       delete this;
     }
+
    private:
     const ParallelCompilationManager* const manager_;
     const size_t begin_;
diff --git a/compiler/elf_writer_mclinker.cc b/compiler/elf_writer_mclinker.cc
index 472a606..05f3b02 100644
--- a/compiler/elf_writer_mclinker.cc
+++ b/compiler/elf_writer_mclinker.cc
@@ -307,7 +307,6 @@ void ElfWriterMclinker::AddRuntimeInputs(const std::string& android_root, bool i
   // TODO: ownership of libm_lib_input?
   mcld::Input* libm_lib_input_input = ir_builder_->ReadInput(libm_lib, libm_lib);
   CHECK(libm_lib_input_input != NULL);
-
 }
 #endif
diff --git a/compiler/elf_writer_mclinker.h b/compiler/elf_writer_mclinker.h
index 468fa9a..3b33bc4 100644
--- a/compiler/elf_writer_mclinker.h
+++ b/compiler/elf_writer_mclinker.h
@@ -38,7 +38,6 @@ class CompiledCode;
 
 class ElfWriterMclinker : public ElfWriter {
  public:
-
   // Write an ELF file. Returns true on success, false on failure.
   static bool Create(File* file,
                      std::vector& oat_contents,
diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc
index 4a02b61..e48806e 100644
--- a/compiler/elf_writer_test.cc
+++ b/compiler/elf_writer_test.cc
@@ -22,7 +22,6 @@ namespace art {
 
 class ElfWriterTest : public CommonTest {
-
  protected:
   virtual void SetUp() {
     ReserveImageSpace();
diff --git a/compiler/jni/portable/jni_compiler.cc b/compiler/jni/portable/jni_compiler.cc
index 44d0c2d..57b8a31 100644
--- a/compiler/jni/portable/jni_compiler.cc
+++ b/compiler/jni/portable/jni_compiler.cc
@@ -46,11 +46,10 @@ using namespace runtime_support;
 JniCompiler::JniCompiler(LlvmCompilationUnit* cunit,
                          const CompilerDriver& driver,
                          const DexCompilationUnit* dex_compilation_unit)
-: cunit_(cunit), driver_(&driver), module_(cunit_->GetModule()),
-  context_(cunit_->GetLLVMContext()), irb_(*cunit_->GetIRBuilder()),
-  dex_compilation_unit_(dex_compilation_unit),
-  func_(NULL), elf_func_idx_(0) {
-
+    : cunit_(cunit), driver_(&driver), module_(cunit_->GetModule()),
+      context_(cunit_->GetLLVMContext()), irb_(*cunit_->GetIRBuilder()),
+      dex_compilation_unit_(dex_compilation_unit),
+      func_(NULL), elf_func_idx_(0) {
   // Check: Ensure that JNI compiler will only get "native" method
   CHECK(dex_compilation_unit->IsNative());
 }
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
index b671bd1..45dd429 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.cc
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -159,7 +159,6 @@ size_t X86JniCallingConvention::NumberOfOutgoingStackArgs() {
   // count JNIEnv* and return pc (pushed after Method*)
   size_t total_args = static_args + param_args + 2;
   return total_args;
-
 }
 
 }  // namespace x86
diff --git a/compiler/llvm/gbc_expander.cc b/compiler/llvm/gbc_expander.cc
index b139e32..94cc973 100644
--- a/compiler/llvm/gbc_expander.cc
+++ b/compiler/llvm/gbc_expander.cc
@@ -361,7 +361,6 @@ class GBCExpanderPass : public llvm::FunctionPass {
 
   llvm::Value* ExpandIntrinsic(IntrinsicHelper::IntrinsicId intr_id,
                                llvm::CallInst& call_inst);
-
 };
 
 char GBCExpanderPass::ID = 0;
@@ -710,7 +709,6 @@ llvm::Value* GBCExpanderPass::EmitLoadArrayLength(llvm::Value* array) {
                                  art::mirror::Array::LengthOffset().Int32Value(),
                                  irb_.getJIntTy(),
                                  kTBAAConstJObject);
-
 }
 
 llvm::Value*
@@ -751,7 +749,6 @@ EmitLoadVirtualCalleeMethodObjectAddr(int vtable_idx, llvm::Value* this_addr) {
 llvm::Value* GBCExpanderPass::EmitArrayGEP(llvm::Value* array_addr,
                                            llvm::Value* index_value,
                                            JType elem_jty) {
-
   int data_offset;
   if (elem_jty == kLong || elem_jty == kDouble ||
       (elem_jty == kObject && sizeof(uint64_t) == sizeof(art::mirror::Object*))) {
@@ -1426,7 +1423,6 @@ llvm::Value* GBCExpanderPass::Expand_LongCompare(llvm::Value* src1_value, llvm::
 
 llvm::Value* GBCExpanderPass::EmitCompareResultSelection(llvm::Value* cmp_eq,
                                                          llvm::Value* cmp_lt) {
-
   llvm::Constant* zero = irb_.getJInt(0);
   llvm::Constant* pos1 = irb_.getJInt(1);
   llvm::Constant* neg1 = irb_.getJInt(-1);
@@ -2437,7 +2433,6 @@ EmitCallRuntimeForCalleeMethodObjectAddr(uint32_t callee_method_idx,
                                          llvm::Value* this_addr,
                                          uint32_t dex_pc,
                                          bool is_fast_path) {
-
   llvm::Function* runtime_func = NULL;
 
   switch (invoke_type) {
diff --git a/compiler/llvm/ir_builder.h b/compiler/llvm/ir_builder.h
index 65da005..c81ba27 100644
--- a/compiler/llvm/ir_builder.h
+++ b/compiler/llvm/ir_builder.h
@@ -219,7 +219,6 @@ class IRBuilder : public LLVMIRBuilder {
   ::llvm::Value* CreatePtrDisp(::llvm::Value* base,
                                ::llvm::Value* offset,
                                ::llvm::PointerType* ret_ty) {
-
     ::llvm::Value* base_int = CreatePtrToInt(base, getPtrEquivIntTy());
     ::llvm::Value* result_int = CreateAdd(base_int, offset);
     ::llvm::Value* result = CreateIntToPtr(result_int, ret_ty);
@@ -232,7 +231,6 @@ class IRBuilder : public LLVMIRBuilder {
                                ::llvm::Value* count,
                                ::llvm::Value* offset,
                                ::llvm::PointerType* ret_ty) {
-
     ::llvm::Value* block_offset = CreateMul(bs, count);
     ::llvm::Value* total_offset = CreateAdd(block_offset, offset);
diff --git a/compiler/llvm/llvm_compilation_unit.cc b/compiler/llvm/llvm_compilation_unit.cc
index dfb5724..1f2b977 100644
--- a/compiler/llvm/llvm_compilation_unit.cc
+++ b/compiler/llvm/llvm_compilation_unit.cc
@@ -166,7 +166,6 @@ void LlvmCompilationUnit::DumpBitcodeToString(std::string& str_buffer) {
 }
 
 bool LlvmCompilationUnit::Materialize() {
-
   const bool kDumpBitcode = false;
   if (kDumpBitcode) {
     // Dump the bitcode for debugging
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 0bfa4ec..4c32506 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -83,7 +83,6 @@ OatWriter::OatWriter(const std::vector& dex_files,
     size_oat_dex_file_methods_offsets_(0),
     size_oat_class_status_(0),
     size_oat_class_method_offsets_(0) {
-
   size_t offset = InitOatHeader();
   offset = InitOatDexFiles(offset);
   offset = InitDexFiles(offset);
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 541c916..9e23d3e 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -440,7 +440,6 @@ static size_t OpenDexFiles(const std::vector& dex_filenames,
 // during development when fatal aborts lead to a cascade of failures
 // that result in a deadlock.
 class WatchDog {
-
 // WatchDog defines its own CHECK_PTHREAD_CALL to avoid using Log which uses locks
 #undef CHECK_PTHREAD_CALL
 #define CHECK_WATCH_DOG_PTHREAD_CALL(call, args, what) \
diff --git a/runtime/atomic_integer.h b/runtime/atomic_integer.h
index 117e837..6711722 100644
--- a/runtime/atomic_integer.h
+++ b/runtime/atomic_integer.h
@@ -70,10 +70,11 @@ class AtomicInteger {
     bool success = android_atomic_cas(expected_value, new_value, &value_) == 0;
     return success;
   }
+
  private:
   volatile int32_t value_;
 };
 
-}
+}  // namespace art
 
 #endif  // ART_RUNTIME_ATOMIC_INTEGER_H_
diff --git a/runtime/barrier.cc b/runtime/barrier.cc
index 250d468..a644998 100644
--- a/runtime/barrier.cc
+++ b/runtime/barrier.cc
@@ -60,4 +60,4 @@ Barrier::~Barrier() {
   CHECK(!count_) << "Attempted to destroy barrier with non zero count";
 }
 
-}
+}  // namespace art
diff --git a/runtime/barrier_test.cc b/runtime/barrier_test.cc
index d26ae9e..298ae56 100644
--- a/runtime/barrier_test.cc
+++ b/runtime/barrier_test.cc
@@ -32,9 +32,7 @@ class CheckWaitTask : public Task {
       : barrier_(barrier),
         count1_(count1),
         count2_(count2),
-        count3_(count3) {
-
-  }
+        count3_(count3) {}
 
   void Run(Thread* self) {
     LOG(INFO) << "Before barrier 1 " << *self;
@@ -50,6 +48,7 @@ class CheckWaitTask : public Task {
   virtual void Finalize() {
     delete this;
   }
+
  private:
   Barrier* const barrier_;
   AtomicInteger* const count1_;
@@ -100,9 +99,7 @@ class CheckPassTask : public Task {
   CheckPassTask(Barrier* barrier, AtomicInteger* count, size_t subtasks)
       : barrier_(barrier),
         count_(count),
-        subtasks_(subtasks) {
-
-  }
+        subtasks_(subtasks) {}
 
   void Run(Thread* self) {
     for (size_t i = 0; i < subtasks_; ++i) {
diff --git a/runtime/base/histogram-inl.h b/runtime/base/histogram-inl.h
index bbca603..d572cf9 100644
--- a/runtime/base/histogram-inl.h
+++ b/runtime/base/histogram-inl.h
@@ -212,7 +212,6 @@ inline double Histogram::Percentile(double per) const {
   DCHECK_GT(cumulative_perc_.size(), 0ull);
   size_t idx, upper_idx = 0, lower_idx = 0;
   for (idx = 0; idx < cumulative_perc_.size(); idx++) {
-
     if (per <= cumulative_perc_[idx]) {
       upper_idx = idx;
       break;
diff --git a/runtime/base/histogram.h b/runtime/base/histogram.h
index dfb556b..33a1e65 100644
--- a/runtime/base/histogram.h
+++ b/runtime/base/histogram.h
@@ -30,7 +30,6 @@ namespace art {
 // Designed to be simple and used with timing logger in art.
 template
 class Histogram {
-
   const double kAdjust;
   const Value kBucketWidth;
   const size_t kInitialBucketCount;
diff --git a/runtime/base/timing_logger.h b/runtime/base/timing_logger.h
index 816cbea..0f00a04 100644
--- a/runtime/base/timing_logger.h
+++ b/runtime/base/timing_logger.h
@@ -50,9 +50,7 @@ namespace base {
 }  // namespace base
 
 class CumulativeLogger {
-
  public:
-
   explicit CumulativeLogger(const std::string& name);
   void prepare_stats();
   ~CumulativeLogger();
@@ -68,7 +66,6 @@ class CumulativeLogger {
   void AddNewLogger(const base::NewTimingLogger& logger) LOCKS_EXCLUDED(lock_);
 
  private:
-
   void AddPair(const std::string &label, uint64_t delta_time)
       EXCLUSIVE_LOCKS_REQUIRED(lock_);
   void DumpHistogram(std::ostream &os) EXCLUSIVE_LOCKS_REQUIRED(lock_);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 5a31c87..b502c9a 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -155,7 +155,6 @@ class DebugInstrumentationListener : public instrumentation::InstrumentationList
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     Dbg::PostException(thread, throw_location, catch_method, catch_dex_pc, exception_object);
   }
-
 } gDebugInstrumentationListener;
 
 // JDWP is allowed unless the Zygote forbids it.
@@ -761,7 +760,6 @@ JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id, JDWP::ObjectI
 JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector& class_ids,
                                        std::vector& counts)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-
   std::vector classes;
   counts.clear();
   for (size_t i = 0; i < class_ids.size(); ++i) {
diff --git a/runtime/dex_method_iterator.h b/runtime/dex_method_iterator.h
index e915d77..1975e48 100644
--- a/runtime/dex_method_iterator.h
+++ b/runtime/dex_method_iterator.h
@@ -120,7 +120,6 @@ class DexMethodIterator {
   }
 
  private:
-
   ClassDataItemIterator& GetIterator() const {
     CHECK(it_.get() != NULL);
     return *it_.get();
diff --git a/runtime/gc/accounting/heap_bitmap-inl.h b/runtime/gc/accounting/heap_bitmap-inl.h
index 7622604..5edea95 100644
--- a/runtime/gc/accounting/heap_bitmap-inl.h
+++ b/runtime/gc/accounting/heap_bitmap-inl.h
@@ -40,7 +40,6 @@ inline void HeapBitmap::Visit(const Visitor& visitor) {
     SpaceSetMap* set = *it;
     set->Visit(visitor);
   }
-
 }
 
 }  // namespace accounting
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index f4b725c..1710579 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -106,7 +106,6 @@ class HeapBitmap {
   explicit HeapBitmap(Heap* heap) : heap_(heap) {}
 
  private:
-
   const Heap* const heap_;
 
   void AddContinuousSpaceBitmap(SpaceBitmap* bitmap);
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 19f1128..6edc067 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -64,9 +64,7 @@ SpaceBitmap* SpaceBitmap::Create(const std::string& name, byte* heap_begin, size
 }
 
 // Clean up any resources associated with the bitmap.
-SpaceBitmap::~SpaceBitmap() {
-
-}
+SpaceBitmap::~SpaceBitmap() {}
 
 void SpaceBitmap::SetHeapLimit(uintptr_t new_end) {
   DCHECK(IsAligned(new_end));
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 5a1bfe3..bf4c1ed 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -174,6 +174,7 @@ class SpaceBitmap {
     const size_t index = OffsetToIndex(offset);
     return &bitmap_begin_[index];
   }
+
  private:
   // TODO: heap_end_ is initialized so that the heap bitmap is empty, this doesn't require the -1,
   // however, we document that this is expected on heap_end_
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index a22faac..1684664 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -79,7 +79,6 @@ class GarbageCollector {
   void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
  protected:
-
   // The initial phase. Done without mutators paused.
   virtual void InitializePhase() = 0;
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index fde2b41..bdda9fa 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -78,7 +78,6 @@ class ImageSpace : public MemMapSpace {
   void Dump(std::ostream& os) const;
 
  private:
-
   // Tries to initialize an ImageSpace from the given image path,
   // returning NULL on error.
   //
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index f7d776f..6aedd9c 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -49,9 +49,7 @@ void LargeObjectSpace::CopyLiveToMarked() {
 
 LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
     : LargeObjectSpace(name),
-      lock_("large object map space lock", kAllocSpaceLock) {
-
-}
+      lock_("large object map space lock", kAllocSpaceLock) {}
 
 LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
   return new LargeObjectMapSpace(name);
@@ -147,9 +145,7 @@ FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, byte* beg
   AddFreeChunk(begin_, end_ - begin_, NULL);
 }
 
-FreeListSpace::~FreeListSpace() {
-
-}
+FreeListSpace::~FreeListSpace() {}
 
 void FreeListSpace::AddFreeChunk(void* address, size_t size, Chunk* previous) {
   Chunk* chunk = ChunkFromAddr(address);
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index db845db..20a4867 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -60,7 +60,6 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
   size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs);
 
  protected:
-
   explicit LargeObjectSpace(const std::string& name);
 
   // Approximate number of bytes which have been allocated into the space.
@@ -165,6 +164,7 @@ class FreeListSpace : public LargeObjectSpace {
     DCHECK(m_previous == NULL ||
            (m_previous != NULL && m_previous + m_previous->GetSize() / kAlignment == this));
   }
+
  private:
   size_t m_size;
   Chunk* m_previous;
diff --git a/runtime/image_test.cc b/runtime/image_test.cc
index 9ab1d74..ee50118 100644
--- a/runtime/image_test.cc
+++ b/runtime/image_test.cc
@@ -31,7 +31,6 @@ namespace art {
 
 class ImageTest : public CommonTest {
-
  protected:
   virtual void SetUp() {
     ReserveImageSpace();
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 2fb272c..45314c2 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -366,7 +366,6 @@ static void InterpreterJni(Thread* self, AbstractMethod* method, StringPiece sho
       {
         ScopedThreadStateChange tsc(self, kNative);
         jresult = fn(soa.Env(), rcvr.get(), arg0.get());
-
       }
       result->SetL(soa.Decode(jresult));
       ScopedThreadStateChange tsc(self, kNative);
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 8ef146c..e141496 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -361,7 +361,6 @@ static JdwpError VM_Capabilities(JdwpState*, Request&, ExpandBuf* reply)
 
 static JdwpError VM_CapabilitiesNew(JdwpState*, Request& request, ExpandBuf* reply)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-
   // The first few capabilities are the same as those reported by the older call.
   VM_Capabilities(NULL, request, reply);
diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h
index d909058..bbebece 100644
--- a/runtime/mirror/abstract_method.h
+++ b/runtime/mirror/abstract_method.h
@@ -497,13 +497,9 @@ class MANAGED AbstractMethod : public Object {
   DISALLOW_IMPLICIT_CONSTRUCTORS(AbstractMethod);
 };
 
-class MANAGED Method : public AbstractMethod {
+class MANAGED Method : public AbstractMethod {};
 
-};
-
-class MANAGED Constructor : public AbstractMethod {
-
-};
+class MANAGED Constructor : public AbstractMethod {};
 
 class MANAGED AbstractMethodClass : public Class {
  private:
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 2d2130c..e490d97 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -320,13 +320,11 @@ bool Class::IsFieldClass() const {
   Class* java_lang_Class = GetClass();
   Class* java_lang_reflect_Field = java_lang_Class->GetInstanceField(0)->GetClass();
   return this == java_lang_reflect_Field;
-
 }
 
 bool Class::IsMethodClass() const {
   return (this == AbstractMethod::GetMethodClass()) ||
-      (this == AbstractMethod::GetConstructorClass());
-
+         (this == AbstractMethod::GetConstructorClass());
 }
 
 void Class::SetClassLoader(ClassLoader* new_class_loader) {
diff --git a/runtime/oat/runtime/argument_visitor.h b/runtime/oat/runtime/argument_visitor.h
index d92ff19..aaf93f7 100644
--- a/runtime/oat/runtime/argument_visitor.h
+++ b/runtime/oat/runtime/argument_visitor.h
@@ -199,7 +199,6 @@ class QuickArgumentVisitor {
     uint64_t low_half = *reinterpret_cast(GetParamAddress());
     uint64_t high_half = *reinterpret_cast(stack_args_);
     return (low_half & 0xffffffffULL) | (high_half << 32);
-
   }
 
   void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -244,6 +243,6 @@ class QuickArgumentVisitor {
   bool is_split_long_or_double_;
 };
 
-}
+}  // namespace art
 
 #endif  // ART_RUNTIME_OAT_RUNTIME_ARGUMENT_VISITOR_H_
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index bb8341e..6562633 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -122,7 +122,6 @@ OatFile::~OatFile() {
 }
 
 bool OatFile::Dlopen(const std::string& elf_filename, byte* requested_base) {
-
   char* absolute_path = realpath(elf_filename.c_str(), NULL);
   if (absolute_path == NULL) {
     return false;
diff --git a/runtime/runtime_support_llvm.cc b/runtime/runtime_support_llvm.cc
index cbdefe8..d703db2 100644
--- a/runtime/runtime_support_llvm.cc
+++ b/runtime/runtime_support_llvm.cc
@@ -50,7 +50,6 @@ using namespace art;
 
 extern "C" {
-
 class ShadowFrameCopyVisitor : public StackVisitor {
  public:
   explicit ShadowFrameCopyVisitor(Thread* self) : StackVisitor(self, NULL), prev_frame_(NULL),
@@ -844,5 +843,4 @@ void art_portable_proxy_invoke_handler_from_code(mirror::AbstractMethod* proxy_m
 void art_portable_constructor_barrier() {
   LOG(FATAL) << "Implemented by IRBuilder.";
 }
-
 }  // extern "C"
diff --git a/runtime/runtime_support_llvm.h b/runtime/runtime_support_llvm.h
index 566f7bc..43ea953 100644
--- a/runtime/runtime_support_llvm.h
+++ b/runtime/runtime_support_llvm.h
@@ -18,13 +18,10 @@
 #define ART_RUNTIME_RUNTIME_SUPPORT_LLVM_H_
 
 extern "C" {
-
 //----------------------------------------------------------------------------
 // Runtime Support Function Lookup Callback
 //----------------------------------------------------------------------------
-
 void* art_portable_find_runtime_support_func(void* context, const char* name);
-
 }  // extern "C"
 
 #endif  // ART_RUNTIME_RUNTIME_SUPPORT_LLVM_H_
diff --git a/runtime/stack.h b/runtime/stack.h
index 0e2c4c5..99ba898 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -554,7 +554,6 @@ class StackVisitor {
   static void DescribeStack(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
-
   instrumentation::InstrumentationStackFrame GetInstrumentationStackFrame(uint32_t depth) const;
 
   void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -567,6 +566,7 @@ class StackVisitor {
   size_t num_frames_;
   // Depth of the frame we're currently at.
   size_t cur_depth_;
+
  protected:
   Context* const context_;
 };
@@ -638,6 +638,7 @@ class VmapTable {
     spill_shifts--;  // wind back one as we want the last match
     return spill_shifts;
   }
+
  private:
   const uint16_t* table_;
 };
diff --git a/runtime/thread.cc b/runtime/thread.cc
index dd55195..a1fb862 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2104,9 +2104,7 @@ class ReferenceMapVisitor : public StackVisitor {
 
 class RootCallbackVisitor {
  public:
-  RootCallbackVisitor(RootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) {
-
-  }
+  RootCallbackVisitor(RootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) {}
 
   void operator()(const mirror::Object* obj, size_t, const StackVisitor*) const {
     visitor_(obj, arg_);
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 784a7ca..067ef2d 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -180,10 +180,7 @@ size_t ThreadPool::GetTaskCount(Thread* self) {
 
 WorkStealingWorker::WorkStealingWorker(ThreadPool* thread_pool, const std::string& name,
                                        size_t stack_size)
-    : ThreadPoolWorker(thread_pool, name, stack_size),
-      task_(NULL) {
-
-}
+    : ThreadPoolWorker(thread_pool, name, stack_size), task_(NULL) {}
 
 void WorkStealingWorker::Run() {
   Thread* self = Thread::Current();
@@ -254,9 +251,7 @@ void WorkStealingWorker::Run() {
   }
 }
 
-WorkStealingWorker::~WorkStealingWorker() {
-
-}
+WorkStealingWorker::~WorkStealingWorker() {}
 
 WorkStealingThreadPool::WorkStealingThreadPool(size_t num_threads)
     : ThreadPool(0),
@@ -288,8 +283,6 @@ WorkStealingTask* WorkStealingThreadPool::FindTaskToStealFrom(Thread* self) {
   return NULL;
 }
 
-WorkStealingThreadPool::~WorkStealingThreadPool() {
-
-}
+WorkStealingThreadPool::~WorkStealingThreadPool() {}
 
 }  // namespace art
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index b9f185d..7b626fb 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -124,9 +124,7 @@ class ThreadPool {
 
 class WorkStealingTask : public Task {
  public:
-  WorkStealingTask() : ref_count_(0) {
-
-  }
+  WorkStealingTask() : ref_count_(0) {}
 
   size_t GetRefCount() const {
     return ref_count_;
diff --git a/runtime/thread_pool_test.cc b/runtime/thread_pool_test.cc
index 9b66318..98178bc 100644
--- a/runtime/thread_pool_test.cc
+++ b/runtime/thread_pool_test.cc
@@ -105,9 +105,7 @@ class TreeTask : public Task {
   TreeTask(ThreadPool* const thread_pool, AtomicInteger* count, int depth)
       : thread_pool_(thread_pool),
         count_(count),
-        depth_(depth) {
-
-  }
+        depth_(depth) {}
 
   void Run(Thread* self) {
     if (depth_ > 1) {
diff --git a/runtime/trace.h b/runtime/trace.h
index 5bd6a8d..bd9c140 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -78,6 +78,7 @@ class Trace : public instrumentation::InstrumentationListener {
                                mirror::AbstractMethod* catch_method, uint32_t catch_dex_pc,
                                mirror::Throwable* exception_object)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
  private:
   explicit Trace(File* trace_file, int buffer_size, int flags);
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 5a70f2a..ff7f594 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -3749,7 +3749,6 @@ MethodVerifier::MethodSafeCastSet* MethodVerifier::GenerateSafeCastSet() {
 }
 
 MethodVerifier::PcToConcreteMethodMap* MethodVerifier::GenerateDevirtMap() {
-
   // It is risky to rely on reg_types for sharpening in cases of soft
   // verification, we might end up sharpening to a wrong implementation. Just abort.
   if (!failure_messages_.empty()) {
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index c66e7cb..5b806c4 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -309,6 +309,7 @@ class ConflictType : public RegType {
 
   // Destroy the singleton instance.
   static void Destroy();
+
  private:
   ConflictType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
@@ -338,6 +339,7 @@ class UndefinedType : public RegType {
 
   // Destroy the singleton instance.
   static void Destroy();
+
  private:
   UndefinedType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
@@ -875,6 +877,7 @@ class UnresolvedSuperClass : public UnresolvedType {
   }
 
   std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -909,6 +912,7 @@ class UnresolvedMergedType : public UnresolvedType {
   }
 
   std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index f37edff..d2c9dd6 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -414,7 +414,6 @@ TEST_F(RegTypeReferenceTest, Dump) {
   EXPECT_EQ(expected, unresolved_merged.Dump());
 }
 
-
 TEST_F(RegTypeReferenceTest, JavalangString) {
   // Add a class to the cache then look for the same class and make sure it is a
   // Hit the second time. Then check for the same effect when using
@@ -433,8 +432,8 @@ TEST_F(RegTypeReferenceTest, JavalangString) {
   const RegType& ref_type_unintialized = cache.Uninitialized(ref_type, 0110ull);
   EXPECT_TRUE(ref_type_unintialized.IsUninitializedReference());
   EXPECT_FALSE(ref_type_unintialized.IsUnresolvedAndUninitializedReference());
-
 }
+
 TEST_F(RegTypeReferenceTest, JavalangObject) {
   // Add a class to the cache then look for the same class and make sure it is a
   // Hit the second time. Then I am checking for the same effect when using
@@ -474,7 +473,6 @@ TEST_F(RegTypeReferenceTest, Merging) {
 
 TEST_F(RegTypeTest, ConstPrecision) {
-
   // Tests creating primitive types types.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache_new(true);
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 3a2145b..d2abaac 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -254,7 +254,6 @@ void RegisterLine::CopyResultRegister2(uint32_t vdst) {
     SetRegisterTypeWide(vdst, type_l, type_h);  // also sets the high
     result_[0] = verifier_->GetRegTypeCache()->Undefined().GetId();
     result_[1] = verifier_->GetRegTypeCache()->Undefined().GetId();
-
   }
 }
diff --git a/test/ReferenceMap/stack_walk_refmap_jni.cc b/test/ReferenceMap/stack_walk_refmap_jni.cc
index 9ef4a59..492916e 100644
--- a/test/ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/ReferenceMap/stack_walk_refmap_jni.cc
@@ -280,4 +280,4 @@ extern "C" JNIEXPORT jint JNICALL Java_ReferenceMap_refmap(JNIEnv*, jobject, jin
   return count + 1;
 }
 
-}
+}  // namespace art
diff --git a/test/StackWalk/stack_walk_jni.cc b/test/StackWalk/stack_walk_jni.cc
index 4b472da..fc156b1 100644
--- a/test/StackWalk/stack_walk_jni.cc
+++ b/test/StackWalk/stack_walk_jni.cc
@@ -127,4 +127,4 @@ extern "C" JNIEXPORT jint JNICALL Java_StackWalk2_refmap2(JNIEnv*, jobject, jint
   return count + 1;
 }
 
-}
+}  // namespace art
--
cgit v1.1