author     Brian Carlstrom <bdc@google.com>    2013-07-18 10:50:06 -0700
committer  Brian Carlstrom <bdc@google.com>    2013-07-18 10:50:06 -0700
commit     b9070095218595a5d6a37ef874df2794c1761030 (patch)
tree       cab984d0ac72b8df4915f75277cd7efe0e8c9a3e
parent     08524597899d0bb021c9165218deff51dc88da50 (diff)
parent     df62950e7a32031b82360c407d46a37b94188fbb (diff)
resolved conflicts for merge of df62950e to dalvik-dev
Change-Id: I78fbcfc7a2dcbeccb7557ca27302928d7d00debd
-rw-r--r--  compiler/dex/arena_bit_vector.h | 2
-rw-r--r--  compiler/dex/dataflow_iterator.h | 2
-rw-r--r--  compiler/dex/frontend.cc | 4
-rw-r--r--  compiler/dex/mir_dataflow.cc | 4
-rw-r--r--  compiler/dex/mir_graph.cc | 11
-rw-r--r--  compiler/dex/mir_optimization.cc | 4
-rw-r--r--  compiler/dex/portable/mir_to_gbc.cc | 8
-rw-r--r--  compiler/dex/quick/arm/fp_arm.cc | 2
-rw-r--r--  compiler/dex/quick/arm/int_arm.cc | 10
-rw-r--r--  compiler/dex/quick/arm/utility_arm.cc | 2
-rw-r--r--  compiler/dex/quick/codegen_util.cc | 18
-rw-r--r--  compiler/dex/quick/gen_common.cc | 4
-rw-r--r--  compiler/dex/quick/mips/utility_mips.cc | 6
-rw-r--r--  compiler/dex/quick/mir_to_lir.h | 2
-rw-r--r--  compiler/dex/quick/ralloc_util.cc | 3
-rw-r--r--  compiler/dex/quick/x86/utility_x86.cc | 2
-rw-r--r--  compiler/dex/ssa_transformation.cc | 2
-rw-r--r--  compiler/dex/vreg_analysis.cc | 2
-rw-r--r--  compiler/llvm/llvm_compilation_unit.cc | 2
-rw-r--r--  compiler/oat_writer.cc | 3
-rw-r--r--  runtime/base/mutex-inl.h | 4
-rw-r--r--  runtime/base/mutex.cc | 16
-rw-r--r--  runtime/check_jni.cc | 2
-rw-r--r--  runtime/class_linker_test.cc | 14
-rw-r--r--  runtime/common_test.h | 5
-rw-r--r--  runtime/debugger.cc | 4
-rw-r--r--  runtime/dex_file.h | 2
-rw-r--r--  runtime/dex_instruction.cc | 5
-rw-r--r--  runtime/disassembler_arm.cc | 4
-rw-r--r--  runtime/elf_file.cc | 2
-rw-r--r--  runtime/gc/accounting/atomic_stack.h | 2
-rw-r--r--  runtime/gc/accounting/mod_union_table.cc | 30
-rw-r--r--  runtime/gc/accounting/space_bitmap.h | 4
-rw-r--r--  runtime/gc/accounting/space_bitmap_test.cc | 2
-rw-r--r--  runtime/gc/collector/mark_sweep.cc | 16
-rw-r--r--  runtime/gc/heap.cc | 20
-rw-r--r--  runtime/gc/heap.h | 2
-rw-r--r--  runtime/interpreter/interpreter.cc | 10
-rw-r--r--  runtime/oat.h | 3
-rw-r--r--  runtime/reflection.cc | 9
-rw-r--r--  runtime/stack.cc | 6
-rw-r--r--  runtime/thread.cc | 2
-rw-r--r--  runtime/thread.h | 2
-rw-r--r--  runtime/thread_list.cc | 2
-rw-r--r--  runtime/trace.cc | 2
-rw-r--r--  runtime/utils.h | 6
-rw-r--r--  runtime/verifier/method_verifier.cc | 16
-rw-r--r--  runtime/verifier/reg_type.cc | 4
-rw-r--r--  runtime/verifier/reg_type_cache.cc | 2
-rw-r--r--  runtime/verifier/reg_type_cache.h | 2
-rw-r--r--  runtime/verifier/reg_type_test.cc | 2
-rw-r--r--  test/ReferenceMap/stack_walk_refmap_jni.cc | 4
-rw-r--r--  test/StackWalk/stack_walk_jni.cc | 4
-rwxr-xr-x  tools/cpplint.py | 5
54 files changed, 152 insertions, 156 deletions
diff --git a/compiler/dex/arena_bit_vector.h b/compiler/dex/arena_bit_vector.h
index de30859..2a05b77 100644
--- a/compiler/dex/arena_bit_vector.h
+++ b/compiler/dex/arena_bit_vector.h
@@ -83,7 +83,7 @@ class ArenaBitVector {
OatBitMapKind kind = kBitMapMisc);
~ArenaBitVector() {};
- static void* operator new( size_t size, ArenaAllocator* arena) {
+ static void* operator new(size_t size, ArenaAllocator* arena) {
return arena->NewMem(sizeof(ArenaBitVector), true, ArenaAllocator::kAllocGrowableBitMap);
}
static void operator delete(void* p) {}; // Nop.
diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h
index e427862..847a614 100644
--- a/compiler/dex/dataflow_iterator.h
+++ b/compiler/dex/dataflow_iterator.h
@@ -137,7 +137,7 @@ namespace art {
AllNodesIterator(MIRGraph* mir_graph, bool is_iterative)
: DataflowIterator(mir_graph, is_iterative, 0, 0, false) {
all_nodes_iterator_ =
- new (mir_graph->GetArena()) GrowableArray<BasicBlock*>::Iterator (mir_graph->GetBlockList());
+ new (mir_graph->GetArena()) GrowableArray<BasicBlock*>::Iterator(mir_graph->GetBlockList());
}
void Reset() {
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 0803914..ae160d6 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -53,7 +53,7 @@ LLVMInfo::LLVMInfo() {
llvm_module_ = new ::llvm::Module("art", *llvm_context_);
::llvm::StructType::create(*llvm_context_, "JavaObject");
art::llvm::makeLLVMModuleContents(llvm_module_);
- intrinsic_helper_.reset( new art::llvm::IntrinsicHelper(*llvm_context_, *llvm_module_));
+ intrinsic_helper_.reset(new art::llvm::IntrinsicHelper(*llvm_context_, *llvm_module_));
ir_builder_.reset(new art::llvm::IRBuilder(*llvm_context_, *llvm_module_, *intrinsic_helper_));
}
@@ -276,7 +276,7 @@ CompiledMethod* CompileOneMethod(CompilerDriver& compiler,
#if defined(ART_USE_PORTABLE_COMPILER)
, llvm_compilation_unit
#endif
- );
+ ); // NOLINT(whitespace/parens)
}
} // namespace art
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 9632388..be19d5a 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -1122,9 +1122,9 @@ void MIRGraph::CompilerInitializeSSAConversion() {
size_t num_dalvik_reg = cu_->num_dalvik_registers;
ssa_base_vregs_ = new (arena_) GrowableArray<int>(arena_, num_dalvik_reg + GetDefCount() + 128,
- kGrowableArraySSAtoDalvikMap);
+ kGrowableArraySSAtoDalvikMap);
ssa_subscripts_ = new (arena_) GrowableArray<int>(arena_, num_dalvik_reg + GetDefCount() + 128,
- kGrowableArraySSAtoDalvikMap);
+ kGrowableArraySSAtoDalvikMap);
/*
* Initial number of SSA registers is equal to the number of Dalvik
* registers.
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 0b3fa46..634c576 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -410,7 +410,7 @@ void MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, int cur_offset
(insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
kPackedSwitch : kSparseSwitch;
cur_block->successor_block_list.blocks =
- new (arena_)GrowableArray<SuccessorBlockInfo*>(arena_, size, kGrowableArraySuccessorBlocks);
+ new (arena_) GrowableArray<SuccessorBlockInfo*>(arena_, size, kGrowableArraySuccessorBlocks);
for (i = 0; i < size; i++) {
BasicBlock *case_block = FindBlock(cur_offset + target_table[i], /* split */ true,
@@ -427,8 +427,8 @@ void MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, int cur_offset
}
/* Fall-through case */
- BasicBlock* fallthrough_block = FindBlock( cur_offset + width, /* split */ false,
- /* create */ true, /* immed_pred_block_p */ NULL);
+ BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* split */ false,
+ /* create */ true, /* immed_pred_block_p */ NULL);
cur_block->fall_through = fallthrough_block;
fallthrough_block->predecessors->Insert(cur_block);
}
@@ -1146,8 +1146,9 @@ BasicBlock* MIRGraph::NewMemBB(BBType block_type, int block_id) {
bb->block_type = block_type;
bb->id = block_id;
// TUNING: better estimate of the exit block predecessors?
- bb->predecessors = new (arena_)
- GrowableArray<BasicBlock*>(arena_, (block_type == kExitBlock) ? 2048 : 2, kGrowableArrayPredecessors);
+ bb->predecessors = new (arena_) GrowableArray<BasicBlock*>(arena_,
+ (block_type == kExitBlock) ? 2048 : 2,
+ kGrowableArrayPredecessors);
bb->successor_block_list.block_list_type = kNotUsed;
block_id_map_.Put(block_id, block_id);
return bb;
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 882b81a..f83bbb2 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -228,7 +228,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
MIR* mir_next = mir->next;
Instruction::Code br_opcode = mir_next->dalvikInsn.opcode;
ConditionCode ccode = kCondNv;
- switch(br_opcode) {
+ switch (br_opcode) {
case Instruction::IF_EQZ:
ccode = kCondEq;
break;
@@ -255,7 +255,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
(mir->ssa_rep->defs[0] == mir_next->ssa_rep->uses[0]) &&
(GetSSAUseCount(mir->ssa_rep->defs[0]) == 1)) {
mir_next->dalvikInsn.arg[0] = ccode;
- switch(opcode) {
+ switch (opcode) {
case Instruction::CMPL_FLOAT:
mir_next->dalvikInsn.opcode =
static_cast<Instruction::Code>(kMirOpFusedCmplFloat);
diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc
index cfd3daf..85ffec5 100644
--- a/compiler/dex/portable/mir_to_gbc.cc
+++ b/compiler/dex/portable/mir_to_gbc.cc
@@ -297,7 +297,7 @@ void MirConverter::EmitSuspendCheck() {
::llvm::Value* src1, ::llvm::Value* src2) {
::llvm::Value* res = NULL;
DCHECK_EQ(src1->getType(), src2->getType());
- switch(cc) {
+ switch (cc) {
case kCondEq: res = irb_->CreateICmpEQ(src1, src2); break;
case kCondNe: res = irb_->CreateICmpNE(src1, src2); break;
case kCondLt: res = irb_->CreateICmpSLT(src1, src2); break;
@@ -369,7 +369,7 @@ void MirConverter::ConvertCompareZeroAndBranch(BasicBlock* bb,
::llvm::Value* MirConverter::GenArithOp(OpKind op, bool is_long,
::llvm::Value* src1, ::llvm::Value* src2) {
::llvm::Value* res = NULL;
- switch(op) {
+ switch (op) {
case kOpAdd: res = irb_->CreateAdd(src1, src2); break;
case kOpSub: res = irb_->CreateSub(src1, src2); break;
case kOpRsub: res = irb_->CreateSub(src2, src1); break;
@@ -393,7 +393,7 @@ void MirConverter::ConvertFPArithOp(OpKind op, RegLocation rl_dest,
::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg);
::llvm::Value* src2 = GetLLVMValue(rl_src2.orig_sreg);
::llvm::Value* res = NULL;
- switch(op) {
+ switch (op) {
case kOpAdd: res = irb_->CreateFAdd(src1, src2); break;
case kOpSub: res = irb_->CreateFSub(src1, src2); break;
case kOpMul: res = irb_->CreateFMul(src1, src2); break;
@@ -1781,7 +1781,7 @@ char RemapShorty(char shorty_type) {
* types (which is valid so long as we always do a real expansion of passed
* arguments and field loads).
*/
- switch(shorty_type) {
+ switch (shorty_type) {
case 'Z' : shorty_type = 'I'; break;
case 'B' : shorty_type = 'I'; break;
case 'S' : shorty_type = 'I'; break;
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index 2c626a0..8f73f0c 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -193,7 +193,7 @@ void ArmMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
}
NewLIR0(kThumb2Fmstat);
ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
- switch(ccode) {
+ switch (ccode) {
case kCondEq:
case kCondNe:
break;
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index e12df6c..3a367c9 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -129,7 +129,7 @@ void ArmMir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
int32_t low_reg = rl_src1.low_reg;
int32_t high_reg = rl_src1.high_reg;
- switch(ccode) {
+ switch (ccode) {
case kCondEq:
case kCondNe:
LIR* target;
@@ -270,7 +270,7 @@ void ArmMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
- switch(ccode) {
+ switch (ccode) {
case kCondEq:
OpCondBranch(kCondNe, not_taken);
break;
@@ -436,7 +436,7 @@ bool ArmMir2Lir::SmallLiteralDivide(Instruction::Code dalvik_opcode,
int r_hi = AllocTemp();
int r_lo = AllocTemp();
NewLIR4(kThumb2Smull, r_lo, r_hi, r_magic, rl_src.low_reg);
- switch(pattern) {
+ switch (pattern) {
case Divide3:
OpRegRegRegShift(kOpSub, rl_result.low_reg, r_hi,
rl_src.low_reg, EncodeShift(kArmAsr, 31));
@@ -1002,7 +1002,7 @@ void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
return;
}
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- switch(opcode) {
+ switch (opcode) {
case Instruction::SHL_LONG:
case Instruction::SHL_LONG_2ADDR:
if (shift_amount == 1) {
@@ -1090,7 +1090,7 @@ void ArmMir2Lir::GenArithImmOpLong(Instruction::Code opcode,
int32_t mod_imm_hi = ModifiedImmediate(val_hi);
// Only a subset of add/sub immediate instructions set carry - so bail if we don't fit
- switch(opcode) {
+ switch (opcode) {
case Instruction::ADD_LONG:
case Instruction::ADD_LONG_2ADDR:
case Instruction::SUB_LONG:
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 80f597d..305a147 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -549,7 +549,7 @@ LIR* ArmMir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) {
ArmOpcode opcode = kThumbBkpt;
switch (op) {
case kOpAdd:
- if ( !neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
+ if (!neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
DCHECK_EQ((value & 0x3), 0);
return NewLIR1(kThumbAddSpI7, value >> 2);
} else if (short_form) {
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 8698b1f..7a59644 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -55,7 +55,7 @@ bool Mir2Lir::FastInstance(uint32_t field_idx, int& field_offset, bool& is_volat
}
/* Convert an instruction to a NOP */
-void Mir2Lir::NopLIR( LIR* lir) {
+void Mir2Lir::NopLIR(LIR* lir) {
lir->flags.is_nop = true;
}
@@ -190,10 +190,10 @@ void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr) {
}
if (lir->use_mask && (!lir->flags.is_nop || dump_nop)) {
- DUMP_RESOURCE_MASK(DumpResourceMask((LIR* ) lir, lir->use_mask, "use"));
+ DUMP_RESOURCE_MASK(DumpResourceMask((LIR*) lir, lir->use_mask, "use"));
}
if (lir->def_mask && (!lir->flags.is_nop || dump_nop)) {
- DUMP_RESOURCE_MASK(DumpResourceMask((LIR* ) lir, lir->def_mask, "def"));
+ DUMP_RESOURCE_MASK(DumpResourceMask((LIR*) lir, lir->def_mask, "def"));
}
}
@@ -336,10 +336,10 @@ LIR* Mir2Lir::AddWideData(LIR* *constant_list_p, int val_lo, int val_hi) {
}
static void PushWord(std::vector<uint8_t>&buf, int data) {
- buf.push_back( data & 0xff);
- buf.push_back( (data >> 8) & 0xff);
- buf.push_back( (data >> 16) & 0xff);
- buf.push_back( (data >> 24) & 0xff);
+ buf.push_back(data & 0xff);
+ buf.push_back((data >> 8) & 0xff);
+ buf.push_back((data >> 16) & 0xff);
+ buf.push_back((data >> 24) & 0xff);
}
static void AlignBuffer(std::vector<uint8_t>&buf, size_t offset) {
@@ -454,8 +454,8 @@ void Mir2Lir::InstallFillArrayData() {
if (tab_rec == NULL) break;
AlignBuffer(code_buffer_, tab_rec->offset);
for (int i = 0; i < (tab_rec->size + 1) / 2; i++) {
- code_buffer_.push_back( tab_rec->table[i] & 0xFF);
- code_buffer_.push_back( (tab_rec->table[i] >> 8) & 0xFF);
+ code_buffer_.push_back(tab_rec->table[i] & 0xFF);
+ code_buffer_.push_back((tab_rec->table[i] >> 8) & 0xFF);
}
}
}
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index a34d2a9..d1bfd2d 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -279,7 +279,7 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
int r_dst = AllocTemp();
int r_idx = AllocTemp();
int r_val = INVALID_REG;
- switch(cu_->instruction_set) {
+ switch (cu_->instruction_set) {
case kThumb2:
r_val = TargetReg(kLr);
break;
@@ -1311,7 +1311,7 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
GenImmedCheck(kCondEq, TargetReg(kArg1), 0, kThrowDivZero);
}
// NOTE: callout here is not a safepoint
- CallHelper(r_tgt, func_offset, false /* not a safepoint */ );
+ CallHelper(r_tgt, func_offset, false /* not a safepoint */);
if (op == kOpDiv)
rl_result = GetReturn(false);
else
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 8510006..127d191 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -107,7 +107,7 @@ LIR* MipsMir2Lir::LoadConstantNoClobber(int r_dest, int value) {
}
LIR* MipsMir2Lir::OpUnconditionalBranch(LIR* target) {
- LIR* res = NewLIR1(kMipsB, 0 /* offset to be patched during assembly*/ );
+ LIR* res = NewLIR1(kMipsB, 0 /* offset to be patched during assembly*/);
res->target = target;
return res;
}
@@ -642,8 +642,8 @@ LIR* MipsMir2Lir::OpMem(OpKind op, int rBase, int disp) {
return NULL;
}
-LIR* MipsMir2Lir::StoreBaseIndexedDisp( int rBase, int r_index, int scale, int displacement,
- int r_src, int r_src_hi, OpSize size, int s_reg) {
+LIR* MipsMir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+ int r_src, int r_src_hi, OpSize size, int s_reg) {
LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for MIPS";
return NULL;
}
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 41e5a2d..7765eaa 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -233,7 +233,7 @@ class Mir2Lir : public Backend {
RegisterClass oat_reg_class_by_size(OpSize size) {
return (size == kUnsignedHalf || size == kSignedHalf || size == kUnsignedByte ||
- size == kSignedByte ) ? kCoreReg : kAnyReg;
+ size == kSignedByte) ? kCoreReg : kAnyReg;
}
size_t CodeBufferSizeInBytes() {
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 4c91223..bc3740a 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -1021,8 +1021,7 @@ void Mir2Lir::DoPromotion() {
if (!(cu_->disable_opt & (1 << kPromoteRegs))) {
// Promote FpRegs
- for (int i = 0; (i < num_regs) &&
- (FpRegs[i].count >= promotion_threshold ); i++) {
+ for (int i = 0; (i < num_regs) && (FpRegs[i].count >= promotion_threshold); i++) {
int p_map_idx = SRegToPMap(FpRegs[i].s_reg);
if (promotion_map_[p_map_idx].fp_location != kLocPhysReg) {
int reg = AllocPreservedFPReg(FpRegs[i].s_reg,
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 6376e3b..75367a3 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -100,7 +100,7 @@ LIR* X86Mir2Lir::LoadConstantNoClobber(int r_dest, int value) {
}
LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target) {
- LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly*/ );
+ LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly*/);
res->target = target;
return res;
}
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 3a0cbcc..7739e29 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -266,7 +266,7 @@ bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) {
void MIRGraph::InitializeDominationInfo(BasicBlock* bb) {
int num_total_blocks = GetBasicBlockListCount();
- if (bb->dominators == NULL ) {
+ if (bb->dominators == NULL) {
bb->dominators = new (arena_) ArenaBitVector(arena_, num_total_blocks,
false /* expandable */, kBitMapDominators);
bb->i_dominated = new (arena_) ArenaBitVector(arena_, num_total_blocks,
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index 10bbd1f..f361dd7 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -160,7 +160,7 @@ bool MIRGraph::InferTypeAndSize(BasicBlock* bb) {
if ((mir->dalvikInsn.opcode == Instruction::RETURN) ||
(mir->dalvikInsn.opcode == Instruction::RETURN_WIDE) ||
(mir->dalvikInsn.opcode == Instruction::RETURN_OBJECT)) {
- switch(cu_->shorty[0]) {
+ switch (cu_->shorty[0]) {
case 'I':
changed |= SetCore(ssa_rep->uses[0], true);
break;
diff --git a/compiler/llvm/llvm_compilation_unit.cc b/compiler/llvm/llvm_compilation_unit.cc
index 1f2b977..592059e 100644
--- a/compiler/llvm/llvm_compilation_unit.cc
+++ b/compiler/llvm/llvm_compilation_unit.cc
@@ -114,7 +114,7 @@ LlvmCompilationUnit::LlvmCompilationUnit(const CompilerLLVM* compiler_llvm, size
irb_.reset(new IRBuilder(*context_, *module_, *intrinsic_helper_));
// We always need a switch case, so just use a normal function.
- switch(GetInstructionSet()) {
+ switch (GetInstructionSet()) {
default:
runtime_support_.reset(new RuntimeSupportBuilder(*context_, *module_, *irb_));
break;
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 4c32506..da05c49 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -399,8 +399,7 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index,
fp_spill_mask,
mapping_table_offset,
vmap_table_offset,
- gc_map_offset
- );
+ gc_map_offset);
if (compiler_driver_->IsImage()) {
ClassLinker* linker = Runtime::Current()->GetClassLinker();
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index 07157da..b3f5092 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -148,7 +148,7 @@ inline void ReaderWriterMutex::SharedLock(Thread* self) {
}
android_atomic_dec(&num_pending_readers_);
}
- } while(!done);
+ } while (!done);
#else
CHECK_MUTEX_CALL(pthread_rwlock_rdlock, (&rwlock_));
#endif
@@ -176,7 +176,7 @@ inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
} else {
LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_;
}
- } while(!done);
+ } while (!done);
#else
CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 25c0b9e..1df0207 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -178,7 +178,7 @@ void BaseMutex::RecordContention(uint64_t blocked_tid, uint64_t owner_tid, uint6
do {
slot = cur_content_log_entry_;
new_slot = (slot + 1) % kContentionLogSize;
- } while(!cur_content_log_entry_.CompareAndSwap(slot, new_slot));
+ } while (!cur_content_log_entry_.CompareAndSwap(slot, new_slot));
contention_log_[new_slot].blocked_tid = blocked_tid;
contention_log_[new_slot].owner_tid = owner_tid;
contention_log_[new_slot].count = 1;
@@ -312,7 +312,7 @@ void Mutex::ExclusiveLock(Thread* self) {
}
android_atomic_dec(&num_contenders_);
}
- } while(!done);
+ } while (!done);
DCHECK_EQ(state_, 1);
exclusive_owner_ = SafeGetTid(self);
#else
@@ -344,7 +344,7 @@ bool Mutex::ExclusiveTryLock(Thread* self) {
} else {
return false;
}
- } while(!done);
+ } while (!done);
DCHECK_EQ(state_, 1);
exclusive_owner_ = SafeGetTid(self);
#else
@@ -404,7 +404,7 @@ void Mutex::ExclusiveUnlock(Thread* self) {
_exit(1);
}
}
- } while(!done);
+ } while (!done);
#else
CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_));
#endif
@@ -513,7 +513,7 @@ void ReaderWriterMutex::ExclusiveLock(Thread* self) {
}
android_atomic_dec(&num_pending_writers_);
}
- } while(!done);
+ } while (!done);
DCHECK_EQ(state_, -1);
exclusive_owner_ = SafeGetTid(self);
#else
@@ -545,7 +545,7 @@ void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
} else {
LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_;
}
- } while(!done);
+ } while (!done);
#else
CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
@@ -583,7 +583,7 @@ bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32
}
android_atomic_dec(&num_pending_writers_);
}
- } while(!done);
+ } while (!done);
exclusive_owner_ = SafeGetTid(self);
#else
timespec ts;
@@ -616,7 +616,7 @@ bool ReaderWriterMutex::SharedTryLock(Thread* self) {
// Owner holds it exclusively.
return false;
}
- } while(!done);
+ } while (!done);
#else
int result = pthread_rwlock_tryrdlock(&rwlock_);
if (result == EBUSY) {
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 403a2eb..7429ab1 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -401,7 +401,7 @@ class ScopedCheck {
*
* Use the kFlag_NullableUtf flag where 'u' field(s) are nullable.
*/
- void Check(bool entry, const char* fmt0, ...) SHARED_LOCKS_REQUIRED (Locks::mutator_lock_) {
+ void Check(bool entry, const char* fmt0, ...) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
va_list ap;
const mirror::AbstractMethod* traceMethod = NULL;
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index e5844b0..3c1cd78 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -937,14 +937,14 @@ TEST_F(ClassLinkerTest, StaticFields) {
// TODO: Remove EXPECT_FALSE when GCC can handle EXPECT_EQ
// http://code.google.com/p/googletest/issues/detail?id=322
- EXPECT_FALSE( s0->GetBoolean(statics));
- EXPECT_EQ(6, s1->GetByte(statics));
- EXPECT_EQ('b', s2->GetChar(statics));
- EXPECT_EQ(-535, s3->GetShort(statics));
- EXPECT_EQ(2000000001, s4->GetInt(statics));
+ EXPECT_FALSE(s0->GetBoolean(statics));
+ EXPECT_EQ(6, s1->GetByte(statics));
+ EXPECT_EQ('b', s2->GetChar(statics));
+ EXPECT_EQ(-535, s3->GetShort(statics));
+ EXPECT_EQ(2000000001, s4->GetInt(statics));
EXPECT_EQ(0x34567890abcdef12LL, s5->GetLong(statics));
- EXPECT_EQ(0.75, s6->GetFloat(statics));
- EXPECT_EQ(16777219, s7->GetDouble(statics));
+ EXPECT_EQ(0.75, s6->GetFloat(statics));
+ EXPECT_EQ(16777219, s7->GetDouble(statics));
EXPECT_TRUE(s8->GetObject(statics)->AsString()->Equals("robot"));
}
diff --git a/runtime/common_test.h b/runtime/common_test.h
index 73c47b5..e735e27 100644
--- a/runtime/common_test.h
+++ b/runtime/common_test.h
@@ -178,8 +178,7 @@ class CommonTest : public testing::Test {
fp_spill_mask,
reinterpret_cast<uint32_t>(mapping_table),
reinterpret_cast<uint32_t>(vmap_table),
- reinterpret_cast<uint32_t>(gc_map)
- );
+ reinterpret_cast<uint32_t>(gc_map));
}
void MakeExecutable(mirror::AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -308,7 +307,7 @@ class CommonTest : public testing::Test {
options.push_back(std::make_pair("-Xcheck:jni", reinterpret_cast<void*>(NULL)));
options.push_back(std::make_pair(min_heap_string.c_str(), reinterpret_cast<void*>(NULL)));
options.push_back(std::make_pair(max_heap_string.c_str(), reinterpret_cast<void*>(NULL)));
- if(!Runtime::Create(options, false)) {
+ if (!Runtime::Create(options, false)) {
LOG(FATAL) << "Failed to create runtime";
return;
}
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index b502c9a..4fbee51 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -184,7 +184,7 @@ static Dbg::HpsgWhat gDdmNhsgWhat;
static ObjectRegistry* gRegistry = NULL;
// Recent allocation tracking.
-static Mutex gAllocTrackerLock DEFAULT_MUTEX_ACQUIRED_AFTER ("AllocTracker lock");
+static Mutex gAllocTrackerLock DEFAULT_MUTEX_ACQUIRED_AFTER("AllocTracker lock");
AllocRecord* Dbg::recent_allocation_records_ PT_GUARDED_BY(gAllocTrackerLock) = NULL; // TODO: CircularBuffer<AllocRecord>
static size_t gAllocRecordMax GUARDED_BY(gAllocTrackerLock) = 0;
static size_t gAllocRecordHead GUARDED_BY(gAllocTrackerLock) = 0;
@@ -2761,7 +2761,7 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId objec
VLOG(jdwp) << " Control has returned from event thread";
/* wait for thread to re-suspend itself */
- SuspendThread(thread_id, false /* request_suspension */ );
+ SuspendThread(thread_id, false /* request_suspension */);
self->TransitionFromSuspendedToRunnable();
}
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 28e06cc..8edeb18 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -1039,7 +1039,7 @@ class ClassDataItemIterator {
}
InvokeType GetMethodInvokeType(const DexFile::ClassDef& class_def) const {
if (HasNextDirectMethod()) {
- if ((GetMemberAccessFlags() & kAccStatic) != 0 ) {
+ if ((GetMemberAccessFlags() & kAccStatic) != 0) {
return kStatic;
} else {
return kDirect;
diff --git a/runtime/dex_instruction.cc b/runtime/dex_instruction.cc
index 427baf2..6b41511 100644
--- a/runtime/dex_instruction.cc
+++ b/runtime/dex_instruction.cc
@@ -56,12 +56,11 @@ int const Instruction::kInstructionVerifyFlags[] = {
int const Instruction::kInstructionSizeInCodeUnits[] = {
#define INSTRUCTION_SIZE(opcode, c, p, format, r, i, a, v) \
- (( opcode == NOP ) ? -1 : \
+ ((opcode == NOP) ? -1 : \
((format >= k10x) && (format <= k10t)) ? 1 : \
((format >= k20t) && (format <= k22c)) ? 2 : \
((format >= k32x) && (format <= k3rc)) ? 3 : \
- ( format == k51l ) ? 5 : -1 \
- ),
+ (format == k51l) ? 5 : -1),
#include "dex_instruction_list.h"
DEX_INSTRUCTION_LIST(INSTRUCTION_SIZE)
#undef DEX_INSTRUCTION_LIST
diff --git a/runtime/disassembler_arm.cc b/runtime/disassembler_arm.cc
index 172bef8..3c9cb6e 100644
--- a/runtime/disassembler_arm.cc
+++ b/runtime/disassembler_arm.cc
@@ -1184,7 +1184,7 @@ size_t DisassemblerArm::DumpThumb16(std::ostream& os, const uint8_t* instr_ptr)
ThumbRegister Rm(instr, 6);
ThumbRegister Rn(instr, 3);
ThumbRegister Rt(instr, 0);
- switch(opB) {
+ switch (opB) {
case 0: opcode << "str"; break;
case 1: opcode << "strh"; break;
case 2: opcode << "strb"; break;
@@ -1206,7 +1206,7 @@ size_t DisassemblerArm::DumpThumb16(std::ostream& os, const uint8_t* instr_ptr)
uint16_t opB = (instr >> 11) & 1;
ThumbRegister Rn(instr, 3);
ThumbRegister Rt(instr, 0);
- switch(opA) {
+ switch (opA) {
case 6:
imm5 <<= 2;
opcode << (opB == 0 ? "str" : "ldr");
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index da122e6..6ce36e8 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -366,7 +366,7 @@ static unsigned elfhash(const char *_name) {
const unsigned char *name = (const unsigned char *) _name;
unsigned h = 0, g;
- while(*name) {
+ while (*name) {
h = (h << 4) + *name++;
g = h & 0xf0000000;
h ^= g;
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index d677ade..92d9ea2 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -66,7 +66,7 @@ class AtomicStack {
// Stack overflow.
return false;
}
- } while(!back_index_.compare_and_swap(index, index + 1));
+ } while (!back_index_.compare_and_swap(index, index + 1));
begin_[index] = value;
return true;
}
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index aa02f82..91c9253 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -44,8 +44,8 @@ class MarkIfReachesAllocspaceVisitor {
}
// Extra parameters are required since we use this same visitor signature for checking objects.
- void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
- bool /* is_static */) const {
+ void operator()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
+ bool /* is_static */) const {
// TODO: Optimize?
// TODO: C++0x auto
const std::vector<space::ContinuousSpace*>& spaces = heap_->GetContinuousSpaces();
@@ -70,7 +70,7 @@ class ModUnionVisitor {
bitmap_(bitmap) {
}
- void operator ()(const Object* obj) const
+ void operator()(const Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
Locks::mutator_lock_) {
DCHECK(obj != NULL);
@@ -90,7 +90,7 @@ class ModUnionClearCardSetVisitor {
: cleared_cards_(cleared_cards) {
}
- inline void operator ()(byte* card, byte expected_value, byte new_value) const {
+ inline void operator()(byte* card, byte expected_value, byte new_value) const {
if (expected_value == CardTable::kCardDirty) {
cleared_cards_->insert(card);
}
@@ -106,7 +106,7 @@ class ModUnionClearCardVisitor {
: cleared_cards_(cleared_cards) {
}
- void operator ()(byte* card, byte expected_card, byte new_card) const {
+ void operator()(byte* card, byte expected_card, byte new_card) const {
if (expected_card == CardTable::kCardDirty) {
cleared_cards_->push_back(card);
}
@@ -120,7 +120,7 @@ class ModUnionScanImageRootVisitor {
explicit ModUnionScanImageRootVisitor(collector::MarkSweep* const mark_sweep)
: mark_sweep_(mark_sweep) {}
- void operator ()(const Object* root) const
+ void operator()(const Object* root) const
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(root != NULL);
@@ -147,8 +147,8 @@ class AddToReferenceArrayVisitor {
}
// Extra parameters are required since we use this same visitor signature for checking objects.
- void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
- bool /* is_static */) const {
+ void operator()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
+ bool /* is_static */) const {
// Only add the reference if it is non null and fits our criteria.
if (ref != NULL && mod_union_table_->AddReference(obj, ref)) {
references_->push_back(ref);
@@ -168,7 +168,7 @@ class ModUnionReferenceVisitor {
references_(references) {
}
- void operator ()(const Object* obj) const
+ void operator()(const Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
DCHECK(obj != NULL);
// We don't have an early exit since we use the visitor pattern, an early
@@ -191,8 +191,8 @@ class CheckReferenceVisitor {
// Extra parameters are required since we use this same visitor signature for checking objects.
// TODO: Fixme when anotatalysis works with visitors.
- void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
- bool /* is_static */) const
+ void operator()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
+ bool /* is_static */) const
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
Heap* heap = mod_union_table_->GetHeap();
if (ref != NULL && mod_union_table_->AddReference(obj, ref) &&
@@ -216,13 +216,13 @@ class CheckReferenceVisitor {
class ModUnionCheckReferences {
public:
- explicit ModUnionCheckReferences (ModUnionTableReferenceCache* mod_union_table,
- const std::set<const Object*>& references)
+ explicit ModUnionCheckReferences(ModUnionTableReferenceCache* mod_union_table,
+ const std::set<const Object*>& references)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
: mod_union_table_(mod_union_table), references_(references) {
}
- void operator ()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+ void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
DCHECK(obj != NULL);
CheckReferenceVisitor visitor(mod_union_table_, references_);
@@ -333,7 +333,7 @@ void ModUnionTableReferenceCache::MarkReferences(collector::MarkSweep* mark_swee
typedef SafeMap<const byte*, std::vector<const mirror::Object*> >::const_iterator It;
for (It it = references_.begin(); it != references_.end(); ++it) {
typedef std::vector<const mirror::Object*>::const_iterator It2;
- for (It2 it_ref = it->second.begin(); it_ref != it->second.end(); ++it_ref ) {
+ for (It2 it_ref = it->second.begin(); it_ref != it->second.end(); ++it_ref) {
mark_sweep->MarkRoot(*it_ref);
++count;
}
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index bf4c1ed..77f93a2 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -103,7 +103,7 @@ class SpaceBitmap {
: bitmap_(bitmap) {
}
- void operator ()(mirror::Object* obj) const {
+ void operator()(mirror::Object* obj) const {
bitmap_->Clear(obj);
}
private:
@@ -112,7 +112,7 @@ class SpaceBitmap {
template <typename Visitor>
void VisitRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const {
- for (; visit_begin < visit_end; visit_begin += kAlignment ) {
+ for (; visit_begin < visit_end; visit_begin += kAlignment) {
visitor(reinterpret_cast<mirror::Object*>(visit_begin));
}
}
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index d00d7c2..516a449 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -46,7 +46,7 @@ class BitmapVerify {
begin_(begin),
end_(end) {}
- void operator ()(const mirror::Object* obj) {
+ void operator()(const mirror::Object* obj) {
EXPECT_TRUE(obj >= begin_);
EXPECT_TRUE(obj <= end_);
EXPECT_TRUE(bitmap_->Test(obj) == ((reinterpret_cast<uintptr_t>(obj) & 0xF) != 0));
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 865ee13..8a72b60 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -72,7 +72,7 @@ class SetFingerVisitor {
public:
explicit SetFingerVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}
- void operator ()(void* finger) const {
+ void operator()(void* finger) const {
mark_sweep_->SetFinger(reinterpret_cast<Object*>(finger));
}
@@ -522,7 +522,7 @@ class CheckObjectVisitor {
public:
explicit CheckObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}
- void operator ()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const
+ void operator()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const
NO_THREAD_SAFETY_ANALYSIS {
if (kDebugLocking) {
Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
@@ -563,7 +563,7 @@ class ScanObjectVisitor {
explicit ScanObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}
// TODO: Fixme when anotatalysis works with visitors.
- void operator ()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+ void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
if (kDebugLocking) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
@@ -607,7 +607,7 @@ class CheckBitmapVisitor {
public:
explicit CheckBitmapVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
- void operator ()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+ void operator()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
if (kDebugLocking) {
Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
}
@@ -1079,8 +1079,8 @@ class MarkObjectVisitor {
explicit MarkObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}
// TODO: Fixme when anotatalysis works with visitors.
- void operator ()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
- bool /* is_static */) const
+ void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
+ bool /* is_static */) const
NO_THREAD_SAFETY_ANALYSIS {
if (kDebugLocking) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
@@ -1146,8 +1146,8 @@ class MarkStackChunk : public Task {
public:
explicit MarkObjectParallelVisitor(MarkStackChunk* chunk_task) : chunk_task_(chunk_task) {}
- void operator ()(const Object* /* obj */, const Object* ref,
- const MemberOffset& /* offset */, bool /* is_static */) const {
+ void operator()(const Object* /* obj */, const Object* ref,
+ const MemberOffset& /* offset */, bool /* is_static */) const {
if (ref != NULL && chunk_task_->mark_sweep_->MarkObjectParallel(ref)) {
chunk_task_->MarkStackPush(ref);
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 6423a0d..021d8e7 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -232,7 +232,7 @@ void Heap::DeleteThreadPool() {
// Sort spaces based on begin address
struct ContinuousSpaceSorter {
- bool operator ()(const space::ContinuousSpace* a, const space::ContinuousSpace* b) const {
+ bool operator()(const space::ContinuousSpace* a, const space::ContinuousSpace* b) const {
return a->Begin() < b->Begin();
}
};
@@ -895,8 +895,8 @@ class ReferringObjectsFinder {
}
// For MarkSweep::VisitObjectReferences.
- void operator ()(const mirror::Object* referrer, const mirror::Object* object,
- const MemberOffset&, bool) const {
+ void operator()(const mirror::Object* referrer, const mirror::Object* object,
+ const MemberOffset&, bool) const {
if (object == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
referring_objects_.push_back(const_cast<mirror::Object*>(referrer));
}
@@ -1166,7 +1166,7 @@ static void RootMatchesObjectVisitor(const mirror::Object* root, void* arg) {
class ScanVisitor {
public:
- void operator ()(const mirror::Object* obj) const {
+ void operator()(const mirror::Object* obj) const {
LOG(INFO) << "Would have rescanned object " << obj;
}
};
@@ -1184,8 +1184,8 @@ class VerifyReferenceVisitor {
// TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for smarter
// analysis on visitors.
- void operator ()(const mirror::Object* obj, const mirror::Object* ref,
- const MemberOffset& offset, bool /* is_static */) const
+ void operator()(const mirror::Object* obj, const mirror::Object* ref,
+ const MemberOffset& offset, bool /* is_static */) const
NO_THREAD_SAFETY_ANALYSIS {
// Verify that the reference is live.
if (UNLIKELY(ref != NULL && !IsLive(ref))) {
@@ -1265,7 +1265,7 @@ class VerifyObjectVisitor {
public:
explicit VerifyObjectVisitor(Heap* heap) : heap_(heap), failed_(false) {}
- void operator ()(const mirror::Object* obj) const
+ void operator()(const mirror::Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
// Note: we are verifying the references in obj but not obj itself, this is because obj must
// be live or else how did we find it in the live bitmap?
@@ -1312,8 +1312,8 @@ class VerifyReferenceCardVisitor {
// TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
// annotalysis on visitors.
- void operator ()(const mirror::Object* obj, const mirror::Object* ref, const MemberOffset& offset,
- bool is_static) const NO_THREAD_SAFETY_ANALYSIS {
+ void operator()(const mirror::Object* obj, const mirror::Object* ref, const MemberOffset& offset,
+ bool is_static) const NO_THREAD_SAFETY_ANALYSIS {
// Filter out class references since changing an object's class does not mark the card as dirty.
// Also handles large objects, since the only reference they hold is a class reference.
if (ref != NULL && !ref->IsClass()) {
@@ -1379,7 +1379,7 @@ class VerifyLiveStackReferences {
: heap_(heap),
failed_(false) {}
- void operator ()(const mirror::Object* obj) const
+ void operator()(const mirror::Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
collector::MarkSweep::VisitObjectReferences(obj, visitor);
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index aaf449b..feccba3 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -70,7 +70,7 @@ namespace space {
class AgeCardVisitor {
public:
- byte operator ()(byte card) const {
+ byte operator()(byte card) const {
if (card == accounting::CardTable::kCardDirty) {
return card - 1;
} else {
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 45314c2..376d3be 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -969,11 +969,11 @@ static inline const Instruction* FindNextInstructionFollowingException(Thread* s
return JValue(); /* Handled in caller. */ \
} \
} else { \
- inst = inst-> next_function (); \
+ inst = inst->next_function(); \
}
static void UnexpectedOpcode(const Instruction* inst, MethodHelper& mh)
- __attribute__ ((cold, noreturn, noinline));
+ __attribute__((cold, noreturn, noinline));
static void UnexpectedOpcode(const Instruction* inst, MethodHelper& mh)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -989,7 +989,7 @@ static void UnexpectedOpcode(const Instruction* inst, MethodHelper& mh)
template<bool do_access_check>
static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register)
- NO_THREAD_SAFETY_ANALYSIS __attribute__ ((hot));
+ NO_THREAD_SAFETY_ANALYSIS __attribute__((hot));
template<bool do_access_check>
static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
@@ -1254,7 +1254,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte
if (UNLIKELY(s == NULL)) {
HANDLE_PENDING_EXCEPTION();
} else {
- shadow_frame.SetVRegReference( inst->VRegA_21c(), s);
+ shadow_frame.SetVRegReference(inst->VRegA_21c(), s);
inst = inst->Next_2xx();
}
break;
@@ -1265,7 +1265,7 @@ static JValue ExecuteImpl(Thread* self, MethodHelper& mh, const DexFile::CodeIte
if (UNLIKELY(s == NULL)) {
HANDLE_PENDING_EXCEPTION();
} else {
- shadow_frame.SetVRegReference( inst->VRegA_31c(), s);
+ shadow_frame.SetVRegReference(inst->VRegA_31c(), s);
inst = inst->Next_3xx();
}
break;
diff --git a/runtime/oat.h b/runtime/oat.h
index fb28962..4bd1871 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -97,8 +97,7 @@ class PACKED(4) OatMethodOffsets {
uint32_t fp_spill_mask,
uint32_t mapping_table_offset,
uint32_t vmap_table_offset,
- uint32_t gc_map_offset
- );
+ uint32_t gc_map_offset);
~OatMethodOffsets();
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 467575c..359b539 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -206,8 +206,7 @@ bool ConvertPrimitiveValue(const ThrowLocation* throw_location, bool unbox_for_r
ThrowClassCastException(throw_location,
StringPrintf("Couldn't convert result of type %s to %s",
PrettyDescriptor(srcType).c_str(),
- PrettyDescriptor(dstType).c_str()
- ).c_str());
+ PrettyDescriptor(dstType).c_str()).c_str());
}
return false;
}
@@ -297,8 +296,7 @@ static bool UnboxPrimitive(const ThrowLocation* throw_location, mirror::Object*
ThrowClassCastException(throw_location,
StringPrintf("Couldn't convert result of type %s to %s",
PrettyTypeOf(o).c_str(),
- PrettyDescriptor(dst_class).c_str()
- ).c_str());
+ PrettyDescriptor(dst_class).c_str()).c_str());
}
return false;
}
@@ -359,8 +357,7 @@ static bool UnboxPrimitive(const ThrowLocation* throw_location, mirror::Object*
StringPrintf("%s has type %s, got %s",
UnboxingFailureKind(m, index, f).c_str(),
PrettyDescriptor(dst_class).c_str(),
- PrettyDescriptor(src_descriptor.c_str()).c_str()
- ).c_str());
+ PrettyDescriptor(src_descriptor.c_str()).c_str()).c_str());
return false;
}
diff --git a/runtime/stack.cc b/runtime/stack.cc
index f4ae81d..35cd895 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -185,12 +185,12 @@ void StackVisitor::SetVReg(mirror::AbstractMethod* m, uint16_t vreg, uint32_t ne
}
uintptr_t StackVisitor::GetGPR(uint32_t reg) const {
- DCHECK (cur_quick_frame_ != NULL) << "This is a quick frame routine";
+ DCHECK(cur_quick_frame_ != NULL) << "This is a quick frame routine";
return context_->GetGPR(reg);
}
void StackVisitor::SetGPR(uint32_t reg, uintptr_t value) {
- DCHECK (cur_quick_frame_ != NULL) << "This is a quick frame routine";
+ DCHECK(cur_quick_frame_ != NULL) << "This is a quick frame routine";
context_->SetGPR(reg, value);
}
@@ -341,7 +341,7 @@ void StackVisitor::WalkStack(bool include_transitions) {
}
cur_depth_++;
cur_shadow_frame_ = cur_shadow_frame_->GetLink();
- } while(cur_shadow_frame_ != NULL);
+ } while (cur_shadow_frame_ != NULL);
}
if (include_transitions) {
bool should_continue = VisitFrame();
diff --git a/runtime/thread.cc b/runtime/thread.cc
index f0d5417..d1e33b8 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1779,7 +1779,7 @@ class CatchBlockStackVisitor : public StackVisitor {
m->GetDexMethodIndex(), m, m->GetAccessFlags(), false, true);
verifier.Verify();
std::vector<int32_t> kinds = verifier.DescribeVRegs(dex_pc);
- for(uint16_t reg = 0; reg < num_regs; reg++) {
+ for (uint16_t reg = 0; reg < num_regs; reg++) {
VRegKind kind = static_cast<VRegKind>(kinds.at(reg * 2));
switch (kind) {
case kUndefined:
diff --git a/runtime/thread.h b/runtime/thread.h
index 3b66943..388178f 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -104,7 +104,7 @@ class PACKED(4) Thread {
static Thread* Current() {
// We rely on Thread::Current returning NULL for a detached thread, so it's not obvious
// that we can replace this with a direct %fs access on x86.
- if(!is_started_) {
+ if (!is_started_) {
return NULL;
} else {
void* thread = pthread_getspecific(Thread::pthread_key_self_);
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 59c38b4..7aa835a 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -468,7 +468,7 @@ void ThreadList::WaitForOtherNonDaemonThreadsToExit() {
// Wait for another thread to exit before re-checking.
thread_exit_cond_.Wait(self);
}
- } while(!all_threads_are_daemons);
+ } while (!all_threads_are_daemons);
}
void ThreadList::SuspendAllDaemonThreads() {
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 3293290..2227b8d 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -230,7 +230,7 @@ void Trace::Start(const char* trace_filename, int trace_fd, int buffer_size, int
// Create Trace object.
{
MutexLock mu(self, *Locks::trace_lock_);
- if(the_trace_ != NULL) {
+ if (the_trace_ != NULL) {
LOG(ERROR) << "Trace already in progress, ignoring this request";
} else {
the_trace_ = new Trace(trace_file.release(), buffer_size, flags);
diff --git a/runtime/utils.h b/runtime/utils.h
index a08e465..72597f5 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -352,18 +352,18 @@ bool IsValidOatFilename(const std::string& filename);
class VoidFunctor {
public:
template <typename A>
- inline void operator () (A a) const {
+ inline void operator() (A a) const {
UNUSED(a);
}
template <typename A, typename B>
- inline void operator () (A a, B b) const {
+ inline void operator() (A a, B b) const {
UNUSED(a);
UNUSED(b);
}
template <typename A, typename B, typename C>
- inline void operator () (A a, B b, C c) const {
+ inline void operator() (A a, B b, C c) const {
UNUSED(a);
UNUSED(b);
UNUSED(c);
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index ff7f594..59de9b3 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -1014,12 +1014,12 @@ bool MethodVerifier::VerifyCodeFlow() {
verifier::MethodVerifier::SetDexGcMap(ref, *dex_gc_map);
MethodVerifier::MethodSafeCastSet* method_to_safe_casts = GenerateSafeCastSet();
- if(method_to_safe_casts != NULL ) {
+ if (method_to_safe_casts != NULL) {
SetSafeCastMap(ref, method_to_safe_casts);
}
MethodVerifier::PcToConcreteMethodMap* pc_to_concrete_method = GenerateDevirtMap();
- if(pc_to_concrete_method != NULL ) {
+ if (pc_to_concrete_method != NULL) {
SetDevirtMap(ref, pc_to_concrete_method);
}
return true;
@@ -1824,7 +1824,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
uint32_t instance_of_idx = 0;
if (0 != work_insn_idx_) {
instance_of_idx = work_insn_idx_ - 1;
- while(0 != instance_of_idx && !insn_flags_[instance_of_idx].IsOpcode()) {
+ while (0 != instance_of_idx && !insn_flags_[instance_of_idx].IsOpcode()) {
instance_of_idx--;
}
CHECK(insn_flags_[instance_of_idx].IsOpcode());
@@ -1854,7 +1854,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
// which is not done because of the multiple inheritance implications.
const RegType& cast_type = ResolveClassAndCheckAccess(instance_of_inst->VRegC_22c());
- if(!cast_type.IsUnresolvedTypes() && !cast_type.GetClass()->IsInterface()) {
+ if (!cast_type.IsUnresolvedTypes() && !cast_type.GetClass()->IsInterface()) {
RegisterLine* update_line = new RegisterLine(code_item_->registers_size_, this);
if (inst->Opcode() == Instruction::IF_EQZ) {
fallthrough_line.reset(update_line);
@@ -1868,7 +1868,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
// register encoding space of instance-of, and propagate type information to the source
// of the move-object.
uint32_t move_idx = instance_of_idx - 1;
- while(0 != move_idx && !insn_flags_[move_idx].IsOpcode()) {
+ while (0 != move_idx && !insn_flags_[move_idx].IsOpcode()) {
move_idx--;
}
CHECK(insn_flags_[move_idx].IsOpcode());
@@ -3766,7 +3766,7 @@ MethodVerifier::PcToConcreteMethodMap* MethodVerifier::GenerateDevirtMap() {
bool is_interface = (inst->Opcode() == Instruction::INVOKE_INTERFACE) ||
(inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE);
- if(!is_interface && !is_virtual) {
+ if (!is_interface && !is_virtual) {
continue;
}
// Get reg type for register holding the reference to the object that will be dispatched upon.
@@ -3792,7 +3792,7 @@ MethodVerifier::PcToConcreteMethodMap* MethodVerifier::GenerateDevirtMap() {
}
mirror::AbstractMethod* abstract_method =
dex_cache_->GetResolvedMethod(is_range ? inst->VRegB_3rc() : inst->VRegB_35c());
- if(abstract_method == NULL) {
+ if (abstract_method == NULL) {
// If the method is not found in the cache this means that it was never found
// by ResolveMethodAndCheckAccess() called when verifying invoke_*.
continue;
@@ -3986,7 +3986,7 @@ const MethodReference* MethodVerifier::GetDevirtMap(const MethodReference& ref,
// Look up the PC in the map, get the concrete method to execute and return its reference.
MethodVerifier::PcToConcreteMethodMap::const_iterator pc_to_concrete_method = it->second->find(dex_pc);
- if(pc_to_concrete_method != it->second->end()) {
+ if (pc_to_concrete_method != it->second->end()) {
return &(pc_to_concrete_method->second);
} else {
return NULL;
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 1c61a29..8418928 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -211,7 +211,7 @@ void LongHiType::Destroy() {
}
LongLoType* LongLoType::GetInstance() {
- CHECK (instance_ != NULL);
+ CHECK(instance_ != NULL);
return instance_;
}
@@ -355,7 +355,7 @@ BooleanType* BooleanType::GetInstance() {
}
void BooleanType::Destroy() {
- if(BooleanType::instance != NULL) {
+ if (BooleanType::instance != NULL) {
delete instance;
instance = NULL;
}
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 6013250..22c585c 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -377,7 +377,7 @@ const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) {
entry = new UnresolvedReferenceType(descriptor.c_str(), entries_.size());
} else {
mirror::Class* klass = uninit_type.GetClass();
- if(uninit_type.IsUninitializedThisReference() && !klass->IsFinal()) {
+ if (uninit_type.IsUninitializedThisReference() && !klass->IsFinal()) {
// For uninitialized "this reference" look for reference types that are not precise.
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 814dff7..2411758 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -44,7 +44,7 @@ class RegTypeCache {
}
~RegTypeCache();
static void Init() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if(!RegTypeCache::primitive_initialized_) {
+ if (!RegTypeCache::primitive_initialized_) {
CHECK_EQ(RegTypeCache::primitive_count_, 0);
CreatePrimitiveTypes();
CHECK_EQ(RegTypeCache::primitive_count_, kNumPrimitives);
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index d2c9dd6..a24c3c9 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -405,7 +405,7 @@ TEST_F(RegTypeReferenceTest, Dump) {
std::string expected = "Unresolved Reference: java.lang.DoesNotExist";
EXPECT_EQ(expected, unresolved_ref.Dump());
expected = "Precise Reference: java.lang.String";
- EXPECT_EQ( expected, resolved_ref.Dump());
+ EXPECT_EQ(expected, resolved_ref.Dump());
expected ="Uninitialized Reference: java.lang.String Allocation PC: 10";
EXPECT_EQ(expected, resolved_unintialiesd.Dump());
expected = "Unresolved And Uninitialized Reference: java.lang.DoesNotExist Allocation PC: 12";
diff --git a/test/ReferenceMap/stack_walk_refmap_jni.cc b/test/ReferenceMap/stack_walk_refmap_jni.cc
index 492916e..ccdbffd 100644
--- a/test/ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/ReferenceMap/stack_walk_refmap_jni.cc
@@ -33,8 +33,8 @@
namespace art {
#define IS_IN_REF_BITMAP(mh, ref_bitmap, reg) \
- ( ((reg) < mh.GetCodeItem()->registers_size_) && \
- (( *((ref_bitmap) + (reg)/8) >> ((reg) % 8) ) & 0x01) )
+ (((reg) < mh.GetCodeItem()->registers_size_) && \
+ ((*((ref_bitmap) + (reg)/8) >> ((reg) % 8) ) & 0x01))
#define CHECK_REGS_CONTAIN_REFS(...) \
do { \
diff --git a/test/StackWalk/stack_walk_jni.cc b/test/StackWalk/stack_walk_jni.cc
index fc156b1..d100c10 100644
--- a/test/StackWalk/stack_walk_jni.cc
+++ b/test/StackWalk/stack_walk_jni.cc
@@ -31,8 +31,8 @@
namespace art {
#define REG(mh, reg_bitmap, reg) \
- ( ((reg) < mh.GetCodeItem()->registers_size_) && \
- (( *((reg_bitmap) + (reg)/8) >> ((reg) % 8) ) & 0x01) )
+ (((reg) < mh.GetCodeItem()->registers_size_) && \
+ ((*((reg_bitmap) + (reg)/8) >> ((reg) % 8) ) & 0x01))
#define CHECK_REGS(...) if (!IsShadowFrame()) { \
int t[] = {__VA_ARGS__}; \
diff --git a/tools/cpplint.py b/tools/cpplint.py
index 30c7128..da5a938 100755
--- a/tools/cpplint.py
+++ b/tools/cpplint.py
@@ -1526,7 +1526,10 @@ def CheckSpacingForFunctionCall(filename, line, linenum, error):
# Note that we assume the contents of [] to be short enough that
# they'll never need to wrap.
if ( # Ignore control structures.
- not Search(r'\b(if|for|while|switch|return|delete)\b', fncall) and
+ # BEGIN android-changed
+ # not Search(r'\b(if|for|while|switch|return|delete)\b', fncall) and
+ not Search(r'\b(if|for|while|switch|return|delete|new)\b', fncall) and
+ # END android-changed
# Ignore pointers/references to functions.
not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
# Ignore pointers/references to arrays.
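
The android-changed hunk above adds "new" to the keywords cpplint exempts from its whitespace-before-parenthesis check on function calls; without that exemption, the placement-new spellings introduced elsewhere in this change (e.g. "new (arena_) GrowableArray<...>") would themselves be reported as extra space before '('. A minimal standalone sketch of the effect, using Python's re module in place of cpplint's Search wrapper and two hypothetical sample strings:

import re

# Keywords exempted from the call-spacing check (the android-changed pattern).
IGNORED = re.compile(r'\b(if|for|while|switch|return|delete|new)\b')

samples = [
    'new (arena_) GrowableArray<int>(arena_, 128)',  # placement new: exempt
    'FindBlock (cur_offset + width)',                # ordinary call: still checked
]
for fncall in samples:
    status = 'exempt' if IGNORED.search(fncall) else 'checked'
    print(status, fncall)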