author     Brian Carlstrom <bdc@google.com>    2013-07-17 22:39:56 -0700
committer  Brian Carlstrom <bdc@google.com>    2013-07-18 00:13:18 -0700
commit     df62950e7a32031b82360c407d46a37b94188fbb (patch)
tree       038bf95a2ce296ae3e0c30a131ac22c0986f0f52 /compiler/dex
parent     0cd7ec2dcd8d7ba30bf3ca420b40dac52849876c (diff)
Fix cpplint whitespace/parens issues
Change-Id: Ifc678d59a8bed24ffddde5a0e543620b17b0aba9
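The cpplint whitespace/parens checks addressed by this commit flag a space just inside an opening parenthesis (e.g. "operator new( size_t size"), a space before a closing parenthesis, and a missing space between a control keyword and its parenthesis ("switch(op)"); where a flagged line is intentional, such as the #ifdef'd trailing argument list in frontend.cc, the warning is suppressed with a trailing // NOLINT(whitespace/parens) comment. The snippet below is an illustrative sketch only, not part of this change, showing the corrected spelling of these patterns:

// Illustrative example (not from this commit): parenthesis spacing that
// satisfies cpplint's whitespace/parens check.
#include <cstdio>

static int Classify(int value) {
  switch (value) {                    // space after "switch", none after '('
    case 0:  return -1;
    default: return value + 1;        // no space before ')'
  }
}

int main() {
  std::printf("%d\n", Classify(41));  // call sites: no space after '(' either
  return 0;
}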
Diffstat (limited to 'compiler/dex')
-rw-r--r--  compiler/dex/arena_bit_vector.h          |   2
-rw-r--r--  compiler/dex/dataflow_iterator.h         |   2
-rw-r--r--  compiler/dex/frontend.cc                 |   4
-rw-r--r--  compiler/dex/mir_dataflow.cc             |   4
-rw-r--r--  compiler/dex/mir_graph.cc                |  11
-rw-r--r--  compiler/dex/mir_optimization.cc         |   4
-rw-r--r--  compiler/dex/portable/mir_to_gbc.cc      |   8
-rw-r--r--  compiler/dex/quick/arm/fp_arm.cc         |   2
-rw-r--r--  compiler/dex/quick/arm/int_arm.cc        |  10
-rw-r--r--  compiler/dex/quick/arm/utility_arm.cc    |   2
-rw-r--r--  compiler/dex/quick/codegen_util.cc       |  18
-rw-r--r--  compiler/dex/quick/gen_common.cc         |   4
-rw-r--r--  compiler/dex/quick/mips/utility_mips.cc  |   6
-rw-r--r--  compiler/dex/quick/mir_to_lir.h          |   2
-rw-r--r--  compiler/dex/quick/ralloc_util.cc        |   3
-rw-r--r--  compiler/dex/quick/x86/utility_x86.cc    |   2
-rw-r--r--  compiler/dex/ssa_transformation.cc       |   2
-rw-r--r--  compiler/dex/vreg_analysis.cc            |   2
18 files changed, 44 insertions, 44 deletions
diff --git a/compiler/dex/arena_bit_vector.h b/compiler/dex/arena_bit_vector.h
index de30859..2a05b77 100644
--- a/compiler/dex/arena_bit_vector.h
+++ b/compiler/dex/arena_bit_vector.h
@@ -83,7 +83,7 @@ class ArenaBitVector {
OatBitMapKind kind = kBitMapMisc);
~ArenaBitVector() {};
- static void* operator new( size_t size, ArenaAllocator* arena) {
+ static void* operator new(size_t size, ArenaAllocator* arena) {
return arena->NewMem(sizeof(ArenaBitVector), true, ArenaAllocator::kAllocGrowableBitMap);
}
static void operator delete(void* p) {}; // Nop.
diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h
index e427862..847a614 100644
--- a/compiler/dex/dataflow_iterator.h
+++ b/compiler/dex/dataflow_iterator.h
@@ -137,7 +137,7 @@ namespace art {
AllNodesIterator(MIRGraph* mir_graph, bool is_iterative)
: DataflowIterator(mir_graph, is_iterative, 0, 0, false) {
all_nodes_iterator_ =
- new (mir_graph->GetArena()) GrowableArray<BasicBlock*>::Iterator (mir_graph->GetBlockList());
+ new (mir_graph->GetArena()) GrowableArray<BasicBlock*>::Iterator(mir_graph->GetBlockList());
}
void Reset() {
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 0803914..ae160d6 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -53,7 +53,7 @@ LLVMInfo::LLVMInfo() {
llvm_module_ = new ::llvm::Module("art", *llvm_context_);
::llvm::StructType::create(*llvm_context_, "JavaObject");
art::llvm::makeLLVMModuleContents(llvm_module_);
- intrinsic_helper_.reset( new art::llvm::IntrinsicHelper(*llvm_context_, *llvm_module_));
+ intrinsic_helper_.reset(new art::llvm::IntrinsicHelper(*llvm_context_, *llvm_module_));
ir_builder_.reset(new art::llvm::IRBuilder(*llvm_context_, *llvm_module_, *intrinsic_helper_));
}
@@ -276,7 +276,7 @@ CompiledMethod* CompileOneMethod(CompilerDriver& compiler,
#if defined(ART_USE_PORTABLE_COMPILER)
, llvm_compilation_unit
#endif
- );
+ ); // NOLINT(whitespace/parens)
}
} // namespace art
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 9632388..be19d5a 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -1122,9 +1122,9 @@ void MIRGraph::CompilerInitializeSSAConversion() {
size_t num_dalvik_reg = cu_->num_dalvik_registers;
ssa_base_vregs_ = new (arena_) GrowableArray<int>(arena_, num_dalvik_reg + GetDefCount() + 128,
- kGrowableArraySSAtoDalvikMap);
+ kGrowableArraySSAtoDalvikMap);
ssa_subscripts_ = new (arena_) GrowableArray<int>(arena_, num_dalvik_reg + GetDefCount() + 128,
- kGrowableArraySSAtoDalvikMap);
+ kGrowableArraySSAtoDalvikMap);
/*
* Initial number of SSA registers is equal to the number of Dalvik
* registers.
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 0b3fa46..634c576 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -410,7 +410,7 @@ void MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, int cur_offset
(insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
kPackedSwitch : kSparseSwitch;
cur_block->successor_block_list.blocks =
- new (arena_)GrowableArray<SuccessorBlockInfo*>(arena_, size, kGrowableArraySuccessorBlocks);
+ new (arena_) GrowableArray<SuccessorBlockInfo*>(arena_, size, kGrowableArraySuccessorBlocks);
for (i = 0; i < size; i++) {
BasicBlock *case_block = FindBlock(cur_offset + target_table[i], /* split */ true,
@@ -427,8 +427,8 @@ void MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, int cur_offset
}
/* Fall-through case */
- BasicBlock* fallthrough_block = FindBlock( cur_offset + width, /* split */ false,
- /* create */ true, /* immed_pred_block_p */ NULL);
+ BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* split */ false,
+ /* create */ true, /* immed_pred_block_p */ NULL);
cur_block->fall_through = fallthrough_block;
fallthrough_block->predecessors->Insert(cur_block);
}
@@ -1146,8 +1146,9 @@ BasicBlock* MIRGraph::NewMemBB(BBType block_type, int block_id) {
bb->block_type = block_type;
bb->id = block_id;
// TUNING: better estimate of the exit block predecessors?
- bb->predecessors = new (arena_)
- GrowableArray<BasicBlock*>(arena_, (block_type == kExitBlock) ? 2048 : 2, kGrowableArrayPredecessors);
+ bb->predecessors = new (arena_) GrowableArray<BasicBlock*>(arena_,
+ (block_type == kExitBlock) ? 2048 : 2,
+ kGrowableArrayPredecessors);
bb->successor_block_list.block_list_type = kNotUsed;
block_id_map_.Put(block_id, block_id);
return bb;
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 882b81a..f83bbb2 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -228,7 +228,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
MIR* mir_next = mir->next;
Instruction::Code br_opcode = mir_next->dalvikInsn.opcode;
ConditionCode ccode = kCondNv;
- switch(br_opcode) {
+ switch (br_opcode) {
case Instruction::IF_EQZ:
ccode = kCondEq;
break;
@@ -255,7 +255,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
(mir->ssa_rep->defs[0] == mir_next->ssa_rep->uses[0]) &&
(GetSSAUseCount(mir->ssa_rep->defs[0]) == 1)) {
mir_next->dalvikInsn.arg[0] = ccode;
- switch(opcode) {
+ switch (opcode) {
case Instruction::CMPL_FLOAT:
mir_next->dalvikInsn.opcode =
static_cast<Instruction::Code>(kMirOpFusedCmplFloat);
diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc
index cfd3daf..85ffec5 100644
--- a/compiler/dex/portable/mir_to_gbc.cc
+++ b/compiler/dex/portable/mir_to_gbc.cc
@@ -297,7 +297,7 @@ void MirConverter::EmitSuspendCheck() {
::llvm::Value* src1, ::llvm::Value* src2) {
::llvm::Value* res = NULL;
DCHECK_EQ(src1->getType(), src2->getType());
- switch(cc) {
+ switch (cc) {
case kCondEq: res = irb_->CreateICmpEQ(src1, src2); break;
case kCondNe: res = irb_->CreateICmpNE(src1, src2); break;
case kCondLt: res = irb_->CreateICmpSLT(src1, src2); break;
@@ -369,7 +369,7 @@ void MirConverter::ConvertCompareZeroAndBranch(BasicBlock* bb,
::llvm::Value* MirConverter::GenArithOp(OpKind op, bool is_long,
::llvm::Value* src1, ::llvm::Value* src2) {
::llvm::Value* res = NULL;
- switch(op) {
+ switch (op) {
case kOpAdd: res = irb_->CreateAdd(src1, src2); break;
case kOpSub: res = irb_->CreateSub(src1, src2); break;
case kOpRsub: res = irb_->CreateSub(src2, src1); break;
@@ -393,7 +393,7 @@ void MirConverter::ConvertFPArithOp(OpKind op, RegLocation rl_dest,
::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg);
::llvm::Value* src2 = GetLLVMValue(rl_src2.orig_sreg);
::llvm::Value* res = NULL;
- switch(op) {
+ switch (op) {
case kOpAdd: res = irb_->CreateFAdd(src1, src2); break;
case kOpSub: res = irb_->CreateFSub(src1, src2); break;
case kOpMul: res = irb_->CreateFMul(src1, src2); break;
@@ -1781,7 +1781,7 @@ char RemapShorty(char shorty_type) {
* types (which is valid so long as we always do a real expansion of passed
* arguments and field loads).
*/
- switch(shorty_type) {
+ switch (shorty_type) {
case 'Z' : shorty_type = 'I'; break;
case 'B' : shorty_type = 'I'; break;
case 'S' : shorty_type = 'I'; break;
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index 2c626a0..8f73f0c 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -193,7 +193,7 @@ void ArmMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
}
NewLIR0(kThumb2Fmstat);
ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
- switch(ccode) {
+ switch (ccode) {
case kCondEq:
case kCondNe:
break;
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index e12df6c..3a367c9 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -129,7 +129,7 @@ void ArmMir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
int32_t low_reg = rl_src1.low_reg;
int32_t high_reg = rl_src1.high_reg;
- switch(ccode) {
+ switch (ccode) {
case kCondEq:
case kCondNe:
LIR* target;
@@ -270,7 +270,7 @@ void ArmMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
- switch(ccode) {
+ switch (ccode) {
case kCondEq:
OpCondBranch(kCondNe, not_taken);
break;
@@ -436,7 +436,7 @@ bool ArmMir2Lir::SmallLiteralDivide(Instruction::Code dalvik_opcode,
int r_hi = AllocTemp();
int r_lo = AllocTemp();
NewLIR4(kThumb2Smull, r_lo, r_hi, r_magic, rl_src.low_reg);
- switch(pattern) {
+ switch (pattern) {
case Divide3:
OpRegRegRegShift(kOpSub, rl_result.low_reg, r_hi,
rl_src.low_reg, EncodeShift(kArmAsr, 31));
@@ -1002,7 +1002,7 @@ void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
return;
}
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- switch(opcode) {
+ switch (opcode) {
case Instruction::SHL_LONG:
case Instruction::SHL_LONG_2ADDR:
if (shift_amount == 1) {
@@ -1090,7 +1090,7 @@ void ArmMir2Lir::GenArithImmOpLong(Instruction::Code opcode,
int32_t mod_imm_hi = ModifiedImmediate(val_hi);
// Only a subset of add/sub immediate instructions set carry - so bail if we don't fit
- switch(opcode) {
+ switch (opcode) {
case Instruction::ADD_LONG:
case Instruction::ADD_LONG_2ADDR:
case Instruction::SUB_LONG:
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 80f597d..305a147 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -549,7 +549,7 @@ LIR* ArmMir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) {
ArmOpcode opcode = kThumbBkpt;
switch (op) {
case kOpAdd:
- if ( !neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
+ if (!neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
DCHECK_EQ((value & 0x3), 0);
return NewLIR1(kThumbAddSpI7, value >> 2);
} else if (short_form) {
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 8698b1f..7a59644 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -55,7 +55,7 @@ bool Mir2Lir::FastInstance(uint32_t field_idx, int& field_offset, bool& is_volat
}
/* Convert an instruction to a NOP */
-void Mir2Lir::NopLIR( LIR* lir) {
+void Mir2Lir::NopLIR(LIR* lir) {
lir->flags.is_nop = true;
}
@@ -190,10 +190,10 @@ void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr) {
}
if (lir->use_mask && (!lir->flags.is_nop || dump_nop)) {
- DUMP_RESOURCE_MASK(DumpResourceMask((LIR* ) lir, lir->use_mask, "use"));
+ DUMP_RESOURCE_MASK(DumpResourceMask((LIR*) lir, lir->use_mask, "use"));
}
if (lir->def_mask && (!lir->flags.is_nop || dump_nop)) {
- DUMP_RESOURCE_MASK(DumpResourceMask((LIR* ) lir, lir->def_mask, "def"));
+ DUMP_RESOURCE_MASK(DumpResourceMask((LIR*) lir, lir->def_mask, "def"));
}
}
@@ -336,10 +336,10 @@ LIR* Mir2Lir::AddWideData(LIR* *constant_list_p, int val_lo, int val_hi) {
}
static void PushWord(std::vector<uint8_t>&buf, int data) {
- buf.push_back( data & 0xff);
- buf.push_back( (data >> 8) & 0xff);
- buf.push_back( (data >> 16) & 0xff);
- buf.push_back( (data >> 24) & 0xff);
+ buf.push_back(data & 0xff);
+ buf.push_back((data >> 8) & 0xff);
+ buf.push_back((data >> 16) & 0xff);
+ buf.push_back((data >> 24) & 0xff);
}
static void AlignBuffer(std::vector<uint8_t>&buf, size_t offset) {
@@ -454,8 +454,8 @@ void Mir2Lir::InstallFillArrayData() {
if (tab_rec == NULL) break;
AlignBuffer(code_buffer_, tab_rec->offset);
for (int i = 0; i < (tab_rec->size + 1) / 2; i++) {
- code_buffer_.push_back( tab_rec->table[i] & 0xFF);
- code_buffer_.push_back( (tab_rec->table[i] >> 8) & 0xFF);
+ code_buffer_.push_back(tab_rec->table[i] & 0xFF);
+ code_buffer_.push_back((tab_rec->table[i] >> 8) & 0xFF);
}
}
}
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index a34d2a9..d1bfd2d 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -279,7 +279,7 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
int r_dst = AllocTemp();
int r_idx = AllocTemp();
int r_val = INVALID_REG;
- switch(cu_->instruction_set) {
+ switch (cu_->instruction_set) {
case kThumb2:
r_val = TargetReg(kLr);
break;
@@ -1311,7 +1311,7 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
GenImmedCheck(kCondEq, TargetReg(kArg1), 0, kThrowDivZero);
}
// NOTE: callout here is not a safepoint
- CallHelper(r_tgt, func_offset, false /* not a safepoint */ );
+ CallHelper(r_tgt, func_offset, false /* not a safepoint */);
if (op == kOpDiv)
rl_result = GetReturn(false);
else
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 8510006..127d191 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -107,7 +107,7 @@ LIR* MipsMir2Lir::LoadConstantNoClobber(int r_dest, int value) {
}
LIR* MipsMir2Lir::OpUnconditionalBranch(LIR* target) {
- LIR* res = NewLIR1(kMipsB, 0 /* offset to be patched during assembly*/ );
+ LIR* res = NewLIR1(kMipsB, 0 /* offset to be patched during assembly*/);
res->target = target;
return res;
}
@@ -642,8 +642,8 @@ LIR* MipsMir2Lir::OpMem(OpKind op, int rBase, int disp) {
return NULL;
}
-LIR* MipsMir2Lir::StoreBaseIndexedDisp( int rBase, int r_index, int scale, int displacement,
- int r_src, int r_src_hi, OpSize size, int s_reg) {
+LIR* MipsMir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+ int r_src, int r_src_hi, OpSize size, int s_reg) {
LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for MIPS";
return NULL;
}
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 41e5a2d..7765eaa 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -233,7 +233,7 @@ class Mir2Lir : public Backend {
RegisterClass oat_reg_class_by_size(OpSize size) {
return (size == kUnsignedHalf || size == kSignedHalf || size == kUnsignedByte ||
- size == kSignedByte ) ? kCoreReg : kAnyReg;
+ size == kSignedByte) ? kCoreReg : kAnyReg;
}
size_t CodeBufferSizeInBytes() {
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 4c91223..bc3740a 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -1021,8 +1021,7 @@ void Mir2Lir::DoPromotion() {
if (!(cu_->disable_opt & (1 << kPromoteRegs))) {
// Promote FpRegs
- for (int i = 0; (i < num_regs) &&
- (FpRegs[i].count >= promotion_threshold ); i++) {
+ for (int i = 0; (i < num_regs) && (FpRegs[i].count >= promotion_threshold); i++) {
int p_map_idx = SRegToPMap(FpRegs[i].s_reg);
if (promotion_map_[p_map_idx].fp_location != kLocPhysReg) {
int reg = AllocPreservedFPReg(FpRegs[i].s_reg,
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 6376e3b..75367a3 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -100,7 +100,7 @@ LIR* X86Mir2Lir::LoadConstantNoClobber(int r_dest, int value) {
}
LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target) {
- LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly*/ );
+ LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly*/);
res->target = target;
return res;
}
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 3a0cbcc..7739e29 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -266,7 +266,7 @@ bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) {
void MIRGraph::InitializeDominationInfo(BasicBlock* bb) {
int num_total_blocks = GetBasicBlockListCount();
- if (bb->dominators == NULL ) {
+ if (bb->dominators == NULL) {
bb->dominators = new (arena_) ArenaBitVector(arena_, num_total_blocks,
false /* expandable */, kBitMapDominators);
bb->i_dominated = new (arena_) ArenaBitVector(arena_, num_total_blocks,
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index 10bbd1f..f361dd7 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -160,7 +160,7 @@ bool MIRGraph::InferTypeAndSize(BasicBlock* bb) {
if ((mir->dalvikInsn.opcode == Instruction::RETURN) ||
(mir->dalvikInsn.opcode == Instruction::RETURN_WIDE) ||
(mir->dalvikInsn.opcode == Instruction::RETURN_OBJECT)) {
- switch(cu_->shorty[0]) {
+ switch (cu_->shorty[0]) {
case 'I':
changed |= SetCore(ssa_rep->uses[0], true);
break;