-rw-r--r--  build/Android.common_build.mk                                     3
-rw-r--r--  compiler/dex/gvn_dead_code_elimination.cc                         1
-rw-r--r--  compiler/dex/quick/arm64/utility_arm64.cc                         5
-rw-r--r--  compiler/dex/quick/mips/utility_mips.cc                           4
-rw-r--r--  compiler/linker/relative_patcher.cc                               5
-rw-r--r--  compiler/optimizing/bounds_check_elimination.cc                   1
-rw-r--r--  compiler/optimizing/code_generator.cc                             2
-rw-r--r--  compiler/optimizing/code_generator_arm.cc                         5
-rw-r--r--  compiler/optimizing/code_generator_x86.cc                         4
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc                      4
-rw-r--r--  compiler/optimizing/intrinsics.cc                                 6
-rw-r--r--  compiler/utils/arm/assembler_arm.cc                               6
-rw-r--r--  compiler/utils/arm/assembler_thumb2.cc                            1
-rw-r--r--  compiler/utils/arm64/assembler_arm64.h                            2
-rw-r--r--  disassembler/disassembler_mips.cc                                 1
-rw-r--r--  disassembler/disassembler_mips64.cc                               1
-rw-r--r--  runtime/base/macros.h                                             2
-rw-r--r--  runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc     2
-rw-r--r--  runtime/entrypoints/quick/quick_throw_entrypoints.cc             23
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc                        3
-rw-r--r--  runtime/gc/collector/concurrent_copying.h                         2
-rw-r--r--  runtime/gc/heap.cc                                                1
-rw-r--r--  runtime/gc/task_processor.cc                                      1
-rw-r--r--  runtime/mirror/object.h                                           3
-rw-r--r--  runtime/parsed_options.cc                                         1
-rw-r--r--  runtime/quick_exception_handler.h                                 2
-rw-r--r--  runtime/verifier/method_verifier.cc                               1
27 files changed, 37 insertions(+), 55 deletions(-)
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 8b4be82..8eeeec6 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -141,6 +141,9 @@ art_clang_cflags += -Wused-but-marked-unused
# Enable warning for deprecated language features.
art_clang_cflags += -Wdeprecated
+# Enable warning for unreachable break & return, and missing NO_RETURN annotations.
+art_clang_cflags += -Wunreachable-code-break -Wunreachable-code-return -Wmissing-noreturn
+
# GCC-only warnings.
art_gcc_cflags := -Wunused-but-set-parameter
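
For context, a minimal sketch (not from the ART tree) of the code shape each of the three new clang flags reports; note they are added to art_clang_cflags only, so GCC builds are unaffected:

#include <cstdlib>

[[noreturn]] void Die() { std::abort(); }

int WhichCase(int x) {
  switch (x) {
    case 0:
      return 1;
      break;  // -Wunreachable-code-break: never executed after the return.
    default:
      return 0;
  }
}

int AfterDie() {
  Die();
  return 0;  // -Wunreachable-code-return: Die() cannot return.
}

void AlsoDies() {  // -Wmissing-noreturn: never returns, lacks the annotation.
  Die();
}
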
diff --git a/compiler/dex/gvn_dead_code_elimination.cc b/compiler/dex/gvn_dead_code_elimination.cc
index 2d4c18f..ec12221 100644
--- a/compiler/dex/gvn_dead_code_elimination.cc
+++ b/compiler/dex/gvn_dead_code_elimination.cc
@@ -1357,7 +1357,6 @@ bool GvnDeadCodeElimination::RecordMIR(MIR* mir) {
default:
LOG(FATAL) << "Unexpected opcode: " << opcode;
UNREACHABLE();
- break;
}
if (mir->ssa_rep->num_defs != 0) {
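
This hunk is the template for most of what follows: ART's UNREACHABLE() expands to something the compiler treats as non-returning (plausibly __builtin_unreachable(); see the macros.h note further down), so a break after it is provably dead, which is exactly what -Wunreachable-code-break flags. A compilable reduction, with stand-ins for the ART macros:

#include <cstdio>
#include <cstdlib>

// Stand-ins for ART's LOG(FATAL) and UNREACHABLE(), for illustration only;
// the real LOG(FATAL) aborts via a logger object the compiler can't see through.
void FatalLog(const char* msg) {
  std::fprintf(stderr, "%s\n", msg);
  std::abort();
}
#define UNREACHABLE() __builtin_unreachable()

int Handle(int opcode) {
  switch (opcode) {
    case 0:
      return 42;
    default:
      FatalLog("Unexpected opcode");
      UNREACHABLE();
      // break;  // dead: this is the line -Wunreachable-code-break reports
  }
}
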
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index f48290d..e9ad8ba 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -589,13 +589,11 @@ LIR* Arm64Mir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r
DCHECK_EQ(shift, 0);
// Binary, but rm is encoded twice.
return NewLIR2(kA64Rev2rr | wide, r_dest_src1.GetReg(), r_src2.GetReg());
- break;
case kOpRevsh:
// Binary, but rm is encoded twice.
NewLIR2(kA64Rev162rr | wide, r_dest_src1.GetReg(), r_src2.GetReg());
// "sxth r1, r2" is "sbfm r1, r2, #0, #15"
return NewLIR4(kA64Sbfm4rrdd | wide, r_dest_src1.GetReg(), r_dest_src1.GetReg(), 0, 15);
- break;
case kOp2Byte:
DCHECK_EQ(shift, ENCODE_NO_SHIFT);
// "sbfx r1, r2, #imm1, #imm2" is "sbfm r1, r2, #imm1, #(imm1 + imm2 - 1)".
@@ -645,10 +643,9 @@ LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage
// Note: intentional fallthrough
case kOpSub:
return OpRegRegRegExtend(op, r_dest_src1, r_dest_src1, r_src2, ext, amount);
- break;
default:
LOG(FATAL) << "Bad Opcode: " << opcode;
- break;
+ UNREACHABLE();
}
DCHECK(!IsPseudoLirOp(opcode));
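
In the default case above, the break is replaced by UNREACHABLE() rather than simply deleted: LOG(FATAL) aborts at runtime, but at the call site the compiler sees an ordinary statement, so a bare break would make control appear to fall out of the switch. Reusing the FatalLog/UNREACHABLE stand-ins from the earlier sketch:

int Emit(int op) {
  int encoded;
  switch (op) {
    case 0:
      encoded = 1;
      break;
    default:
      FatalLog("Bad Opcode");
      UNREACHABLE();  // replaces the old break: FatalLog() aborts at runtime,
                      // but the compiler cannot prove that, so a bare break
                      // would leave `encoded` looking possibly-uninitialized
  }
  return encoded;
}
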
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index bf0e0fc..8ab5422 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -283,9 +283,9 @@ LIR* MipsMir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
break;
case kOpBx:
return NewLIR2(kMipsJalr, rZERO, r_dest_src.GetReg());
- break;
default:
LOG(FATAL) << "Bad case in OpReg";
+ UNREACHABLE();
}
return NewLIR2(opcode, cu_->target64 ? rRAd : rRA, r_dest_src.GetReg());
}
@@ -295,8 +295,8 @@ LIR* MipsMir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
} else {
LOG(FATAL) << "Bad case in OpRegImm";
+ UNREACHABLE();
}
- UNREACHABLE();
}
LIR* MipsMir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
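
In OpRegImm the UNREACHABLE() moves inside the else branch instead of trailing the if/else: once the marker sits directly after the fatal log, both arms visibly end the function (return vs. UNREACHABLE()), and any statement placed after the if/else would itself be dead code under the new flags. The shape, again with the illustrative stand-ins:

int Pick(bool short_form) {
  if (short_form) {
    return 1;
  } else {
    FatalLog("Bad case in OpRegImm");
    UNREACHABLE();  // the impossible path is marked exactly where it occurs
  }
  // No trailing UNREACHABLE() needed: both arms already end the function,
  // so anything placed here would be unreachable code.
}
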
diff --git a/compiler/linker/relative_patcher.cc b/compiler/linker/relative_patcher.cc
index 8ee87aa..89aed95 100644
--- a/compiler/linker/relative_patcher.cc
+++ b/compiler/linker/relative_patcher.cc
@@ -67,22 +67,17 @@ std::unique_ptr<RelativePatcher> RelativePatcher::Create(
switch (instruction_set) {
case kX86:
return std::unique_ptr<RelativePatcher>(new X86RelativePatcher());
- break;
case kX86_64:
return std::unique_ptr<RelativePatcher>(new X86_64RelativePatcher());
- break;
case kArm:
// Fall through: we generate Thumb2 code for "arm".
case kThumb2:
return std::unique_ptr<RelativePatcher>(new Thumb2RelativePatcher(provider));
- break;
case kArm64:
return std::unique_ptr<RelativePatcher>(
new Arm64RelativePatcher(provider, features->AsArm64InstructionSetFeatures()));
- break;
default:
return std::unique_ptr<RelativePatcher>(new RelativePatcherNone);
- break;
}
}
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index dce02f7..6511120 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -239,7 +239,6 @@ class ValueBound : public ValueObject {
*underflow = true;
return Min();
}
- return ValueBound(instruction_, new_constant);
}
private:
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 9b1ef17..da28dc7 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -132,7 +132,6 @@ size_t CodeGenerator::FindFreeEntry(bool* array, size_t length) {
}
LOG(FATAL) << "Could not find a register in baseline register allocator";
UNREACHABLE();
- return -1;
}
size_t CodeGenerator::FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t length) {
@@ -145,7 +144,6 @@ size_t CodeGenerator::FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t l
}
LOG(FATAL) << "Could not find a register in baseline register allocator";
UNREACHABLE();
- return -1;
}
void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
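
The deleted return -1; after UNREACHABLE() was dead code, and doubly dubious in a function returning size_t, where -1 would quietly convert to SIZE_MAX. UNREACHABLE() alone is what keeps clang from warning that control reaches the end of a non-void function. A compilable reduction (Check() is a placeholder predicate):

#include <cstddef>

bool Check(size_t i) { return i == 3; }  // placeholder

size_t FindFree(size_t length) {
  for (size_t i = 0; i < length; ++i) {
    if (Check(i)) {
      return i;
    }
  }
  FatalLog("Could not find a register");
  UNREACHABLE();  // suppresses "control reaches end of non-void function"
  // return -1;   // dead, and -1 would have wrapped to SIZE_MAX here anyway
}
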
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index f5e4df1..cfc798a 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -562,7 +562,6 @@ Location CodeGeneratorARM::GetStackLocation(HLoadLocal* load) const {
case Primitive::kPrimLong:
case Primitive::kPrimDouble:
return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
- break;
case Primitive::kPrimInt:
case Primitive::kPrimNot:
@@ -575,10 +574,11 @@ Location CodeGeneratorARM::GetStackLocation(HLoadLocal* load) const {
case Primitive::kPrimShort:
case Primitive::kPrimVoid:
LOG(FATAL) << "Unexpected type " << load->GetType();
+ UNREACHABLE();
}
LOG(FATAL) << "Unreachable";
- return Location();
+ UNREACHABLE();
}
Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
@@ -683,7 +683,6 @@ Location InvokeDexCallingConventionVisitor::GetReturnLocation(Primitive::Type ty
return Location();
}
UNREACHABLE();
- return Location();
}
void CodeGeneratorARM::Move32(Location destination, Location source) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index f79dbc3..92b62e2 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -514,7 +514,6 @@ Location CodeGeneratorX86::GetStackLocation(HLoadLocal* load) const {
case Primitive::kPrimLong:
case Primitive::kPrimDouble:
return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
- break;
case Primitive::kPrimInt:
case Primitive::kPrimNot:
@@ -527,10 +526,11 @@ Location CodeGeneratorX86::GetStackLocation(HLoadLocal* load) const {
case Primitive::kPrimShort:
case Primitive::kPrimVoid:
LOG(FATAL) << "Unexpected type " << load->GetType();
+ UNREACHABLE();
}
LOG(FATAL) << "Unreachable";
- return Location();
+ UNREACHABLE();
}
Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 9958451..cdbc778 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -555,7 +555,6 @@ Location CodeGeneratorX86_64::GetStackLocation(HLoadLocal* load) const {
case Primitive::kPrimLong:
case Primitive::kPrimDouble:
return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
- break;
case Primitive::kPrimInt:
case Primitive::kPrimNot:
@@ -568,10 +567,11 @@ Location CodeGeneratorX86_64::GetStackLocation(HLoadLocal* load) const {
case Primitive::kPrimShort:
case Primitive::kPrimVoid:
LOG(FATAL) << "Unexpected type " << load->GetType();
+ UNREACHABLE();
}
LOG(FATAL) << "Unreachable";
- return Location();
+ UNREACHABLE();
}
void CodeGeneratorX86_64::Move(Location destination, Location source) {
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 628a844..20aa45f 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -90,7 +90,6 @@ static Intrinsics GetIntrinsic(InlineMethod method) {
LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
UNREACHABLE();
}
- break;
case kIntrinsicReverseBytes:
switch (GetType(method.d.data, true)) {
case Primitive::kPrimShort:
@@ -103,7 +102,6 @@ static Intrinsics GetIntrinsic(InlineMethod method) {
LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
UNREACHABLE();
}
- break;
// Abs.
case kIntrinsicAbsDouble:
@@ -166,7 +164,6 @@ static Intrinsics GetIntrinsic(InlineMethod method) {
LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
UNREACHABLE();
}
- break;
// Memory.poke.
case kIntrinsicPoke:
@@ -183,7 +180,6 @@ static Intrinsics GetIntrinsic(InlineMethod method) {
LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
UNREACHABLE();
}
- break;
// String.
case kIntrinsicCharAt:
@@ -211,7 +207,6 @@ static Intrinsics GetIntrinsic(InlineMethod method) {
LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
UNREACHABLE();
}
- break;
case kIntrinsicUnsafeGet: {
const bool is_volatile = (method.d.data & kIntrinsicFlagIsVolatile);
switch (GetType(method.d.data, false)) {
@@ -225,7 +220,6 @@ static Intrinsics GetIntrinsic(InlineMethod method) {
LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
UNREACHABLE();
}
- break;
}
case kIntrinsicUnsafePut: {
enum Sync { kNoSync, kVolatile, kOrdered };
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index a02191b..8059289 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -89,7 +89,6 @@ uint32_t ShifterOperand::encodingArm() const {
} else {
return immed_;
}
- break;
case kRegister:
if (is_shift_) {
uint32_t shift_type;
@@ -121,7 +120,6 @@ uint32_t ShifterOperand::encodingArm() const {
// Simple register
return static_cast<uint32_t>(rm_);
}
- break;
default:
// Can't get here.
LOG(FATAL) << "Invalid shifter operand for ARM";
@@ -156,13 +154,11 @@ uint32_t ShifterOperand::encodingThumb() const {
// Simple register
return static_cast<uint32_t>(rm_);
}
- break;
default:
// Can't get here.
LOG(FATAL) << "Invalid shifter operand for thumb";
- return 0;
+ UNREACHABLE();
}
- return 0;
}
uint32_t Address::encodingArm() const {
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 3e8d9c3..6286b10 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -749,7 +749,6 @@ bool Thumb2Assembler::Is32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
break;
case TEQ:
return true;
- break;
case ADD:
case SUB:
break;
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 2031fe4..8973b9c 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -30,7 +30,9 @@
// TODO: make vixl clean wrt -Wshadow.
#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunknown-pragmas"
#pragma GCC diagnostic ignored "-Wshadow"
+#pragma GCC diagnostic ignored "-Wmissing-noreturn"
#include "vixl/a64/macro-assembler-a64.h"
#include "vixl/a64/disasm-a64.h"
#pragma GCC diagnostic pop
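
The extra ignored diagnostics bracket a third-party include (vixl) so that headers ART does not control don't fail the now-stricter build, while ART's own code keeps the full warning set; -Wunknown-pragmas is presumably silenced first so a compiler that doesn't recognize everything in the sandwich won't warn about the pragmas themselves. The general idiom, with a hypothetical header:

#pragma GCC diagnostic push                          // save the warning state
#pragma GCC diagnostic ignored "-Wunknown-pragmas"   // tolerate pragmas this
                                                     // compiler doesn't know
#pragma GCC diagnostic ignored "-Wmissing-noreturn"  // vendored code isn't
                                                     // annotated with NO_RETURN
#include "third_party/some_header.h"                 // hypothetical
#pragma GCC diagnostic pop                           // restore full warnings
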
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index b27b555..e2b7341 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -228,7 +228,6 @@ size_t DisassemblerMips::Dump(std::ostream& os, const uint8_t* instr_ptr) {
}
continue; // No ", ".
}
- break;
case 'I': // Upper 16-bit immediate.
args << reinterpret_cast<void*>((instruction & 0xffff) << 16);
break;
diff --git a/disassembler/disassembler_mips64.cc b/disassembler/disassembler_mips64.cc
index 7b289d0..1b6e6be 100644
--- a/disassembler/disassembler_mips64.cc
+++ b/disassembler/disassembler_mips64.cc
@@ -233,7 +233,6 @@ static void DumpMips64(std::ostream& os, const uint8_t* instr_ptr) {
}
continue; // No ", ".
}
- break;
case 'I': // Upper 16-bit immediate.
args << reinterpret_cast<void*>((instruction & 0xffff) << 16);
break;
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index 3a9de5f..6c33232 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -66,7 +66,7 @@ friend class test_set_name##_##individual_test##_Test
// A macro to disallow new and delete operators for a class. It goes in the private: declarations.
#define DISALLOW_ALLOCATION() \
public: \
- ALWAYS_INLINE void operator delete(void*, size_t) { UNREACHABLE(); } \
+ NO_RETURN ALWAYS_INLINE void operator delete(void*, size_t) { UNREACHABLE(); } \
private: \
void* operator new(size_t) = delete
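
macros.h is only touched at DISALLOW_ALLOCATION here, but the two macros doing the work throughout this change are conventionally defined along these lines (a sketch of common definitions, not necessarily ART's verbatim ones):

#define NO_RETURN __attribute__((noreturn))    // or [[noreturn]] in C++11
#define UNREACHABLE() __builtin_unreachable()  // undefined behavior if ever
                                               // executed, so it must follow
                                               // an abort, throw, or long jump

With operator delete's body reduced to UNREACHABLE(), the function can never return normally, which is why -Wmissing-noreturn asks for the NO_RETURN added above.
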
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index d88d262..6a8aaf2 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -27,7 +27,7 @@
namespace art {
-extern "C" void artDeoptimize(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+extern "C" NO_RETURN void artDeoptimize(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
self->SetException(Thread::GetDeoptimizationException());
self->QuickDeliverException();
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index 70317bb..9644b98 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -24,14 +24,14 @@
namespace art {
// Deliver an exception that's pending on thread helping set up a callee save frame on the way.
-extern "C" void artDeliverPendingExceptionFromCode(Thread* self)
+extern "C" NO_RETURN void artDeliverPendingExceptionFromCode(Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
self->QuickDeliverException();
}
// Called by generated call to throw an exception.
-extern "C" void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self)
+extern "C" NO_RETURN void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
/*
* exception may be NULL, in which case this routine should
@@ -50,7 +50,7 @@ extern "C" void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread
}
// Called by generated call to throw a NPE exception.
-extern "C" void artThrowNullPointerExceptionFromCode(Thread* self)
+extern "C" NO_RETURN void artThrowNullPointerExceptionFromCode(Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
self->NoteSignalBeingHandled();
@@ -60,7 +60,7 @@ extern "C" void artThrowNullPointerExceptionFromCode(Thread* self)
}
// Called by generated call to throw an arithmetic divide by zero exception.
-extern "C" void artThrowDivZeroFromCode(Thread* self)
+extern "C" NO_RETURN void artThrowDivZeroFromCode(Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowArithmeticExceptionDivideByZero();
@@ -68,14 +68,14 @@ extern "C" void artThrowDivZeroFromCode(Thread* self)
}
// Called by generated call to throw an array index out of bounds exception.
-extern "C" void artThrowArrayBoundsFromCode(int index, int length, Thread* self)
+extern "C" NO_RETURN void artThrowArrayBoundsFromCode(int index, int length, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowArrayIndexOutOfBoundsException(index, length);
self->QuickDeliverException();
}
-extern "C" void artThrowStackOverflowFromCode(Thread* self)
+extern "C" NO_RETURN void artThrowStackOverflowFromCode(Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
self->NoteSignalBeingHandled();
@@ -84,15 +84,16 @@ extern "C" void artThrowStackOverflowFromCode(Thread* self)
self->QuickDeliverException();
}
-extern "C" void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self)
+extern "C" NO_RETURN void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowNoSuchMethodError(method_idx);
self->QuickDeliverException();
}
-extern "C" void artThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type,
- Thread* self)
+extern "C" NO_RETURN void artThrowClassCastException(mirror::Class* dest_type,
+ mirror::Class* src_type,
+ Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
DCHECK(!dest_type->IsAssignableFrom(src_type));
@@ -100,8 +101,8 @@ extern "C" void artThrowClassCastException(mirror::Class* dest_type, mirror::Cla
self->QuickDeliverException();
}
-extern "C" void artThrowArrayStoreException(mirror::Object* array, mirror::Object* value,
- Thread* self)
+extern "C" NO_RETURN void artThrowArrayStoreException(mirror::Object* array, mirror::Object* value,
+ Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowArrayStoreException(value->GetClass(), array->GetClass());
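
All of these entrypoints end by delivering an exception, which (per the handler further down, expected to long-jump) never returns to the caller, so they pass -Wmissing-noreturn only once annotated. The annotation also pays off at call sites; a sketch of a hypothetical caller:

// Hypothetical caller: with NO_RETURN on the entrypoint, the compiler knows
// the throwing branch ends here and requires no value after the call.
extern "C" NO_RETURN void artThrowDivZeroFromCode(Thread* self);

int DivideChecked(int num, int denom, Thread* self) {
  if (denom == 0) {
    artThrowDivZeroFromCode(self);  // control never comes back
  }
  return num / denom;
}
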
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 56919bd..8aa1b52 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -804,6 +804,9 @@ class ConcurrentCopyingClearBlackPtrsVisitor {
public:
explicit ConcurrentCopyingClearBlackPtrsVisitor(ConcurrentCopying* cc)
: collector_(cc) {}
+#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
+ NO_RETURN
+#endif
void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
DCHECK(obj != nullptr);
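
The #ifndef guard makes the annotation conditional: presumably, without USE_BAKER_OR_BROOKS_READ_BARRIER the visitor's body ends on a LOG(FATAL) path that never returns, while read-barrier builds do real work and return normally. (runtime/mirror/object.h below gets the same treatment for SetReadBarrierPointer.) The shape, with hypothetical contents:

#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
NO_RETURN  // only a non-returning function in this configuration
#endif
void SetPointer(void* ptr) {
#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
  DoRealWork(ptr);  // hypothetical: the normal, returning path
#else
  FatalLog("Unsupported without read barriers");
  UNREACHABLE();
#endif
}
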
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index a87053d..93de035 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -232,7 +232,7 @@ class ConcurrentCopying : public GarbageCollector {
bool IsOnAllocStack(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Object* GetFwdPtr(mirror::Object* from_ref)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void FlipThreadRoots() LOCKS_EXCLUDED(Locks::mutator_lock_);;
+ void FlipThreadRoots() LOCKS_EXCLUDED(Locks::mutator_lock_);
void SwapStacks(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void RecordLiveStackFreezeSize(Thread* self);
void ComputeUnevacFromSpaceLiveRatio();
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index d80bba6..b9153c1 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -504,7 +504,6 @@ MemMap* Heap::MapAnonymousPreferredAddress(const char* name, uint8_t* request_be
// Retry a second time with no specified request begin.
request_begin = nullptr;
}
- return nullptr;
}
bool Heap::MayUseCollector(CollectorType type) const {
diff --git a/runtime/gc/task_processor.cc b/runtime/gc/task_processor.cc
index 1a3c6f5..2ca4b3f 100644
--- a/runtime/gc/task_processor.cc
+++ b/runtime/gc/task_processor.cc
@@ -67,7 +67,6 @@ HeapTask* TaskProcessor::GetTask(Thread* self) {
}
}
UNREACHABLE();
- return nullptr;
}
void TaskProcessor::UpdateTargetRunTime(Thread* self, HeapTask* task, uint64_t new_target_time) {
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index b730670..cfc8549 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -90,6 +90,9 @@ class MANAGED LOCKABLE Object {
void SetClass(Class* new_klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Object* GetReadBarrierPointer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
+ NO_RETURN
+#endif
void SetReadBarrierPointer(Object* rb_ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 89779bc..c23f744 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -413,7 +413,6 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
}
UNREACHABLE();
- return false;
}
using M = RuntimeArgumentMap;
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index 8cccec8..7ee4118 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -38,7 +38,7 @@ class QuickExceptionHandler {
QuickExceptionHandler(Thread* self, bool is_deoptimization)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ~QuickExceptionHandler() {
+ NO_RETURN ~QuickExceptionHandler() {
LOG(FATAL) << "UNREACHABLE"; // Expected to take long jump.
UNREACHABLE();
}
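
A NO_RETURN destructor is rare but legal: the handler is expected to leave via a long jump, so actually running ~QuickExceptionHandler() means something went badly wrong, and the destructor says so fatally. A standalone sketch of the same trick:

#include <cstdio>
#include <cstdlib>

class MustLongJump {
 public:
  // Destroying this object normally is a bug by construction: whoever owns
  // it is supposed to transfer control away (e.g. a long jump) first.
  [[noreturn]] ~MustLongJump() {
    std::fprintf(stderr, "UNREACHABLE\n");
    std::abort();
  }
};
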
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index c6db7e5..d0f8468 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -1075,7 +1075,6 @@ bool MethodVerifier::GetBranchOffset(uint32_t cur_offset, int32_t* pOffset, bool
break;
default:
return false;
- break;
}
return true;
}