author     Ian Rogers <irogers@google.com>   2014-10-31 00:33:20 -0700
committer  Ian Rogers <irogers@google.com>   2014-11-03 20:01:04 -0800
commit     6a3c1fcb4ba42ad4d5d142c17a3712a6ddd3866f (patch)
tree       9df58b57af13240a93a6da4eefcf03f70cce9ad9 /runtime
parent     c6e0955737e15f7c0c3575d4e13789b3411f4993 (diff)
Remove -Wno-unused-parameter and -Wno-sign-promo from base cflags.
Fix associated errors about unused parameters and implicit sign conversions. For sign conversion this was largely in the area of enums, so add ostream operators for the affected enums and fix tools/generate-operator-out.py. Tidy arena allocation code and arena-allocated data types, rather than fixing new and delete operators. Remove dead code.

Change-Id: I5b433e722d2f75baacfacae4d32aef4a828bfe1b
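For illustration, here is a minimal sketch (not part of the diff) of the unused-parameter idioms this change applies across the runtime. UNUSED, ATTRIBUTE_UNUSED and UNREACHABLE are the real helpers added in base/macros.h below; the handler functions are hypothetical:

    template<typename... T> void UNUSED(const T&...) {}    // false use of arguments
    #define ATTRIBUTE_UNUSED __attribute__((__unused__))   // per-parameter attribute

    // Fix 1: create a false use inside the body (now deprecated in ART).
    int HandlerA(int sig, void* context) { UNUSED(context); return sig; }
    // Fix 2, preferred going forward: annotate the parameter itself.
    int HandlerB(int sig, void* context ATTRIBUTE_UNUSED) { return sig; }
    // Fix 3: drop the parameter name entirely, as fault_handler_x86.cc does.
    int HandlerC(int sig, void*) { return sig; }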
Diffstat (limited to 'runtime')
-rw-r--r--  runtime/Android.mk | 8
-rw-r--r--  runtime/arch/arch_test.cc | 2
-rw-r--r--  runtime/arch/stub_test.cc | 60
-rw-r--r--  runtime/arch/x86/context_x86.cc | 10
-rw-r--r--  runtime/arch/x86/context_x86.h | 10
-rw-r--r--  runtime/arch/x86/fault_handler_x86.cc | 8
-rw-r--r--  runtime/base/allocator.cc | 12
-rw-r--r--  runtime/base/allocator.h | 2
-rw-r--r--  runtime/base/macros.h | 14
-rw-r--r--  runtime/base/unix_file/mapped_file.cc | 166
-rw-r--r--  runtime/base/unix_file/mapped_file.h | 102
-rw-r--r--  runtime/base/unix_file/mapped_file_test.cc | 272
-rw-r--r--  runtime/base/unix_file/null_file.cc | 6
-rw-r--r--  runtime/check_jni.cc | 8
-rw-r--r--  runtime/class_linker-inl.h | 5
-rw-r--r--  runtime/class_linker.cc | 14
-rw-r--r--  runtime/class_linker.h | 8
-rw-r--r--  runtime/common_runtime_test.h | 2
-rw-r--r--  runtime/debugger.cc | 11
-rw-r--r--  runtime/debugger.h | 3
-rw-r--r--  runtime/dex_file.h | 2
-rw-r--r--  runtime/dex_instruction.h | 7
-rw-r--r--  runtime/entrypoints/entrypoint_utils-inl.h | 11
-rw-r--r--  runtime/entrypoints/entrypoint_utils.h | 3
-rw-r--r--  runtime/entrypoints/portable/portable_fillarray_entrypoints.cc | 1
-rw-r--r--  runtime/entrypoints/portable/portable_trampoline_entrypoints.cc | 1
-rw-r--r--  runtime/entrypoints/quick/quick_alloc_entrypoints.cc | 9
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 22
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc | 2
-rw-r--r--  runtime/exception_test.cc | 2
-rw-r--r--  runtime/fault_handler.cc | 2
-rw-r--r--  runtime/gc/accounting/mod_union_table.cc | 2
-rw-r--r--  runtime/gc/accounting/remembered_set.cc | 1
-rw-r--r--  runtime/gc/accounting/space_bitmap_test.cc | 2
-rw-r--r--  runtime/gc/allocator/dlmalloc.cc | 12
-rw-r--r--  runtime/gc/allocator/rosalloc.cc | 6
-rw-r--r--  runtime/gc/allocator/rosalloc.h | 23
-rw-r--r--  runtime/gc/allocator_type.h | 3
-rw-r--r--  runtime/gc/collector/concurrent_copying.h | 4
-rw-r--r--  runtime/gc/collector/mark_sweep.cc | 1
-rw-r--r--  runtime/gc/collector/sticky_mark_sweep.cc | 1
-rw-r--r--  runtime/gc/heap.cc | 15
-rw-r--r--  runtime/gc/heap.h | 3
-rw-r--r--  runtime/gc/space/dlmalloc_space.cc | 1
-rw-r--r--  runtime/gc/space/large_object_space.cc | 6
-rw-r--r--  runtime/gc/space/valgrind_malloc_space.h | 1
-rw-r--r--  runtime/gc/space/zygote_space.cc | 22
-rw-r--r--  runtime/handle_scope.h | 2
-rw-r--r--  runtime/instruction_set.cc | 5
-rw-r--r--  runtime/instrumentation.cc | 1
-rw-r--r--  runtime/instrumentation.h | 15
-rw-r--r--  runtime/intern_table.cc | 3
-rw-r--r--  runtime/interpreter/interpreter.cc | 7
-rw-r--r--  runtime/jdwp/jdwp_handler.cc | 1
-rw-r--r--  runtime/jdwp/object_registry.cc | 1
-rw-r--r--  runtime/jni_internal.cc | 3
-rw-r--r--  runtime/lock_word.h | 2
-rw-r--r--  runtime/mem_map.cc | 3
-rw-r--r--  runtime/mirror/array-inl.h | 1
-rw-r--r--  runtime/mirror/class.h | 2
-rw-r--r--  runtime/mirror/object-inl.h | 2
-rw-r--r--  runtime/monitor_pool.h | 3
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc | 16
-rw-r--r--  runtime/native/dalvik_system_ZygoteHooks.cc | 3
-rw-r--r--  runtime/native/java_lang_reflect_Field.cc | 20
-rw-r--r--  runtime/noop_compiler_callbacks.h | 4
-rw-r--r--  runtime/parsed_options.cc | 2
-rw-r--r--  runtime/profiler_options.h | 1
-rw-r--r--  runtime/quick_exception_handler.cc | 4
-rw-r--r--  runtime/read_barrier-inl.h | 4
-rw-r--r--  runtime/reflection.cc | 5
-rw-r--r--  runtime/runtime.cc | 10
-rw-r--r--  runtime/runtime.h | 8
-rw-r--r--  runtime/stack.h | 6
-rw-r--r--  runtime/thread_list.cc | 8
-rw-r--r--  runtime/thread_pool.cc | 50
-rw-r--r--  runtime/thread_pool.h | 4
-rw-r--r--  runtime/thread_state.h | 3
-rw-r--r--  runtime/trace.cc | 26
-rw-r--r--  runtime/transaction.cc | 62
-rw-r--r--  runtime/transaction.h | 17
-rw-r--r--  runtime/utils.h | 7
82 files changed, 359 insertions(+), 840 deletions(-)
diff --git a/runtime/Android.mk b/runtime/Android.mk
index a2fc24a..4505b8e 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -31,7 +31,6 @@ LIBART_COMMON_SRC_FILES := \
base/stringprintf.cc \
base/timing_logger.cc \
base/unix_file/fd_file.cc \
- base/unix_file/mapped_file.cc \
base/unix_file/null_file.cc \
base/unix_file/random_access_file_utils.cc \
base/unix_file/string_file.cc \
@@ -296,12 +295,16 @@ LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \
arch/x86_64/registers_x86_64.h \
base/allocator.h \
base/mutex.h \
+ debugger.h \
dex_file.h \
dex_instruction.h \
+ gc/allocator/rosalloc.h \
gc/collector/gc_type.h \
+ gc/allocator_type.h \
gc/collector_type.h \
gc/space/space.h \
gc/heap.h \
+ instrumentation.h \
indirect_reference_table.h \
instruction_set.h \
invoke_type.h \
@@ -311,7 +314,10 @@ LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \
mirror/class.h \
oat.h \
object_callbacks.h \
+ profiler_options.h \
quick/inline_method_analyser.h \
+ runtime.h \
+ stack.h \
thread.h \
thread_state.h \
verifier/method_verifier.h
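Headers listed under LIBART_ENUM_OPERATOR_OUT_HEADER_FILES get an operator<< generated for their enums by tools/generate-operator-out.py, which is how this change avoids the sign-conversion warnings around printing enums. A hand-written sketch of the shape of such a generated operator, using the GcType enum from gc/collector/gc_type.h (values paraphrased from that header; the exact generated code may differ):

    #include <ostream>

    enum GcType { kGcTypeNone, kGcTypeSticky, kGcTypePartial, kGcTypeFull };

    std::ostream& operator<<(std::ostream& os, const GcType& rhs) {
      switch (rhs) {
        case kGcTypeNone: os << "kGcTypeNone"; break;
        case kGcTypeSticky: os << "kGcTypeSticky"; break;
        case kGcTypePartial: os << "kGcTypePartial"; break;
        case kGcTypeFull: os << "kGcTypeFull"; break;
        default: os << "GcType[" << static_cast<int>(rhs) << "]"; break;
      }
      return os;
    }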
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index 42bf8fb..cac500c 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -32,7 +32,7 @@ class ArchTest : public CommonRuntimeTest {
t->TransitionFromSuspendedToRunnable(); // So we can create callee-save methods.
r->SetInstructionSet(isa);
- mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod(type);
+ mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod();
r->SetCalleeSaveMethod(save_method, type);
QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for "
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index c5a0f6c..b0928f8 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -41,7 +41,7 @@ class StubTest : public CommonRuntimeTest {
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
if (!runtime_->HasCalleeSaveMethod(type)) {
- runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(type), type);
+ runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(), type);
}
}
}
@@ -530,18 +530,6 @@ class StubTest : public CommonRuntimeTest {
#endif
}
- // Method with 32b arg0, 32b arg1, 64b arg2
- size_t Invoke3UUWithReferrer(uint32_t arg0, uint32_t arg1, uint64_t arg2, uintptr_t code,
- Thread* self, mirror::ArtMethod* referrer) {
-#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
- // Just pass through.
- return Invoke3WithReferrer(arg0, arg1, arg2, code, self, referrer);
-#else
- // TODO: Needs 4-param invoke.
- return 0;
-#endif
- }
-
static uintptr_t GetEntrypoint(Thread* self, QuickEntrypointEnum entrypoint) {
int32_t offset;
#ifdef __LP64__
@@ -1303,8 +1291,8 @@ TEST_F(StubTest, StringCompareTo) {
}
-static void GetSetBooleanStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSetBooleanStatic(Handle<mirror::ArtField>* f, Thread* self,
+ mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
constexpr size_t num_values = 5;
@@ -1332,8 +1320,8 @@ static void GetSetBooleanStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtF
std::cout << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}
-static void GetSetByteStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSetByteStatic(Handle<mirror::ArtField>* f, Thread* self,
+ mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
int8_t values[] = { -128, -64, 0, 64, 127 };
@@ -1362,7 +1350,7 @@ static void GetSetByteStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtFiel
static void GetSetBooleanInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
- Thread* self, mirror::ArtMethod* referrer, StubTest* test)
+ Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
uint8_t values[] = { 0, true, 2, 128, 0xFF };
@@ -1427,8 +1415,8 @@ static void GetSetByteInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtFi
#endif
}
-static void GetSetCharStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSetCharStatic(Handle<mirror::ArtField>* f, Thread* self, mirror::ArtMethod* referrer,
+ StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };
@@ -1455,8 +1443,8 @@ static void GetSetCharStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtFiel
std::cout << "Skipping set_char_static as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}
-static void GetSetShortStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSetShortStatic(Handle<mirror::ArtField>* f, Thread* self,
+ mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
@@ -1549,8 +1537,8 @@ static void GetSetShortInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtF
#endif
}
-static void GetSet32Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSet32Static(Handle<mirror::ArtField>* f, Thread* self, mirror::ArtMethod* referrer,
+ StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
@@ -1637,8 +1625,8 @@ static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* se
}
#endif
-static void GetSetObjStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSetObjStatic(Handle<mirror::ArtField>* f, Thread* self, mirror::ArtMethod* referrer,
+ StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
set_and_check_static((*f)->GetDexFieldIndex(), nullptr, self, referrer, test);
@@ -1702,8 +1690,8 @@ static void GetSetObjInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtFie
// TODO: Complete these tests for 32b architectures.
-static void GetSet64Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSet64Static(Handle<mirror::ArtField>* f, Thread* self, mirror::ArtMethod* referrer,
+ StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
@@ -1724,6 +1712,7 @@ static void GetSet64Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>
EXPECT_EQ(res, values[i]) << "Iteration " << i;
}
#else
+ UNUSED(f, self, referrer, test);
LOG(INFO) << "Skipping set64static as I don't know how to do that on " << kRuntimeISA;
// Force-print to std::cout so it's also outside the logcat.
std::cout << "Skipping set64static as I don't know how to do that on " << kRuntimeISA << std::endl;
@@ -1760,6 +1749,7 @@ static void GetSet64Instance(Handle<mirror::Object>* obj, Handle<mirror::ArtFiel
EXPECT_EQ(res, static_cast<int64_t>(res2));
}
#else
+ UNUSED(obj, f, self, referrer, test);
LOG(INFO) << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA;
// Force-print to std::cout so it's also outside the logcat.
std::cout << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA << std::endl;
@@ -1796,40 +1786,40 @@ static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type)
switch (type) {
case Primitive::Type::kPrimBoolean:
if (test_type == type) {
- GetSetBooleanStatic(&obj, &f, self, m.Get(), test);
+ GetSetBooleanStatic(&f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimByte:
if (test_type == type) {
- GetSetByteStatic(&obj, &f, self, m.Get(), test);
+ GetSetByteStatic(&f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimChar:
if (test_type == type) {
- GetSetCharStatic(&obj, &f, self, m.Get(), test);
+ GetSetCharStatic(&f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimShort:
if (test_type == type) {
- GetSetShortStatic(&obj, &f, self, m.Get(), test);
+ GetSetShortStatic(&f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimInt:
if (test_type == type) {
- GetSet32Static(&obj, &f, self, m.Get(), test);
+ GetSet32Static(&f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimLong:
if (test_type == type) {
- GetSet64Static(&obj, &f, self, m.Get(), test);
+ GetSet64Static(&f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimNot:
// Don't try array.
if (test_type == type && f->GetTypeDescriptor()[0] != '[') {
- GetSetObjStatic(&obj, &f, self, m.Get(), test);
+ GetSetObjStatic(&f, self, m.Get(), test);
}
break;
diff --git a/runtime/arch/x86/context_x86.cc b/runtime/arch/x86/context_x86.cc
index 32eec57..49aa326 100644
--- a/runtime/arch/x86/context_x86.cc
+++ b/runtime/arch/x86/context_x86.cc
@@ -72,6 +72,16 @@ bool X86Context::SetGPR(uint32_t reg, uintptr_t value) {
}
}
+bool X86Context::GetFPR(uint32_t reg ATTRIBUTE_UNUSED, uintptr_t* val ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Floating-point registers are all caller save in X86";
+ UNREACHABLE();
+}
+
+bool X86Context::SetFPR(uint32_t reg ATTRIBUTE_UNUSED, uintptr_t value ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Floating-point registers are all caller save in X86";
+ UNREACHABLE();
+}
+
void X86Context::DoLongJump() {
#if defined(__i386__)
// Array of GPR values, filled from the context backward for the long jump pop. We add a slot at
diff --git a/runtime/arch/x86/context_x86.h b/runtime/arch/x86/context_x86.h
index a350b25..01c8b82 100644
--- a/runtime/arch/x86/context_x86.h
+++ b/runtime/arch/x86/context_x86.h
@@ -62,15 +62,9 @@ class X86Context : public Context {
bool SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
- bool GetFPR(uint32_t reg, uintptr_t* val) OVERRIDE {
- LOG(FATAL) << "Floating-point registers are all caller save in X86";
- return false;
- }
+ bool GetFPR(uint32_t reg, uintptr_t* val) OVERRIDE;
- bool SetFPR(uint32_t reg, uintptr_t value) OVERRIDE {
- LOG(FATAL) << "Floating-point registers are all caller save in X86";
- return false;
- }
+ bool SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
void SmashCallerSaves() OVERRIDE;
void DoLongJump() OVERRIDE;
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 9d74ef5..ad962e2 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -231,7 +231,7 @@ static uint32_t GetInstructionSize(const uint8_t* pc) {
return pc - startpc;
}
-void FaultManager::HandleNestedSignal(int sig, siginfo_t* info, void* context) {
+void FaultManager::HandleNestedSignal(int, siginfo_t*, void* context) {
// For the Intel architectures we need to go to an assembly language
// stub. This is because the 32 bit call to longjmp is much different
// from the 64 bit ABI call and pushing things onto the stack inside this
@@ -284,7 +284,7 @@ void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
*out_return_pc = reinterpret_cast<uintptr_t>(pc + instr_size);
}
-bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
+bool NullPointerHandler::Action(int, siginfo_t*, void* context) {
struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
uint8_t* pc = reinterpret_cast<uint8_t*>(uc->CTX_EIP);
uint8_t* sp = reinterpret_cast<uint8_t*>(uc->CTX_ESP);
@@ -324,7 +324,7 @@ bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
// The offset from fs is Thread::ThreadSuspendTriggerOffset().
// To check for a suspend check, we examine the instructions that caused
// the fault.
-bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
+bool SuspensionHandler::Action(int, siginfo_t*, void* context) {
// These are the instructions to check for. The first one is the mov eax, fs:[xxx]
// where xxx is the offset of the suspend trigger.
#if defined(__x86_64__)
@@ -398,7 +398,7 @@ bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
// This is done before any frame is established in the method. The return
// address for the previous method is on the stack at ESP.
-bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
+bool StackOverflowHandler::Action(int, siginfo_t* info, void* context) {
struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
uintptr_t sp = static_cast<uintptr_t>(uc->CTX_ESP);
diff --git a/runtime/base/allocator.cc b/runtime/base/allocator.cc
index 994e235..4f2fc07 100644
--- a/runtime/base/allocator.cc
+++ b/runtime/base/allocator.cc
@@ -30,11 +30,11 @@ class MallocAllocator FINAL : public Allocator {
explicit MallocAllocator() {}
~MallocAllocator() {}
- virtual void* Alloc(size_t size) {
+ void* Alloc(size_t size) {
return calloc(sizeof(uint8_t), size);
}
- virtual void Free(void* p) {
+ void Free(void* p) {
free(p);
}
@@ -49,13 +49,15 @@ class NoopAllocator FINAL : public Allocator {
explicit NoopAllocator() {}
~NoopAllocator() {}
- virtual void* Alloc(size_t size) {
+ void* Alloc(size_t size) {
+ UNUSED(size);
LOG(FATAL) << "NoopAllocator::Alloc should not be called";
- return NULL;
+ UNREACHABLE();
}
- virtual void Free(void* p) {
+ void Free(void* p) {
// Noop.
+ UNUSED(p);
}
private:
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index 95dd407..3ca9ebb 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -114,6 +114,7 @@ class TrackingAllocatorImpl {
// Used internally by STL data structures.
template <class U>
TrackingAllocatorImpl(const TrackingAllocatorImpl<U, kTag>& alloc) throw() {
+ UNUSED(alloc);
}
// Used internally by STL data structures.
@@ -129,6 +130,7 @@ class TrackingAllocatorImpl {
};
pointer allocate(size_type n, const_pointer hint = 0) {
+ UNUSED(hint);
const size_t size = n * sizeof(T);
TrackedAllocators::RegisterAllocation(GetTag(), size);
return reinterpret_cast<pointer>(malloc(size));
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index febea61..90cf951 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -189,7 +189,19 @@ char (&ArraySizeHelper(T (&array)[N]))[N];
#define PURE __attribute__ ((__pure__))
#define WARN_UNUSED __attribute__((warn_unused_result))
-template<typename T> void UNUSED(const T&) {}
+// A deprecated function to call to create a false use of the parameter, for example:
+// int foo(int x) { UNUSED(x); return 10; }
+// to avoid compiler warnings. Going forward we prefer ATTRIBUTE_UNUSED.
+template<typename... T> void UNUSED(const T&...) {}
+
+// An attribute to place on a parameter to a function, for example:
+// int foo(int x ATTRIBUTE_UNUSED) { return 10; }
+// to avoid compiler warnings.
+#define ATTRIBUTE_UNUSED __attribute__((__unused__))
+
+// Define that a position within code is unreachable, for example:
+// int foo () { LOG(FATAL) << "Don't call me"; UNREACHABLE(); }
+// without the UNREACHABLE a return statement would be necessary.
#define UNREACHABLE __builtin_unreachable
// The FALLTHROUGH_INTENDED macro can be used to annotate implicit fall-through
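The UNREACHABLE() macro pairs with LOG(FATAL), as in the X86Context::GetFPR/SetFPR and NoopAllocator::Alloc changes elsewhere in this patch: once the fatal path is marked unreachable, the dead 'return false;' and 'return NULL;' statements can be dropped. A self-contained sketch of the pattern, with a hypothetical stand-in for ART's LOG(FATAL):

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    #define UNREACHABLE __builtin_unreachable

    [[noreturn]] static void FatalLog(const char* msg) {  // stand-in for LOG(FATAL)
      std::fprintf(stderr, "%s\n", msg);
      std::abort();
    }

    bool GetFPR(uint32_t, uintptr_t*) {
      FatalLog("Floating-point registers are all caller save in X86");
      UNREACHABLE();  // tells the compiler no return statement is needed here
    }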
diff --git a/runtime/base/unix_file/mapped_file.cc b/runtime/base/unix_file/mapped_file.cc
deleted file mode 100644
index 77f4d02..0000000
--- a/runtime/base/unix_file/mapped_file.cc
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/logging.h"
-#include "base/unix_file/mapped_file.h"
-#include <fcntl.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <algorithm>
-#include <string>
-
-namespace unix_file {
-
-MappedFile::~MappedFile() {
-}
-
-int MappedFile::Close() {
- if (IsMapped()) {
- Unmap();
- }
- return FdFile::Close();
-}
-
-bool MappedFile::MapReadOnly() {
- CHECK(IsOpened());
- CHECK(!IsMapped());
- struct stat st;
- int result = TEMP_FAILURE_RETRY(fstat(Fd(), &st));
- if (result == -1) {
- PLOG(::art::WARNING) << "Failed to stat file '" << GetPath() << "'";
- return false;
- }
- file_size_ = st.st_size;
- do {
- mapped_file_ = mmap(NULL, file_size_, PROT_READ, MAP_PRIVATE, Fd(), 0);
- } while (mapped_file_ == MAP_FAILED && errno == EINTR);
- if (mapped_file_ == MAP_FAILED) {
- PLOG(::art::WARNING) << "Failed to mmap file '" << GetPath() << "' of size "
- << file_size_ << " bytes to memory";
- return false;
- }
- map_mode_ = kMapReadOnly;
- return true;
-}
-
-bool MappedFile::MapReadWrite(int64_t file_size) {
- CHECK(IsOpened());
- CHECK(!IsMapped());
-#ifdef __linux__
- int result = TEMP_FAILURE_RETRY(ftruncate64(Fd(), file_size));
-#else
- int result = TEMP_FAILURE_RETRY(ftruncate(Fd(), file_size));
-#endif
- if (result == -1) {
- PLOG(::art::ERROR) << "Failed to truncate file '" << GetPath() << "' to size " << file_size;
- return false;
- }
- file_size_ = file_size;
- do {
- mapped_file_ =
- mmap(NULL, file_size_, PROT_READ | PROT_WRITE, MAP_SHARED, Fd(), 0);
- } while (mapped_file_ == MAP_FAILED && errno == EINTR);
- if (mapped_file_ == MAP_FAILED) {
- PLOG(::art::WARNING) << "Failed to mmap file '" << GetPath() << "' of size "
- << file_size_ << " bytes to memory";
- return false;
- }
- map_mode_ = kMapReadWrite;
- return true;
-}
-
-bool MappedFile::Unmap() {
- CHECK(IsMapped());
- int result = TEMP_FAILURE_RETRY(munmap(mapped_file_, file_size_));
- if (result == -1) {
- PLOG(::art::WARNING) << "Failed unmap file '" << GetPath() << "' of size " << file_size_;
- return false;
- } else {
- mapped_file_ = NULL;
- file_size_ = -1;
- return true;
- }
-}
-
-int64_t MappedFile::Read(char* buf, int64_t byte_count, int64_t offset) const {
- if (IsMapped()) {
- if (offset < 0) {
- errno = EINVAL;
- return -errno;
- }
- int64_t read_size = std::max(static_cast<int64_t>(0),
- std::min(byte_count, file_size_ - offset));
- if (read_size > 0) {
- memcpy(buf, data() + offset, read_size);
- }
- return read_size;
- } else {
- return FdFile::Read(buf, byte_count, offset);
- }
-}
-
-int MappedFile::SetLength(int64_t new_length) {
- CHECK(!IsMapped());
- return FdFile::SetLength(new_length);
-}
-
-int64_t MappedFile::GetLength() const {
- if (IsMapped()) {
- return file_size_;
- } else {
- return FdFile::GetLength();
- }
-}
-
-int MappedFile::Flush() {
- int rc = IsMapped() ? TEMP_FAILURE_RETRY(msync(mapped_file_, file_size_, 0)) : FdFile::Flush();
- return rc == -1 ? -errno : 0;
-}
-
-int64_t MappedFile::Write(const char* buf, int64_t byte_count, int64_t offset) {
- if (IsMapped()) {
- CHECK_EQ(kMapReadWrite, map_mode_);
- if (offset < 0) {
- errno = EINVAL;
- return -errno;
- }
- int64_t write_size = std::max(static_cast<int64_t>(0),
- std::min(byte_count, file_size_ - offset));
- if (write_size > 0) {
- memcpy(data() + offset, buf, write_size);
- }
- return write_size;
- } else {
- return FdFile::Write(buf, byte_count, offset);
- }
-}
-
-int64_t MappedFile::size() const {
- return GetLength();
-}
-
-bool MappedFile::IsMapped() const {
- return mapped_file_ != NULL && mapped_file_ != MAP_FAILED;
-}
-
-char* MappedFile::data() const {
- CHECK(IsMapped());
- return static_cast<char*>(mapped_file_);
-}
-
-} // namespace unix_file
diff --git a/runtime/base/unix_file/mapped_file.h b/runtime/base/unix_file/mapped_file.h
deleted file mode 100644
index 73056e9..0000000
--- a/runtime/base/unix_file/mapped_file.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_BASE_UNIX_FILE_MAPPED_FILE_H_
-#define ART_RUNTIME_BASE_UNIX_FILE_MAPPED_FILE_H_
-
-#include <fcntl.h>
-#include <string>
-#include "base/unix_file/fd_file.h"
-
-namespace unix_file {
-
-// Random access file which handles an mmap(2), munmap(2) pair in C++
-// RAII style. When a file is mmapped, the random access file
-// interface accesses the mmapped memory directly; otherwise, the
-// standard file I/O is used. Whenever a function fails, it returns
-// false and errno is set to the corresponding error code.
-class MappedFile : public FdFile {
- public:
- // File modes used in Open().
- enum FileMode {
-#ifdef __linux__
- kReadOnlyMode = O_RDONLY | O_LARGEFILE,
- kReadWriteMode = O_CREAT | O_RDWR | O_LARGEFILE,
-#else
- kReadOnlyMode = O_RDONLY,
- kReadWriteMode = O_CREAT | O_RDWR,
-#endif
- };
-
- MappedFile() : FdFile(), file_size_(-1), mapped_file_(NULL) {
- }
- // Creates a MappedFile using the given file descriptor. Takes ownership of
- // the file descriptor.
- explicit MappedFile(int fd) : FdFile(fd), file_size_(-1), mapped_file_(NULL) {
- }
-
- // Unmaps and closes the file if needed.
- virtual ~MappedFile();
-
- // Maps an opened file to memory in the read-only mode.
- bool MapReadOnly();
-
- // Maps an opened file to memory in the read-write mode. Before the
- // file is mapped, it is truncated to 'file_size' bytes.
- bool MapReadWrite(int64_t file_size);
-
- // Unmaps a mapped file so that, e.g., SetLength() may be invoked.
- bool Unmap();
-
- // RandomAccessFile API.
- // The functions below require that the file is open, but it doesn't
- // have to be mapped.
- virtual int Close();
- virtual int64_t Read(char* buf, int64_t byte_count, int64_t offset) const;
- // SetLength() requires that the file is not mmapped.
- virtual int SetLength(int64_t new_length);
- virtual int64_t GetLength() const;
- virtual int Flush();
- // Write() requires that, if the file is mmapped, it is mmapped in
- // the read-write mode. Writes past the end of file are discarded.
- virtual int64_t Write(const char* buf, int64_t byte_count, int64_t offset);
-
- // A convenience method equivalent to GetLength().
- int64_t size() const;
-
- // Returns true if the file has been mmapped.
- bool IsMapped() const;
-
- // Returns a pointer to the start of the memory mapping once the
- // file is successfully mapped; crashes otherwise.
- char* data() const;
-
- private:
- enum MapMode {
- kMapReadOnly = 1,
- kMapReadWrite = 2,
- };
-
- mutable int64_t file_size_; // May be updated in GetLength().
- void* mapped_file_;
- MapMode map_mode_;
-
- DISALLOW_COPY_AND_ASSIGN(MappedFile);
-};
-
-} // namespace unix_file
-
-#endif // ART_RUNTIME_BASE_UNIX_FILE_MAPPED_FILE_H_
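The deleted MappedFile wrapped an mmap(2)/munmap(2) pair in RAII style, falling back to plain file I/O when not mapped. For reference, a minimal sketch of that RAII pattern (an illustration only, not the removed class):

    #include <sys/mman.h>
    #include <cstddef>

    class ScopedMapping {
     public:
      ScopedMapping(int fd, size_t size)
          : addr_(mmap(nullptr, size, PROT_READ, MAP_PRIVATE, fd, 0)), size_(size) {}
      ~ScopedMapping() { if (ok()) munmap(addr_, size_); }
      bool ok() const { return addr_ != MAP_FAILED; }
      const char* data() const { return static_cast<const char*>(addr_); }
     private:
      void* addr_;   // MAP_FAILED if the mapping did not succeed
      size_t size_;
    };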
diff --git a/runtime/base/unix_file/mapped_file_test.cc b/runtime/base/unix_file/mapped_file_test.cc
deleted file mode 100644
index 59334d4..0000000
--- a/runtime/base/unix_file/mapped_file_test.cc
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/unix_file/mapped_file.h"
-#include "base/logging.h"
-#include "base/unix_file/fd_file.h"
-#include "base/unix_file/random_access_file_test.h"
-#include "base/unix_file/random_access_file_utils.h"
-#include "base/unix_file/string_file.h"
-#include "gtest/gtest.h"
-
-namespace unix_file {
-
-class MappedFileTest : public RandomAccessFileTest {
- protected:
- MappedFileTest() : kContent("some content") {
- }
-
- void SetUp() {
- RandomAccessFileTest::SetUp();
-
- good_path_ = GetTmpPath("some-file.txt");
- int fd = TEMP_FAILURE_RETRY(open(good_path_.c_str(), O_CREAT|O_RDWR, 0666));
- FdFile dst(fd);
-
- StringFile src;
- src.Assign(kContent);
-
- ASSERT_TRUE(CopyFile(src, &dst));
- }
-
- void TearDown() {
- ASSERT_EQ(unlink(good_path_.c_str()), 0);
-
- RandomAccessFileTest::TearDown();
- }
-
- virtual RandomAccessFile* MakeTestFile() {
- TEMP_FAILURE_RETRY(truncate(good_path_.c_str(), 0));
- MappedFile* f = new MappedFile;
- CHECK(f->Open(good_path_, MappedFile::kReadWriteMode));
- return f;
- }
-
- const std::string kContent;
- std::string good_path_;
-};
-
-TEST_F(MappedFileTest, OkayToNotUse) {
- MappedFile file;
- EXPECT_EQ(-1, file.Fd());
- EXPECT_FALSE(file.IsOpened());
- EXPECT_FALSE(file.IsMapped());
-}
-
-TEST_F(MappedFileTest, OpenClose) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- EXPECT_GE(file.Fd(), 0);
- EXPECT_TRUE(file.IsOpened());
- EXPECT_EQ(kContent.size(), static_cast<uint64_t>(file.size()));
- EXPECT_EQ(0, file.Close());
- EXPECT_EQ(-1, file.Fd());
- EXPECT_FALSE(file.IsOpened());
-}
-
-TEST_F(MappedFileTest, OpenFdClose) {
- FILE* f = tmpfile();
- ASSERT_TRUE(f != NULL);
- MappedFile file(fileno(f));
- EXPECT_GE(file.Fd(), 0);
- EXPECT_TRUE(file.IsOpened());
- EXPECT_EQ(0, file.Close());
-}
-
-TEST_F(MappedFileTest, CanUseAfterMapReadOnly) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- EXPECT_FALSE(file.IsMapped());
- EXPECT_TRUE(file.MapReadOnly());
- EXPECT_TRUE(file.IsMapped());
- EXPECT_EQ(kContent.size(), static_cast<uint64_t>(file.size()));
- ASSERT_TRUE(file.data());
- EXPECT_EQ(0, memcmp(kContent.c_str(), file.data(), file.size()));
- EXPECT_EQ(0, file.Flush());
-}
-
-TEST_F(MappedFileTest, CanUseAfterMapReadWrite) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadWriteMode));
- EXPECT_FALSE(file.IsMapped());
- EXPECT_TRUE(file.MapReadWrite(1));
- EXPECT_TRUE(file.IsMapped());
- EXPECT_EQ(1, file.size());
- ASSERT_TRUE(file.data());
- EXPECT_EQ(kContent[0], *file.data());
- EXPECT_EQ(0, file.Flush());
-}
-
-TEST_F(MappedFileTest, CanWriteNewData) {
- const std::string new_path(GetTmpPath("new-file.txt"));
- ASSERT_EQ(-1, unlink(new_path.c_str()));
- ASSERT_EQ(ENOENT, errno);
-
- MappedFile file;
- ASSERT_TRUE(file.Open(new_path, MappedFile::kReadWriteMode));
- EXPECT_TRUE(file.MapReadWrite(kContent.size()));
- EXPECT_TRUE(file.IsMapped());
- EXPECT_EQ(kContent.size(), static_cast<uint64_t>(file.size()));
- ASSERT_TRUE(file.data());
- memcpy(file.data(), kContent.c_str(), kContent.size());
- EXPECT_EQ(0, file.Close());
- EXPECT_FALSE(file.IsMapped());
-
- FdFile new_file(TEMP_FAILURE_RETRY(open(new_path.c_str(), O_RDONLY)));
- StringFile buffer;
- ASSERT_TRUE(CopyFile(new_file, &buffer));
- EXPECT_EQ(kContent, buffer.ToStringPiece());
- EXPECT_EQ(0, unlink(new_path.c_str()));
-}
-
-TEST_F(MappedFileTest, FileMustExist) {
- const std::string bad_path(GetTmpPath("does-not-exist.txt"));
- MappedFile file;
- EXPECT_FALSE(file.Open(bad_path, MappedFile::kReadOnlyMode));
- EXPECT_EQ(-1, file.Fd());
-}
-
-TEST_F(MappedFileTest, FileMustBeWritable) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- EXPECT_FALSE(file.MapReadWrite(10));
-}
-
-TEST_F(MappedFileTest, RemappingAllowedUntilSuccess) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- EXPECT_FALSE(file.MapReadWrite(10));
- EXPECT_FALSE(file.MapReadWrite(10));
-}
-
-TEST_F(MappedFileTest, ResizeMappedFile) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadWriteMode));
- ASSERT_TRUE(file.MapReadWrite(10));
- EXPECT_EQ(10, file.GetLength());
- EXPECT_TRUE(file.Unmap());
- EXPECT_TRUE(file.MapReadWrite(20));
- EXPECT_EQ(20, file.GetLength());
- EXPECT_EQ(0, file.Flush());
- EXPECT_TRUE(file.Unmap());
- EXPECT_EQ(0, file.Flush());
- EXPECT_EQ(0, file.SetLength(5));
- EXPECT_TRUE(file.MapReadOnly());
- EXPECT_EQ(5, file.GetLength());
-}
-
-TEST_F(MappedFileTest, ReadNotMapped) {
- TestRead();
-}
-
-TEST_F(MappedFileTest, SetLengthNotMapped) {
- TestSetLength();
-}
-
-TEST_F(MappedFileTest, WriteNotMapped) {
- TestWrite();
-}
-
-TEST_F(MappedFileTest, ReadMappedReadOnly) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- ASSERT_TRUE(file.MapReadOnly());
- TestReadContent(kContent, &file);
-}
-
-TEST_F(MappedFileTest, ReadMappedReadWrite) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadWriteMode));
- ASSERT_TRUE(file.MapReadWrite(kContent.size()));
- TestReadContent(kContent, &file);
-}
-
-TEST_F(MappedFileTest, WriteMappedReadWrite) {
- TEMP_FAILURE_RETRY(unlink(good_path_.c_str()));
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadWriteMode));
- ASSERT_TRUE(file.MapReadWrite(kContent.size()));
-
- // Can't write to a negative offset.
- EXPECT_EQ(-EINVAL, file.Write(kContent.c_str(), 0, -123));
-
- // A zero-length write is a no-op.
- EXPECT_EQ(0, file.Write(kContent.c_str(), 0, 0));
- // But the file size is as given when mapped.
- EXPECT_EQ(kContent.size(), static_cast<uint64_t>(file.GetLength()));
-
- // Data written past the end are discarded.
- EXPECT_EQ(kContent.size() - 1,
- static_cast<uint64_t>(file.Write(kContent.c_str(), kContent.size(), 1)));
- EXPECT_EQ(0, memcmp(kContent.c_str(), file.data() + 1, kContent.size() - 1));
-
- // Data can be overwritten.
- EXPECT_EQ(kContent.size(),
- static_cast<uint64_t>(file.Write(kContent.c_str(), kContent.size(), 0)));
- EXPECT_EQ(0, memcmp(kContent.c_str(), file.data(), kContent.size()));
-}
-
-#if 0 // death tests don't work on android yet
-
-class MappedFileDeathTest : public MappedFileTest {};
-
-TEST_F(MappedFileDeathTest, MustMapBeforeUse) {
- MappedFile file;
- EXPECT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- EXPECT_DEATH(file.data(), "mapped_");
-}
-
-TEST_F(MappedFileDeathTest, RemappingNotAllowedReadOnly) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- ASSERT_TRUE(file.MapReadOnly());
- EXPECT_DEATH(file.MapReadOnly(), "mapped_");
-}
-
-TEST_F(MappedFileDeathTest, RemappingNotAllowedReadWrite) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadWriteMode));
- ASSERT_TRUE(file.MapReadWrite(10));
- EXPECT_DEATH(file.MapReadWrite(10), "mapped_");
-}
-
-TEST_F(MappedFileDeathTest, SetLengthMappedReadWrite) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadWriteMode));
- ASSERT_TRUE(file.MapReadWrite(10));
- EXPECT_EQ(10, file.GetLength());
- EXPECT_DEATH(file.SetLength(0), ".*");
-}
-
-TEST_F(MappedFileDeathTest, SetLengthMappedReadOnly) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- ASSERT_TRUE(file.MapReadOnly());
- EXPECT_EQ(kContent.size(), file.GetLength());
- EXPECT_DEATH(file.SetLength(0), ".*");
-}
-
-TEST_F(MappedFileDeathTest, WriteMappedReadOnly) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- ASSERT_TRUE(file.MapReadOnly());
- char buf[10];
- EXPECT_DEATH(file.Write(buf, 0, 0), ".*");
-}
-
-#endif
-
-} // namespace unix_file
diff --git a/runtime/base/unix_file/null_file.cc b/runtime/base/unix_file/null_file.cc
index 050decb..322c25a 100644
--- a/runtime/base/unix_file/null_file.cc
+++ b/runtime/base/unix_file/null_file.cc
@@ -33,7 +33,8 @@ int NullFile::Flush() {
return 0;
}
-int64_t NullFile::Read(char* buf, int64_t byte_count, int64_t offset) const {
+int64_t NullFile::Read(char* buf ATTRIBUTE_UNUSED, int64_t byte_count ATTRIBUTE_UNUSED,
+ int64_t offset) const {
if (offset < 0) {
return -EINVAL;
}
@@ -51,7 +52,8 @@ int64_t NullFile::GetLength() const {
return 0;
}
-int64_t NullFile::Write(const char* buf, int64_t byte_count, int64_t offset) {
+int64_t NullFile::Write(const char* buf ATTRIBUTE_UNUSED, int64_t byte_count ATTRIBUTE_UNUSED,
+ int64_t offset) {
if (offset < 0) {
return -EINVAL;
}
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index b2df091..ef5ccb6 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -512,7 +512,7 @@ class ScopedCheck {
return true;
}
- bool CheckReferenceKind(IndirectRefKind expected_kind, JavaVMExt* vm, Thread* self, jobject obj) {
+ bool CheckReferenceKind(IndirectRefKind expected_kind, Thread* self, jobject obj) {
IndirectRefKind found_kind;
if (expected_kind == kLocal) {
found_kind = GetIndirectRefKind(obj);
@@ -2398,7 +2398,7 @@ class CheckJNI {
}
if (sc.Check(soa, false, "L", &result)) {
DCHECK_EQ(IsSameObject(env, obj, result.L), JNI_TRUE);
- DCHECK(sc.CheckReferenceKind(kind, soa.Vm(), soa.Self(), result.L));
+ DCHECK(sc.CheckReferenceKind(kind, soa.Self(), result.L));
return result.L;
}
}
@@ -2410,7 +2410,7 @@ class CheckJNI {
ScopedCheck sc(kFlag_ExcepOkay, function_name);
JniValueType args[2] = {{.E = env}, {.L = obj}};
sc.Check(soa, true, "EL", args);
- if (sc.CheckReferenceKind(kind, soa.Vm(), soa.Self(), obj)) {
+ if (sc.CheckReferenceKind(kind, soa.Self(), obj)) {
JniValueType result;
switch (kind) {
case kGlobal:
@@ -3116,7 +3116,7 @@ class CheckJNI {
static jarray NewPrimitiveArray(const char* function_name, JNIEnv* env, jsize length,
Primitive::Type type) {
ScopedObjectAccess soa(env);
- ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ ScopedCheck sc(kFlag_Default, function_name);
JniValueType args[2] = {{.E = env}, {.z = length}};
if (sc.Check(soa, true, "Ez", args)) {
JniValueType result;
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 875efbb..ead3fa5 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -105,8 +105,7 @@ inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx, mirror::ArtFie
}
inline mirror::ArtMethod* ClassLinker::GetResolvedMethod(uint32_t method_idx,
- mirror::ArtMethod* referrer,
- InvokeType type) {
+ mirror::ArtMethod* referrer) {
mirror::ArtMethod* resolved_method = referrer->GetDexCacheResolvedMethod(method_idx);
if (resolved_method == nullptr || resolved_method->IsRuntimeMethod()) {
return nullptr;
@@ -117,7 +116,7 @@ inline mirror::ArtMethod* ClassLinker::GetResolvedMethod(uint32_t method_idx,
inline mirror::ArtMethod* ClassLinker::ResolveMethod(Thread* self, uint32_t method_idx,
mirror::ArtMethod** referrer,
InvokeType type) {
- mirror::ArtMethod* resolved_method = GetResolvedMethod(method_idx, *referrer, type);
+ mirror::ArtMethod* resolved_method = GetResolvedMethod(method_idx, *referrer);
if (LIKELY(resolved_method != nullptr)) {
return resolved_method;
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index d99e373..f5ac350 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2673,8 +2673,7 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) {
void ClassLinker::LinkCode(Handle<mirror::ArtMethod> method,
const OatFile::OatClass* oat_class,
- const DexFile& dex_file, uint32_t dex_method_index,
- uint32_t method_index) {
+ uint32_t class_def_method_index) {
Runtime* runtime = Runtime::Current();
if (runtime->IsCompiler()) {
// The following code only applies to a non-compiler runtime.
@@ -2686,7 +2685,7 @@ void ClassLinker::LinkCode(Handle<mirror::ArtMethod> method,
if (oat_class != nullptr) {
// Every kind of method should at least get an invoke stub from the oat_method.
// non-abstract methods also get their code pointers.
- const OatFile::OatMethod oat_method = oat_class->GetOatMethod(method_index);
+ const OatFile::OatMethod oat_method = oat_class->GetOatMethod(class_def_method_index);
oat_method.LinkMethod(method.Get());
}
@@ -2788,18 +2787,17 @@ void ClassLinker::LoadClass(Thread* self, const DexFile& dex_file,
OatFile::OatClass oat_class = FindOatClass(dex_file, klass->GetDexClassDefIndex(),
&has_oat_class);
if (has_oat_class) {
- LoadClassMembers(self, dex_file, class_data, klass, class_loader, &oat_class);
+ LoadClassMembers(self, dex_file, class_data, klass, &oat_class);
}
}
if (!has_oat_class) {
- LoadClassMembers(self, dex_file, class_data, klass, class_loader, nullptr);
+ LoadClassMembers(self, dex_file, class_data, klass, nullptr);
}
}
void ClassLinker::LoadClassMembers(Thread* self, const DexFile& dex_file,
const uint8_t* class_data,
Handle<mirror::Class> klass,
- mirror::ClassLoader* class_loader,
const OatFile::OatClass* oat_class) {
// Load fields.
ClassDataItemIterator it(dex_file, class_data);
@@ -2876,7 +2874,7 @@ void ClassLinker::LoadClassMembers(Thread* self, const DexFile& dex_file,
return;
}
klass->SetDirectMethod(i, method.Get());
- LinkCode(method, oat_class, dex_file, it.GetMemberIndex(), class_def_method_index);
+ LinkCode(method, oat_class, class_def_method_index);
uint32_t it_method_index = it.GetMemberIndex();
if (last_dex_method_index == it_method_index) {
// duplicate case
@@ -2898,7 +2896,7 @@ void ClassLinker::LoadClassMembers(Thread* self, const DexFile& dex_file,
}
klass->SetVirtualMethod(i, method.Get());
DCHECK_EQ(class_def_method_index, it.NumDirectMethods() + i);
- LinkCode(method, oat_class, dex_file, it.GetMemberIndex(), class_def_method_index);
+ LinkCode(method, oat_class, class_def_method_index);
class_def_method_index++;
}
DCHECK(!it.HasNext());
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 8034d62..a1cae4d 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -222,8 +222,7 @@ class ClassLinker {
InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* GetResolvedMethod(uint32_t method_idx, mirror::ArtMethod* referrer,
- InvokeType type)
+ mirror::ArtMethod* GetResolvedMethod(uint32_t method_idx, mirror::ArtMethod* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::ArtMethod* ResolveMethod(Thread* self, uint32_t method_idx, mirror::ArtMethod** referrer,
InvokeType type)
@@ -506,8 +505,7 @@ class ClassLinker {
Handle<mirror::Class> klass, mirror::ClassLoader* class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void LoadClassMembers(Thread* self, const DexFile& dex_file, const uint8_t* class_data,
- Handle<mirror::Class> klass, mirror::ClassLoader* class_loader,
- const OatFile::OatClass* oat_class)
+ Handle<mirror::Class> klass, const OatFile::OatClass* oat_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void LoadField(const DexFile& dex_file, const ClassDataItemIterator& it,
@@ -581,7 +579,7 @@ class ClassLinker {
bool LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_static, size_t* class_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void LinkCode(Handle<mirror::ArtMethod> method, const OatFile::OatClass* oat_class,
- const DexFile& dex_file, uint32_t dex_method_index, uint32_t method_index)
+ uint32_t class_def_method_index)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CreateReferenceInstanceOffsets(Handle<mirror::Class> klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 1ca6eb3..bd0dbaa 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -85,7 +85,7 @@ class CommonRuntimeTest : public testing::Test {
virtual void SetUp();
// Allow subclases such as CommonCompilerTest to add extra options.
- virtual void SetUpRuntimeOptions(RuntimeOptions* options) {}
+ virtual void SetUpRuntimeOptions(RuntimeOptions* options ATTRIBUTE_UNUSED) {}
void ClearDirectory(const char* dirpath);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 5409d54..a9663bb 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -231,7 +231,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
virtual ~DebugInstrumentationListener() {}
void MethodEntered(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
- uint32_t dex_pc)
+ uint32_t dex_pc ATTRIBUTE_UNUSED)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (method->IsNative()) {
// TODO: post location events is a suspension point and native method entry stubs aren't.
@@ -254,6 +254,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
uint32_t dex_pc)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// We're not recorded to listen to this kind of event, so complain.
+ UNUSED(thread, this_object, method, dex_pc);
LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
<< " " << dex_pc;
}
@@ -267,16 +268,18 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
void FieldRead(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
uint32_t dex_pc, mirror::ArtField* field)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(thread);
Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
}
- void FieldWritten(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
- uint32_t dex_pc, mirror::ArtField* field, const JValue& field_value)
+ void FieldWritten(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
+ mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field,
+ const JValue& field_value)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
}
- void ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
+ void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED, const ThrowLocation& throw_location,
mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
mirror::Throwable* exception_object)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 48e457f..488ba7f 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -189,6 +189,7 @@ class DeoptimizationRequest {
// Method for selective deoptimization.
jmethodID method_;
};
+std::ostream& operator<<(std::ostream& os, const DeoptimizationRequest::Kind& rhs);
class Dbg {
public:
@@ -493,7 +494,7 @@ class Dbg {
/*
* Debugger notification
*/
- enum {
+ enum EventFlag {
kBreakpoint = 0x01,
kSingleStep = 0x02,
kMethodEntry = 0x04,
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 10fe6bf..a07a5b6 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -1252,7 +1252,7 @@ class EncodedStaticFieldValueIterator {
template<bool kTransactionActive>
void ReadValueToField(Handle<mirror::ArtField> field) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool HasNext() { return pos_ < array_size_; }
+ bool HasNext() const { return pos_ < array_size_; }
void Next();
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index 72802e4..af5d9d0 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -35,7 +35,7 @@ enum {
class Instruction {
public:
// NOP-encoded switch-statement signatures.
- enum {
+ enum Signatures {
kPackedSwitchSignature = 0x0100,
kSparseSwitchSignature = 0x0200,
kArrayDataSignature = 0x0300,
@@ -79,10 +79,7 @@ class Instruction {
DISALLOW_COPY_AND_ASSIGN(ArrayDataPayload);
};
- // TODO: the code layout below is deliberate to avoid this enum being picked up by
- // generate-operator-out.py.
- enum Code
- { // NOLINT(whitespace/braces)
+ enum Code { // private marker to avoid generate-operator-out.py from processing.
#define INSTRUCTION_ENUM(opcode, cname, p, f, r, i, a, v) cname = opcode,
#include "dex_instruction_list.h"
DEX_INSTRUCTION_LIST(INSTRUCTION_ENUM)
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index f76da8e..1a8ca02 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -35,7 +35,6 @@
namespace art {
-// TODO: Fix no thread safety analysis when GCC can handle template specialization.
template <const bool kAccessCheck>
ALWAYS_INLINE
static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
@@ -90,7 +89,6 @@ static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
return klass;
}
-// TODO: Fix no thread safety analysis when annotalysis is smarter.
ALWAYS_INLINE
static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
Thread* self,
@@ -120,7 +118,6 @@ static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class*
// cannot be resolved, throw an error. If it can, use it to create an instance.
// When verification/compiler hasn't been able to verify access, optionally perform an access
// check.
-// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE
static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
@@ -140,11 +137,9 @@ static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
}
// Given the context of a calling Method and a resolved class, create an instance.
-// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
template <bool kInstrumented>
ALWAYS_INLINE
static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
- mirror::ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type) {
DCHECK(klass != nullptr);
@@ -163,11 +158,9 @@ static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
}
// Given the context of a calling Method and an initialized class, create an instance.
-// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
template <bool kInstrumented>
ALWAYS_INLINE
static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
- mirror::ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type) {
DCHECK(klass != nullptr);
@@ -176,7 +169,6 @@ static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klas
}
-// TODO: Fix no thread safety analysis when GCC can handle template specialization.
template <bool kAccessCheck>
ALWAYS_INLINE
static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
@@ -213,7 +205,6 @@ static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
// it cannot be resolved, throw an error. If it can, use it to create an array.
// When verification/compiler hasn't been able to verify access, optionally perform an access
// check.
-// TODO: Fix no thread safety analysis when GCC can handle template specialization.
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE
static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
@@ -362,7 +353,7 @@ static inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx,
mirror::Object** this_object,
mirror::ArtMethod** referrer, Thread* self) {
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- mirror::ArtMethod* resolved_method = class_linker->GetResolvedMethod(method_idx, *referrer, type);
+ mirror::ArtMethod* resolved_method = class_linker->GetResolvedMethod(method_idx, *referrer);
if (resolved_method == nullptr) {
StackHandleScope<1> hs(self);
mirror::Object* null_this = nullptr;
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index c46d887..311cafa 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -47,7 +47,6 @@ ALWAYS_INLINE static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
Thread* self, bool* slow_path)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-// TODO: Fix no thread safety analysis when annotalysis is smarter.
ALWAYS_INLINE static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
Thread* self, bool* slow_path)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -66,7 +65,6 @@ ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCode(uint32_t type_id
// Given the context of a calling Method and a resolved class, create an instance.
template <bool kInstrumented>
ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
- mirror::ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -74,7 +72,6 @@ ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeResolved(mirror::
// Given the context of a calling Method and an initialized class, create an instance.
template <bool kInstrumented>
ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
- mirror::ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc b/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc
index f0ad6de..afe769e 100644
--- a/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc
@@ -25,6 +25,7 @@ extern "C" void art_portable_fill_array_data_from_code(mirror::ArtMethod* method
mirror::Array* array,
uint32_t payload_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(dex_pc);
const DexFile::CodeItem* code_item = method->GetCodeItem();
const Instruction::ArrayDataPayload* payload =
reinterpret_cast<const Instruction::ArrayDataPayload*>(code_item->insns_ + payload_offset);
diff --git a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
index 61d66ba..e7975f8 100644
--- a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
@@ -121,6 +121,7 @@ class PortableArgumentVisitor {
private:
static size_t ComputeArgsInRegs(MethodHelper& mh) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if (defined(__i386__))
+ UNUSED(mh);
return 0;
#else
size_t args_in_regs = 0;
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index 7dbfdd5..cccf8f3 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -29,8 +29,7 @@ static constexpr bool kUseTlabFastPath = true;
#define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, suffix2, instrumented_bool, allocator_type) \
extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
- uint32_t type_idx, mirror::ArtMethod* method, Thread* self, \
- StackReference<mirror::ArtMethod>* sp) \
+ uint32_t type_idx, mirror::ArtMethod* method, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
@@ -59,6 +58,7 @@ extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
mirror::Class* klass, mirror::ArtMethod* method, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ UNUSED(method); \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
if (LIKELY(klass->IsInitialized())) { \
@@ -80,11 +80,12 @@ extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
} \
} \
} \
- return AllocObjectFromCodeResolved<instrumented_bool>(klass, method, self, allocator_type); \
+ return AllocObjectFromCodeResolved<instrumented_bool>(klass, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
mirror::Class* klass, mirror::ArtMethod* method, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ UNUSED(method); \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
size_t byte_count = klass->GetObjectSize(); \
@@ -104,7 +105,7 @@ extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
return obj; \
} \
} \
- return AllocObjectFromCodeInitialized<instrumented_bool>(klass, method, self, allocator_type); \
+ return AllocObjectFromCodeInitialized<instrumented_bool>(klass, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck##suffix##suffix2( \
uint32_t type_idx, mirror::ArtMethod* method, Thread* self) \
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index e0aab75..5cb5178 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -229,7 +229,7 @@ class QuickArgumentVisitor {
gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
- + StackArgumentStartFromShorty(is_static, shorty, shorty_len)),
+ + sizeof(StackReference<mirror::ArtMethod>)), // Skip StackReference<ArtMethod>.
gpr_index_(0), fpr_index_(0), fpr_double_index_(0), stack_index_(0),
cur_type_(Primitive::kPrimVoid), is_split_long_or_double_(false) {
COMPILE_ASSERT(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0), knum_of_quick_fpr_arg_unexpected);
@@ -409,13 +409,6 @@ class QuickArgumentVisitor {
}
}
- private:
- static size_t StackArgumentStartFromShorty(bool is_static, const char* shorty,
- uint32_t shorty_len) {
- // 'stack_args_' points to the first method's argument
- return sizeof(StackReference<mirror::ArtMethod>); // Skip StackReference<ArtMethod>.
- }
-
protected:
const bool is_static_;
const char* const shorty_;
@@ -1234,7 +1227,9 @@ class ComputeNativeCallFrameSize {
}
virtual void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {}
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(sm);
+ }
void Walk(const char* shorty, uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);
@@ -1366,8 +1361,7 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
// WARNING: After this, *sp won't be pointing to the method anymore!
uint8_t* ComputeLayout(Thread* self, StackReference<mirror::ArtMethod>** m,
- bool is_static, const char* shorty, uint32_t shorty_len,
- HandleScope** handle_scope,
+ const char* shorty, uint32_t shorty_len, HandleScope** handle_scope,
uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Walk(shorty, shorty_len);
@@ -1441,9 +1435,9 @@ class FillNativeCall {
cur_stack_arg_++;
}
- virtual uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ virtual uintptr_t PushHandle(mirror::Object*) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
- return 0U;
+ UNREACHABLE();
}
private:
@@ -1464,7 +1458,7 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
uintptr_t* start_gpr_reg;
uint32_t* start_fpr_reg;
uintptr_t* start_stack_arg;
- bottom_of_used_area_ = fsc.ComputeLayout(self, sp, is_static, shorty, shorty_len,
+ bottom_of_used_area_ = fsc.ComputeLayout(self, sp, shorty, shorty_len,
&handle_scope_,
&start_stack_arg,
&start_gpr_reg, &start_fpr_reg);
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index a9af754..85a0b99 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -34,7 +34,7 @@ class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
t->TransitionFromSuspendedToRunnable(); // So we can create callee-save methods.
r->SetInstructionSet(isa);
- mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod(type);
+ mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod();
r->SetCalleeSaveMethod(save_method, type);
t->TransitionFromRunnableToSuspended(ThreadState::kNative); // So we can shut down.
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 1365cd4..ee9b221 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -165,7 +165,7 @@ TEST_F(ExceptionTest, StackTraceElement) {
std::vector<uintptr_t> fake_stack;
Runtime* r = Runtime::Current();
r->SetInstructionSet(kRuntimeISA);
- mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod(Runtime::kSaveAll);
+ mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod();
r->SetCalleeSaveMethod(save_method, Runtime::kSaveAll);
QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 4ae929b..c473684 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -383,7 +383,7 @@ JavaStackTraceHandler::JavaStackTraceHandler(FaultManager* manager) : FaultHandl
bool JavaStackTraceHandler::Action(int sig, siginfo_t* siginfo, void* context) {
// Make sure that we are in the generated code, but we may not have a dex pc.
-
+ UNUSED(sig);
#ifdef TEST_NESTED_SIGNAL
bool in_generated_code = true;
#else
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 753b42d..0a15e9e 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -46,6 +46,7 @@ class ModUnionClearCardSetVisitor {
}
inline void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value) const {
+ UNUSED(new_value);
if (expected_value == CardTable::kCardDirty) {
cleared_cards_->insert(card);
}
@@ -62,6 +63,7 @@ class ModUnionClearCardVisitor {
}
void operator()(uint8_t* card, uint8_t expected_card, uint8_t new_card) const {
+ UNUSED(new_card);
if (expected_card == CardTable::kCardDirty) {
cleared_cards_->push_back(card);
}
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index d43dc0a..b16a146 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -43,6 +43,7 @@ class RememberedSetCardVisitor {
: dirty_cards_(dirty_cards) {}
void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value) const {
+ UNUSED(new_value);
if (expected_value == CardTable::kCardDirty) {
dirty_cards_->insert(card);
}
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index 40856fc..850325a 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -91,7 +91,7 @@ class SimpleCounter {
public:
explicit SimpleCounter(size_t* counter) : count_(counter) {}
- void operator()(mirror::Object* obj) const {
+ void operator()(mirror::Object* obj ATTRIBUTE_UNUSED) const {
(*count_)++;
}
diff --git a/runtime/gc/allocator/dlmalloc.cc b/runtime/gc/allocator/dlmalloc.cc
index fbeba7f..acff52d 100644
--- a/runtime/gc/allocator/dlmalloc.cc
+++ b/runtime/gc/allocator/dlmalloc.cc
@@ -43,7 +43,8 @@ static void art_heap_corruption(const char* function) {
}
static void art_heap_usage_error(const char* function, void* p) {
- LOG(::art::FATAL) << "Incorrect use of function '" << function << "' argument " << p << " not expected";
+ LOG(::art::FATAL) << "Incorrect use of function '" << function << "' argument " << p
+ << " not expected";
}
#include "globals.h"
@@ -70,7 +71,9 @@ extern "C" void DlmallocMadviseCallback(void* start, void* end, size_t used_byte
}
}
-extern "C" void DlmallocBytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
+extern "C" void DlmallocBytesAllocatedCallback(void* start ATTRIBUTE_UNUSED,
+ void* end ATTRIBUTE_UNUSED, size_t used_bytes,
+ void* arg) {
if (used_bytes == 0) {
return;
}
@@ -78,7 +81,10 @@ extern "C" void DlmallocBytesAllocatedCallback(void* start, void* end, size_t us
*bytes_allocated += used_bytes + sizeof(size_t);
}
-extern "C" void DlmallocObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
+extern "C" void DlmallocObjectsAllocatedCallback(void* start, void* end, size_t used_bytes,
+ void* arg) {
+ UNUSED(start);
+ UNUSED(end);
if (used_bytes == 0) {
return;
}
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index fa531a7..f5e2fed 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -1851,7 +1851,8 @@ void RosAlloc::Initialize() {
dedicated_full_run_->SetIsThreadLocal(true);
}
-void RosAlloc::BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
+void RosAlloc::BytesAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
+ size_t used_bytes, void* arg) {
if (used_bytes == 0) {
return;
}
@@ -1859,7 +1860,8 @@ void RosAlloc::BytesAllocatedCallback(void* start, void* end, size_t used_bytes,
*bytes_allocated += used_bytes;
}
-void RosAlloc::ObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
+void RosAlloc::ObjectsAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
+ size_t used_bytes, void* arg) {
if (used_bytes == 0) {
return;
}
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index ad7f901..a2f8342 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -105,6 +105,9 @@ class RosAlloc {
rosalloc->ReleasePageRange(start, start + byte_size);
}
}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FreePageRun);
};
// Represents a run of memory slots of the same size.
@@ -256,6 +259,8 @@ class RosAlloc {
size_t MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base, const char* caller_name);
// Turns the bit map into a string for debugging.
static std::string BitMapToStr(uint32_t* bit_map_base, size_t num_vec);
+
+ // TODO: DISALLOW_COPY_AND_ASSIGN(Run);
};
// The magic number for a run.
@@ -446,7 +451,7 @@ class RosAlloc {
// Bracket lock names (since locks only have char* names).
std::string size_bracket_lock_names_[kNumOfSizeBrackets];
// The types of page map entries.
- enum {
+ enum PageMapKind {
kPageMapReleased = 0, // Zero and released back to the OS.
kPageMapEmpty, // Zero but probably dirty.
kPageMapRun, // The beginning of a run.
@@ -526,11 +531,15 @@ class RosAlloc {
// Release a range of pages.
size_t ReleasePageRange(uint8_t* start, uint8_t* end) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ // Dumps the page map for debugging.
+ std::string DumpPageMap() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
public:
RosAlloc(void* base, size_t capacity, size_t max_capacity,
PageReleaseMode page_release_mode,
size_t page_release_size_threshold = kDefaultPageReleaseSizeThreshold);
~RosAlloc();
+
// If kThreadUnsafe is true then the allocator may avoid acquiring some locks as an optimization.
// If used, this may cause race conditions if multiple threads are allocating at the same time.
template<bool kThreadSafe = true>
@@ -540,6 +549,7 @@ class RosAlloc {
LOCKS_EXCLUDED(bulk_free_lock_);
size_t BulkFree(Thread* self, void** ptrs, size_t num_ptrs)
LOCKS_EXCLUDED(bulk_free_lock_);
+
// Returns the size of the allocated slot for a given allocated memory chunk.
size_t UsableSize(void* ptr);
// Returns the size of the allocated slot for a given size.
@@ -557,6 +567,7 @@ class RosAlloc {
void InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
void* arg)
LOCKS_EXCLUDED(lock_);
+
// Release empty pages.
size_t ReleasePages() LOCKS_EXCLUDED(lock_);
// Returns the current footprint.
@@ -565,6 +576,7 @@ class RosAlloc {
size_t FootprintLimit() LOCKS_EXCLUDED(lock_);
// Update the current capacity.
void SetFootprintLimit(size_t bytes) LOCKS_EXCLUDED(lock_);
+
// Releases the thread-local runs assigned to the given thread back to the common set of runs.
void RevokeThreadLocalRuns(Thread* thread);
// Releases the thread-local runs assigned to all the threads back to the common set of runs.
@@ -573,8 +585,7 @@ class RosAlloc {
void AssertThreadLocalRunsAreRevoked(Thread* thread);
// Assert all the thread local runs are revoked.
void AssertAllThreadLocalRunsAreRevoked() LOCKS_EXCLUDED(Locks::thread_list_lock_);
- // Dumps the page map for debugging.
- std::string DumpPageMap() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
static Run* GetDedicatedFullRun() {
return dedicated_full_run_;
}
@@ -597,7 +608,13 @@ class RosAlloc {
void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes);
+
+ private:
+ friend std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs);
+
+ DISALLOW_COPY_AND_ASSIGN(RosAlloc);
};
+std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs);
} // namespace allocator
} // namespace gc
diff --git a/runtime/gc/allocator_type.h b/runtime/gc/allocator_type.h
index 938b0f1..c6ebc73 100644
--- a/runtime/gc/allocator_type.h
+++ b/runtime/gc/allocator_type.h
@@ -17,6 +17,8 @@
#ifndef ART_RUNTIME_GC_ALLOCATOR_TYPE_H_
#define ART_RUNTIME_GC_ALLOCATOR_TYPE_H_
+#include <ostream>
+
namespace art {
namespace gc {
@@ -29,6 +31,7 @@ enum AllocatorType {
kAllocatorTypeNonMoving, // Special allocator for non moving objects, doesn't have entrypoints.
kAllocatorTypeLOS, // Large object space, also doesn't have entrypoints.
};
+std::ostream& operator<<(std::ostream& os, const AllocatorType& rhs);
} // namespace gc
} // namespace art
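These one-line operator<< declarations (for AllocatorType here, and for several enums below) are paired with definitions emitted by the generate-operator-out.py script mentioned in the lock_word.h hunk further down. A hedged sketch of the kind of switch such a generator emits, abridged to the two enumerators visible above and assuming allocator_type.h and <ostream> are included:

    // Illustration only; a generated definition would cover every enumerator.
    std::ostream& operator<<(std::ostream& os, const AllocatorType& rhs) {
      switch (rhs) {
        case kAllocatorTypeNonMoving: os << "kAllocatorTypeNonMoving"; break;
        case kAllocatorTypeLOS: os << "kAllocatorTypeLOS"; break;
        default: os << "AllocatorType[" << static_cast<int>(rhs) << "]"; break;
      }
      return os;
    }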
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index ce7c75a..ee5a785 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -29,7 +29,9 @@ class ConcurrentCopying : public GarbageCollector {
const std::string& name_prefix = "")
: GarbageCollector(heap,
name_prefix + (name_prefix.empty() ? "" : " ") +
- "concurrent copying + mark sweep") {}
+ "concurrent copying + mark sweep") {
+ UNUSED(generational);
+ }
~ConcurrentCopying() {}
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index ad3bb11..e3966e3 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -658,6 +658,7 @@ class MarkStackTask : public Task {
// Scans all of the objects
virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ UNUSED(self);
ScanObjectParallelVisitor visitor(this);
// TODO: Tune this.
static const size_t kFifoSize = 4;
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index 4ed6abc..5be3db7 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -58,6 +58,7 @@ void StickyMarkSweep::MarkReachableObjects() {
}
void StickyMarkSweep::Sweep(bool swap_bitmaps) {
+ UNUSED(swap_bitmaps);
SweepArray(GetHeap()->GetLiveStack(), false);
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 8e080d1..9fd9a2b 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -265,14 +265,13 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
}
// Attempt to create 2 mem maps at or after the requested begin.
main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin, capacity_,
- PROT_READ | PROT_WRITE, &error_str));
+ &error_str));
CHECK(main_mem_map_1.get() != nullptr) << error_str;
if (support_homogeneous_space_compaction ||
background_collector_type_ == kCollectorTypeSS ||
foreground_collector_type_ == kCollectorTypeSS) {
main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
- capacity_, PROT_READ | PROT_WRITE,
- &error_str));
+ capacity_, &error_str));
CHECK(main_mem_map_2.get() != nullptr) << error_str;
}
// Create the non moving space first so that bitmaps don't take up the address range.
@@ -435,8 +434,8 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
}
}
-MemMap* Heap::MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin, size_t capacity,
- int prot_flags, std::string* out_error_str) {
+MemMap* Heap::MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
+ size_t capacity, std::string* out_error_str) {
while (true) {
MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity,
PROT_READ | PROT_WRITE, true, out_error_str);
@@ -887,7 +886,7 @@ space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok)
if (result != NULL) {
return result;
}
- return FindDiscontinuousSpaceFromObject(obj, true);
+ return FindDiscontinuousSpaceFromObject(obj, fail_ok);
}
space::ImageSpace* Heap::GetImageSpace() const {
@@ -1832,6 +1831,7 @@ class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const {
// Don't sweep any spaces since we probably blasted the internal accounting of the free list
// allocator.
+ UNUSED(space);
return false;
}
@@ -2239,6 +2239,7 @@ class VerifyReferenceVisitor {
void operator()(mirror::Class* klass, mirror::Reference* ref) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(klass);
if (verify_referent_) {
VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset());
}
@@ -2583,6 +2584,7 @@ bool Heap::VerifyMissingCardMarks() {
}
void Heap::SwapStacks(Thread* self) {
+ UNUSED(self);
if (kUseThreadLocalAllocationStack) {
live_stack_->AssertAllZero();
}
@@ -2711,6 +2713,7 @@ void Heap::PreGcVerification(collector::GarbageCollector* gc) {
}
void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc) {
+ UNUSED(gc);
// TODO: Add a new runtime option for this?
if (verify_pre_gc_rosalloc_) {
RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 7b891a6..cf7352e 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -622,8 +622,7 @@ class Heap {
// Create a mem map with a preferred base address.
static MemMap* MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
- size_t capacity, int prot_flags,
- std::string* out_error_str);
+ size_t capacity, std::string* out_error_str);
bool SupportHSpaceCompaction() const {
// Returns true if we can do hspace compaction
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index d2d95b4..445c720 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -314,6 +314,7 @@ static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void*
}
void DlMallocSpace::LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) {
+ UNUSED(failed_alloc_bytes);
Thread* self = Thread::Current();
size_t max_contiguous_allocation = 0;
// To allow the Walk/InspectAll() to exclusively-lock the mutator
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 9434bfe..c0c6444 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -159,7 +159,11 @@ size_t LargeObjectMapSpace::AllocationSize(mirror::Object* obj, size_t* usable_s
MutexLock mu(Thread::Current(), lock_);
auto found = mem_maps_.find(obj);
CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
- return found->second->BaseSize();
+ size_t alloc_size = found->second->BaseSize();
+ if (usable_size != nullptr) {
+ *usable_size = alloc_size;
+ }
+ return alloc_size;
}
size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
diff --git a/runtime/gc/space/valgrind_malloc_space.h b/runtime/gc/space/valgrind_malloc_space.h
index eb6fe9c..bc870a6 100644
--- a/runtime/gc/space/valgrind_malloc_space.h
+++ b/runtime/gc/space/valgrind_malloc_space.h
@@ -44,6 +44,7 @@ class ValgrindMallocSpace FINAL : public BaseMallocSpaceType {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void RegisterRecentFree(mirror::Object* ptr) OVERRIDE {
+ UNUSED(ptr);
}
ValgrindMallocSpace(const std::string& name, MemMap* mem_map, AllocatorType allocator,
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index 9de0548..a868e68 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -32,6 +32,7 @@ class CountObjectsAllocated {
: objects_allocated_(objects_allocated) {}
void operator()(mirror::Object* obj) const {
+ UNUSED(obj);
++*objects_allocated_;
}
@@ -76,30 +77,29 @@ void ZygoteSpace::Dump(std::ostream& os) const {
<< ",name=\"" << GetName() << "\"]";
}
-mirror::Object* ZygoteSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) {
+mirror::Object* ZygoteSpace::Alloc(Thread*, size_t, size_t*, size_t*) {
UNIMPLEMENTED(FATAL);
- return nullptr;
+ UNREACHABLE();
}
-size_t ZygoteSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
+size_t ZygoteSpace::AllocationSize(mirror::Object*, size_t*) {
UNIMPLEMENTED(FATAL);
- return 0;
+ UNREACHABLE();
}
-size_t ZygoteSpace::Free(Thread* self, mirror::Object* ptr) {
+size_t ZygoteSpace::Free(Thread*, mirror::Object*) {
UNIMPLEMENTED(FATAL);
- return 0;
+ UNREACHABLE();
}
-size_t ZygoteSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
+size_t ZygoteSpace::FreeList(Thread*, size_t, mirror::Object**) {
UNIMPLEMENTED(FATAL);
- return 0;
+ UNREACHABLE();
}
-void ZygoteSpace::LogFragmentationAllocFailure(std::ostream& /*os*/,
- size_t /*failed_alloc_bytes*/) {
+void ZygoteSpace::LogFragmentationAllocFailure(std::ostream&, size_t) {
UNIMPLEMENTED(FATAL);
+ UNREACHABLE();
}
void ZygoteSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
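The ZygoteSpace hunks above replace dead `return 0;`/`return nullptr;` statements after a fatal log with UNREACHABLE(). A self-contained model of why that compiles cleanly, under assumed definitions (ART's real macros may differ):

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>
    // Assumed stand-in: GCC/Clang builtin marking a point control never reaches.
    #define UNREACHABLE() __builtin_unreachable()
    // Stand-in for UNIMPLEMENTED(FATAL): aborts at runtime, but is not
    // declared [[noreturn]], so the compiler still expects a return value.
    inline void FatalLog(const char* msg) {
      std::fprintf(stderr, "%s\n", msg);
      std::abort();
    }
    std::size_t NeverImplemented() {
      FatalLog("UNIMPLEMENTED");
      UNREACHABLE();  // replaces the dead 'return 0;' and silences -Wreturn-type
    }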
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index c55835d..2c4f0f9 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -100,7 +100,7 @@ class PACKED(4) HandleScope {
}
// Offset of link within HandleScope, used by generated code.
- static size_t LinkOffset(size_t pointer_size) {
+ static size_t LinkOffset(size_t pointer_size ATTRIBUTE_UNUSED) {
return 0;
}
diff --git a/runtime/instruction_set.cc b/runtime/instruction_set.cc
index 0ca32fe..e165a75 100644
--- a/runtime/instruction_set.cc
+++ b/runtime/instruction_set.cc
@@ -431,9 +431,8 @@ const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromHwcap() {
// A signal handler called by a fault for an illegal instruction. We record the fact in r0
// and then increment the PC in the signal context to return to the next instruction. We know the
// instruction is an sdiv (4 bytes long).
-static void bad_divide_inst_handle(int signo, siginfo_t* si, void* data) {
- UNUSED(signo);
- UNUSED(si);
+static void bad_divide_inst_handle(int signo ATTRIBUTE_UNUSED, siginfo_t* si ATTRIBUTE_UNUSED,
+ void* data) {
#if defined(__arm__)
struct ucontext *uc = (struct ucontext *)data;
struct sigcontext *sc = &uc->uc_mcontext;
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index fc3da36..160e8c3 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -595,6 +595,7 @@ void Instrumentation::ConfigureStubs(bool require_entry_exit_stubs, bool require
}
static void ResetQuickAllocEntryPointsForThread(Thread* thread, void* arg) {
+ UNUSED(arg);
thread->ResetQuickAllocEntryPointsForThread();
}
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 3017bf6..646c7ae 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -103,13 +103,13 @@ struct InstrumentationListener {
class Instrumentation {
public:
enum InstrumentationEvent {
- kMethodEntered = 1 << 0,
- kMethodExited = 1 << 1,
- kMethodUnwind = 1 << 2,
- kDexPcMoved = 1 << 3,
- kFieldRead = 1 << 4,
- kFieldWritten = 1 << 5,
- kExceptionCaught = 1 << 6,
+ kMethodEntered = 1, // 1 << 0
+ kMethodExited = 2, // 1 << 1
+ kMethodUnwind = 4, // 1 << 2
+ kDexPcMoved = 8, // 1 << 3
+    kFieldRead = 16,  // 1 << 4
+ kFieldWritten = 32, // 1 << 5
+ kExceptionCaught = 64, // 1 << 6
};
Instrumentation();
@@ -464,6 +464,7 @@ class Instrumentation {
DISALLOW_COPY_AND_ASSIGN(Instrumentation);
};
+std::ostream& operator<<(std::ostream& os, const Instrumentation::InstrumentationEvent& rhs);
// An element in the instrumentation side stack maintained in art::Thread.
struct InstrumentationStackFrame {
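Since the enumerators keep their power-of-two values, existing bitmask arithmetic on instrumentation events is unchanged; the rewrite only spells the shifts out as literals, presumably so the operator-out generator can parse the enum. A quick sanity sketch, assuming instrumentation.h is included:

    #include <cstdint>
    void EventMaskSketch() {
      // Events still compose and test as flags exactly as before.
      uint32_t mask = Instrumentation::kMethodEntered | Instrumentation::kFieldRead;  // 1 | 16
      bool listens_to_entry = (mask & Instrumentation::kMethodEntered) != 0;  // true
      static_assert(Instrumentation::kExceptionCaught == (1 << 6), "values preserved");
      (void)mask; (void)listens_to_entry;
    }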
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 23324a6..89586b0 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -355,7 +355,8 @@ void InternTable::Table::Insert(mirror::String* s) {
post_zygote_table_.insert(GcRoot<mirror::String>(s));
}
-void InternTable::Table::VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
+void InternTable::Table::VisitRoots(RootCallback* callback, void* arg,
+ VisitRootFlags flags ATTRIBUTE_UNUSED) {
for (auto& intern : pre_zygote_table_) {
const_cast<GcRoot<mirror::String>&>(intern).VisitRoot(callback, arg, 0, kRootInternedString);
}
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 9de12f2..44e2029 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -315,6 +315,10 @@ enum InterpreterImplKind {
kSwitchImpl, // Switch-based interpreter implementation.
kComputedGotoImplKind // Computed-goto-based interpreter implementation.
};
+std::ostream& operator<<(std::ostream& os, const InterpreterImplKind& rhs) {
+ os << ((rhs == kSwitchImpl) ? "Switch-based interpreter" : "Computed-goto-based interpreter");
+ return os;
+}
#if !defined(__clang__)
static constexpr InterpreterImplKind kInterpreterImplKind = kComputedGotoImplKind;
@@ -322,8 +326,7 @@ static constexpr InterpreterImplKind kInterpreterImplKind = kComputedGotoImplKin
// Clang 3.4 fails to build the goto interpreter implementation.
static constexpr InterpreterImplKind kInterpreterImplKind = kSwitchImpl;
template<bool do_access_check, bool transaction_active>
-JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register) {
+JValue ExecuteGotoImpl(Thread*, MethodHelper&, const DexFile::CodeItem*, ShadowFrame&, JValue) {
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 7fdc18e..be34bd3 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -1126,6 +1126,7 @@ static JdwpError TR_CurrentContendedMonitor(JdwpState*, Request* request, Expand
static JdwpError TR_Interrupt(JdwpState*, Request* request, ExpandBuf* reply)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(reply);
ObjectId thread_id = request->ReadThreadId();
return Dbg::Interrupt(thread_id);
}
diff --git a/runtime/jdwp/object_registry.cc b/runtime/jdwp/object_registry.cc
index 4f34896..bf72c7b 100644
--- a/runtime/jdwp/object_registry.cc
+++ b/runtime/jdwp/object_registry.cc
@@ -17,6 +17,7 @@
#include "object_registry.h"
#include "handle_scope-inl.h"
+#include "jni_internal.h"
#include "mirror/class.h"
#include "scoped_thread_state_change.h"
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index ad06b85..dd66af7 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -1749,6 +1749,7 @@ class JNI {
}
static void ReleaseStringCritical(JNIEnv* env, jstring java_string, const jchar* chars) {
+ UNUSED(chars);
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(java_string);
ScopedObjectAccess soa(env);
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -1777,7 +1778,7 @@ class JNI {
return bytes;
}
- static void ReleaseStringUTFChars(JNIEnv* env, jstring, const char* chars) {
+ static void ReleaseStringUTFChars(JNIEnv*, jstring, const char* chars) {
delete[] chars;
}
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index 13cc3b0..2d5c71b 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -52,7 +52,7 @@ class Monitor;
*/
class LockWord {
public:
- enum {
+  enum SizeShiftsAndMasks {  // private marker to keep generate-operator-out.py from processing it.
// Number of bits to encode the state, currently just fat or thin/unlocked or hash code.
kStateSize = 2,
// Number of bits to encode the thin lock owner.
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index c118471..51aba9c 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -238,6 +238,9 @@ static bool CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte
MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byte_count, int prot,
bool low_4gb, std::string* error_msg) {
+#ifndef __LP64__
+ UNUSED(low_4gb);
+#endif
if (byte_count == 0) {
return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
}
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 7e1ad78..13f881d 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -81,6 +81,7 @@ static inline size_t ComputeArraySize(Thread* self, Class* array_class, int32_t
// 64-bit. No overflow as component_count is 32-bit and the maximum
// component size is 8.
DCHECK_LE((1U << component_size_shift), 8U);
+ UNUSED(self);
#else
// 32-bit.
DCHECK_NE(header_size, 0U);
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index d9094fc..68fbb8b 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -839,7 +839,7 @@ class MANAGED Class FINAL : public Object {
// Returns the number of static fields containing reference types.
uint32_t NumReferenceStaticFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(IsResolved() || IsErroneous()) << PrettyClass(this) << " status=" << GetStatus();
+ DCHECK(IsResolved() || IsErroneous());
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_));
}
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index c9e60bc..c451764 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -135,6 +135,7 @@ inline void Object::SetReadBarrierPointer(Object* rb_ptr) {
#else
LOG(FATAL) << "Unreachable";
UNREACHABLE();
+ UNUSED(rb_ptr);
#endif
}
@@ -156,6 +157,7 @@ inline bool Object::AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object*
DCHECK_EQ(new_ref.reference_, atomic_rb_ptr->LoadRelaxed());
return true;
#else
+ UNUSED(expected_rb_ptr, rb_ptr);
LOG(FATAL) << "Unreachable";
UNREACHABLE();
#endif
diff --git a/runtime/monitor_pool.h b/runtime/monitor_pool.h
index 5b92093..27678dc 100644
--- a/runtime/monitor_pool.h
+++ b/runtime/monitor_pool.h
@@ -53,6 +53,7 @@ class MonitorPool {
static void ReleaseMonitor(Thread* self, Monitor* monitor) {
#ifndef __LP64__
+ UNUSED(self);
delete monitor;
#else
GetMonitorPool()->ReleaseMonitorToPool(self, monitor);
@@ -61,6 +62,7 @@ class MonitorPool {
static void ReleaseMonitors(Thread* self, MonitorList::Monitors* monitors) {
#ifndef __LP64__
+ UNUSED(self);
STLDeleteElements(monitors);
#else
GetMonitorPool()->ReleaseMonitorsToPool(self, monitors);
@@ -85,6 +87,7 @@ class MonitorPool {
static MonitorId ComputeMonitorId(Monitor* mon, Thread* self) {
#ifndef __LP64__
+ UNUSED(self);
return MonitorIdFromMonitor(mon);
#else
return GetMonitorPool()->ComputeMonitorIdInPool(mon, self);
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index c35bb30..e1ceb8c 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -166,13 +166,13 @@ static jstring VMRuntime_vmInstructionSet(JNIEnv* env, jobject) {
return env->NewStringUTF(isa_string);
}
-static jboolean VMRuntime_is64Bit(JNIEnv* env, jobject) {
+static jboolean VMRuntime_is64Bit(JNIEnv*, jobject) {
bool is64BitMode = (sizeof(void*) == sizeof(uint64_t));
return is64BitMode ? JNI_TRUE : JNI_FALSE;
}
static jboolean VMRuntime_isCheckJniEnabled(JNIEnv* env, jobject) {
- return Runtime::Current()->GetJavaVM()->IsCheckJniEnabled() ? JNI_TRUE : JNI_FALSE;
+ return down_cast<JNIEnvExt*>(env)->vm->IsCheckJniEnabled() ? JNI_TRUE : JNI_FALSE;
}
static void VMRuntime_setTargetSdkVersionNative(JNIEnv*, jobject, jint target_sdk_version) {
@@ -201,9 +201,10 @@ static void VMRuntime_registerNativeFree(JNIEnv* env, jobject, jint bytes) {
Runtime::Current()->GetHeap()->RegisterNativeFree(env, static_cast<size_t>(bytes));
}
-static void VMRuntime_updateProcessState(JNIEnv* env, jobject, jint process_state) {
- Runtime::Current()->GetHeap()->UpdateProcessState(static_cast<gc::ProcessState>(process_state));
- Runtime::Current()->UpdateProfilerState(process_state);
+static void VMRuntime_updateProcessState(JNIEnv*, jobject, jint process_state) {
+ Runtime* runtime = Runtime::Current();
+ runtime->GetHeap()->UpdateProcessState(static_cast<gc::ProcessState>(process_state));
+ runtime->UpdateProfilerState(process_state);
}
static void VMRuntime_trimHeap(JNIEnv*, jobject) {
@@ -514,8 +515,9 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) {
* for ART.
*/
static void VMRuntime_registerAppInfo(JNIEnv* env, jclass, jstring pkgName,
- jstring appDir, jstring procName) {
- const char *pkgNameChars = env->GetStringUTFChars(pkgName, NULL);
+ jstring appDir ATTRIBUTE_UNUSED,
+ jstring procName ATTRIBUTE_UNUSED) {
+  const char* pkgNameChars = env->GetStringUTFChars(pkgName, nullptr);
std::string profileFile = StringPrintf("/data/dalvik-cache/profiles/%s", pkgNameChars);
Runtime::Current()->StartProfiler(profileFile.c_str());
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index e469126..adc7c4f 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -100,8 +100,7 @@ static jlong ZygoteHooks_nativePreFork(JNIEnv* env, jclass) {
runtime->PreZygoteFork();
// Grab thread before fork potentially makes Thread::pthread_key_self_ unusable.
- Thread* self = Thread::Current();
- return reinterpret_cast<jlong>(self);
+ return reinterpret_cast<jlong>(ThreadForEnv(env));
}
static void ZygoteHooks_nativePostForkChild(JNIEnv* env, jclass, jlong token, jint debug_flags,
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index 7f5a611..1f07336 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -57,9 +57,8 @@ ALWAYS_INLINE inline static bool VerifyFieldAccess(Thread* self, mirror::ArtFiel
}
template<bool kAllowReferences>
-ALWAYS_INLINE inline static bool GetFieldValue(
- const ScopedFastNativeObjectAccess& soa, mirror::Object* o, mirror::ArtField* f,
- Primitive::Type field_type, JValue* value)
+ALWAYS_INLINE inline static bool GetFieldValue(mirror::Object* o, mirror::ArtField* f,
+ Primitive::Type field_type, JValue* value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK_EQ(value->GetJ(), INT64_C(0));
switch (field_type) {
@@ -148,7 +147,7 @@ static jobject Field_get(JNIEnv* env, jobject javaField, jobject javaObj, jboole
// Get the field's value, boxing if necessary.
Primitive::Type field_type = f->GetTypeAsPrimitiveType();
JValue value;
- if (!GetFieldValue<true>(soa, o, f, field_type, &value)) {
+ if (!GetFieldValue<true>(o, f, field_type, &value)) {
DCHECK(soa.Self()->IsExceptionPending());
return nullptr;
}
@@ -178,13 +177,13 @@ ALWAYS_INLINE inline static JValue GetPrimitiveField(JNIEnv* env, jobject javaFi
JValue field_value;
if (field_type == kPrimitiveType) {
// This if statement should get optimized out since we only pass in valid primitive types.
- if (UNLIKELY(!GetFieldValue<false>(soa, o, f, kPrimitiveType, &field_value))) {
+ if (UNLIKELY(!GetFieldValue<false>(o, f, kPrimitiveType, &field_value))) {
DCHECK(soa.Self()->IsExceptionPending());
return JValue();
}
return field_value;
}
- if (!GetFieldValue<false>(soa, o, f, field_type, &field_value)) {
+ if (!GetFieldValue<false>(o, f, field_type, &field_value)) {
DCHECK(soa.Self()->IsExceptionPending());
return JValue();
}
@@ -232,9 +231,8 @@ static jshort Field_getShort(JNIEnv* env, jobject javaField, jobject javaObj, jb
return GetPrimitiveField<Primitive::kPrimShort>(env, javaField, javaObj, accessible).GetS();
}
-static void SetFieldValue(ScopedFastNativeObjectAccess& soa, mirror::Object* o,
- mirror::ArtField* f, Primitive::Type field_type, bool allow_references,
- const JValue& new_value)
+static void SetFieldValue(mirror::Object* o, mirror::ArtField* f, Primitive::Type field_type,
+ bool allow_references, const JValue& new_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(f->GetDeclaringClass()->IsInitialized());
switch (field_type) {
@@ -317,7 +315,7 @@ static void Field_set(JNIEnv* env, jobject javaField, jobject javaObj, jobject j
DCHECK(soa.Self()->IsExceptionPending());
return;
}
- SetFieldValue(soa, o, f, field_prim_type, true, unboxed_value);
+ SetFieldValue(o, f, field_prim_type, true, unboxed_value);
}
template<Primitive::Type kPrimitiveType>
@@ -350,7 +348,7 @@ static void SetPrimitiveField(JNIEnv* env, jobject javaField, jobject javaObj,
}
// Write the value.
- SetFieldValue(soa, o, f, field_type, false, wide_value);
+ SetFieldValue(o, f, field_type, false, wide_value);
}
static void Field_setBoolean(JNIEnv* env, jobject javaField, jobject javaObj, jboolean z,
diff --git a/runtime/noop_compiler_callbacks.h b/runtime/noop_compiler_callbacks.h
index e9ad353..300abc9 100644
--- a/runtime/noop_compiler_callbacks.h
+++ b/runtime/noop_compiler_callbacks.h
@@ -26,11 +26,11 @@ class NoopCompilerCallbacks FINAL : public CompilerCallbacks {
NoopCompilerCallbacks() {}
~NoopCompilerCallbacks() {}
- bool MethodVerified(verifier::MethodVerifier* verifier) OVERRIDE {
+ bool MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) OVERRIDE {
return true;
}
- void ClassRejected(ClassReference ref) OVERRIDE {}
+ void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) OVERRIDE {}
// This is only used by compilers which need to be able to run without relocation even when it
// would normally be enabled. For example the patchoat executable, and dex2oat --image, both need
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 6b64c25..e3bd541 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -741,7 +741,7 @@ void ParsedOptions::Abort() {
}
void ParsedOptions::UsageMessageV(FILE* stream, const char* fmt, va_list ap) {
- hook_vfprintf_(stderr, fmt, ap);
+ hook_vfprintf_(stream, fmt, ap);
}
void ParsedOptions::UsageMessage(FILE* stream, const char* fmt, ...) {
diff --git a/runtime/profiler_options.h b/runtime/profiler_options.h
index e3ef697..1db2f05 100644
--- a/runtime/profiler_options.h
+++ b/runtime/profiler_options.h
@@ -26,6 +26,7 @@ enum ProfileDataType {
kProfilerMethod, // Method only
kProfilerBoundedStack, // Methods with Dex PC on top of the stack
};
+std::ostream& operator<<(std::ostream& os, const ProfileDataType& rhs);
class ProfilerOptions {
public:
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index c58735a..90c9fe7 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -297,7 +297,7 @@ void QuickExceptionHandler::DeoptimizeStack() {
// Unwinds all instrumentation stack frames prior to the catch handler or upcall.
class InstrumentationStackVisitor : public StackVisitor {
public:
- InstrumentationStackVisitor(Thread* self, bool is_deoptimization, size_t frame_depth)
+ InstrumentationStackVisitor(Thread* self, size_t frame_depth)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: StackVisitor(self, nullptr),
frame_depth_(frame_depth),
@@ -332,7 +332,7 @@ class InstrumentationStackVisitor : public StackVisitor {
void QuickExceptionHandler::UpdateInstrumentationStack() {
if (method_tracing_active_) {
- InstrumentationStackVisitor visitor(self_, is_deoptimization_, handler_frame_depth_);
+ InstrumentationStackVisitor visitor(self_, handler_frame_depth_);
visitor.WalkStack(true);
size_t instrumentation_frames_to_pop = visitor.GetInstrumentationFramesToPop();
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index fd43d78..0dc31e7 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -27,9 +27,7 @@ template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::Barrier(
mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
// Unused for now.
- UNUSED(obj);
- UNUSED(offset);
- UNUSED(ref_addr);
+ UNUSED(obj, offset, ref_addr);
const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
if (with_read_barrier && kUseBakerReadBarrier) {
// To be implemented.
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 228d200..44d1bc4 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -219,8 +219,7 @@ class ArgArray {
PrettyDescriptor(found_descriptor).c_str()).c_str());
}
- bool BuildArgArrayFromObjectArray(const ScopedObjectAccessAlreadyRunnable& soa,
- mirror::Object* receiver,
+ bool BuildArgArrayFromObjectArray(mirror::Object* receiver,
mirror::ObjectArray<mirror::Object>* args, MethodHelper& mh)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const DexFile::TypeList* classes = mh.GetMethod()->GetParameterTypeList();
@@ -613,7 +612,7 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaM
ArgArray arg_array(shorty, shorty_len);
StackHandleScope<1> hs(soa.Self());
MethodHelper mh(hs.NewHandle(m));
- if (!arg_array.BuildArgArrayFromObjectArray(soa, receiver, objects, mh)) {
+ if (!arg_array.BuildArgArrayFromObjectArray(receiver, objects, mh)) {
CHECK(soa.Self()->IsExceptionPending());
return nullptr;
}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index de3e976..4ac9634 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -576,8 +576,7 @@ void Runtime::StartDaemonThreads() {
VLOG(startup) << "Runtime::StartDaemonThreads exiting";
}
-static bool OpenDexFilesFromImage(const std::vector<std::string>& dex_filenames,
- const std::string& image_location,
+static bool OpenDexFilesFromImage(const std::string& image_location,
std::vector<const DexFile*>& dex_files,
size_t* failures) {
std::string system_filename;
@@ -639,8 +638,7 @@ static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
const std::string& image_location,
std::vector<const DexFile*>& dex_files) {
size_t failure_count = 0;
- if (!image_location.empty() && OpenDexFilesFromImage(dex_filenames, image_location, dex_files,
- &failure_count)) {
+ if (!image_location.empty() && OpenDexFilesFromImage(image_location, dex_files, &failure_count)) {
return failure_count;
}
failure_count = 0;
@@ -828,7 +826,7 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
if (!HasCalleeSaveMethod(type)) {
- SetCalleeSaveMethod(CreateCalleeSaveMethod(type), type);
+ SetCalleeSaveMethod(CreateCalleeSaveMethod(), type);
}
}
} else {
@@ -1260,7 +1258,7 @@ mirror::ArtMethod* Runtime::CreateResolutionMethod() {
return method.Get();
}
-mirror::ArtMethod* Runtime::CreateCalleeSaveMethod(CalleeSaveType type) {
+mirror::ArtMethod* Runtime::CreateCalleeSaveMethod() {
Thread* self = Thread::Current();
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 11db613..3cbe1e5 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -55,8 +55,8 @@ namespace mirror {
class Throwable;
} // namespace mirror
namespace verifier {
-class MethodVerifier;
-}
+ class MethodVerifier;
+} // namespace verifier
class ClassLinker;
class DexFile;
class InternTable;
@@ -379,8 +379,7 @@ class Runtime {
void SetCalleeSaveMethod(mirror::ArtMethod* method, CalleeSaveType type);
- mirror::ArtMethod* CreateCalleeSaveMethod(CalleeSaveType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::ArtMethod* CreateCalleeSaveMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
int32_t GetStat(int kind);
@@ -655,6 +654,7 @@ class Runtime {
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
+std::ostream& operator<<(std::ostream& os, const Runtime::CalleeSaveType& rhs);
} // namespace art
diff --git a/runtime/stack.h b/runtime/stack.h
index 2f8df61..66c840d 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -53,6 +53,7 @@ enum VRegKind {
kImpreciseConstant,
kUndefined,
};
+std::ostream& operator<<(std::ostream& os, const VRegKind& rhs);
// A reference from the shadow stack to a MirrorType object within the Java heap.
template<class MirrorType>
@@ -336,9 +337,7 @@ class ShadowFrame {
}
#if defined(ART_USE_PORTABLE_COMPILER)
- enum ShadowFrameFlag {
- kHasReferenceArray = 1ul << 31
- };
+  static constexpr uint32_t kHasReferenceArray = 1ul << 31;
// TODO: make const in the portable case.
uint32_t number_of_vregs_;
#else
@@ -633,6 +632,7 @@ class StackVisitor {
}
static int GetOutVROffset(uint16_t out_num, InstructionSet isa) {
+ UNUSED(isa);
// According to the stack model, the first out is above the Method reference.
return sizeof(StackReference<mirror::ArtMethod>) + (out_num * sizeof(uint32_t));
}
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index f8c8fdb..e3ef4eb 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -220,7 +220,7 @@ static void UnsafeLogFatalForThreadSuspendAllTimeout() {
// individual thread requires polling. delay_us is the requested sleep and total_delay_us
// accumulates the total time spent sleeping for timeouts. The first sleep is just a yield,
// subsequent sleeps increase delay_us from 1ms to 500ms by doubling.
-static void ThreadSuspendSleep(Thread* self, useconds_t* delay_us, useconds_t* total_delay_us) {
+static void ThreadSuspendSleep(useconds_t* delay_us, useconds_t* total_delay_us) {
useconds_t new_delay_us = (*delay_us) * 2;
CHECK_GE(new_delay_us, *delay_us);
if (new_delay_us < 500000) { // Don't allow sleeping to be more than 0.5s.
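For intuition about the doubling backoff documented above, a standalone model of the rule in this hunk, using an initial delay of 100 microseconds as in the RunCheckpoint hunk below (the real ThreadSuspendSleep also yields first, sleeps, and accumulates into total_delay_us):

    #include <cstdio>
    int main() {
      // Assumed model: double each round, capped below the 0.5s limit.
      unsigned delay_us = 100;
      for (int i = 0; i < 14; ++i) {
        unsigned doubled = delay_us * 2;
        if (doubled < 500000) {
          delay_us = doubled;  // 100 -> 200 -> 400 -> ... -> 409600
        }
      }
      std::printf("%u\n", delay_us);  // prints 409600; the next doubling would hit the cap
      return 0;
    }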
@@ -285,7 +285,7 @@ size_t ThreadList::RunCheckpoint(Closure* checkpoint_function) {
useconds_t total_delay_us = 0;
do {
useconds_t delay_us = 100;
- ThreadSuspendSleep(self, &delay_us, &total_delay_us);
+ ThreadSuspendSleep(&delay_us, &total_delay_us);
} while (!thread->IsSuspended());
// Shouldn't need to wait for longer than 1000 microseconds.
constexpr useconds_t kLongWaitThresholdUS = 1000;
@@ -561,7 +561,7 @@ Thread* ThreadList::SuspendThreadByPeer(jobject peer, bool request_suspension,
// Release locks and come out of runnable state.
}
VLOG(threads) << "SuspendThreadByPeer sleeping to allow thread chance to suspend";
- ThreadSuspendSleep(self, &delay_us, &total_delay_us);
+ ThreadSuspendSleep(&delay_us, &total_delay_us);
}
}
@@ -639,7 +639,7 @@ Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspe
// Release locks and come out of runnable state.
}
VLOG(threads) << "SuspendThreadByThreadId sleeping to allow thread chance to suspend";
- ThreadSuspendSleep(self, &delay_us, &total_delay_us);
+ ThreadSuspendSleep(&delay_us, &total_delay_us);
}
}
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index e8c9ff8..a7f2ecd 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -42,14 +42,14 @@ ThreadPoolWorker::ThreadPoolWorker(ThreadPool* thread_pool, const std::string& n
}
ThreadPoolWorker::~ThreadPoolWorker() {
- CHECK_PTHREAD_CALL(pthread_join, (pthread_, NULL), "thread pool worker shutdown");
+ CHECK_PTHREAD_CALL(pthread_join, (pthread_, nullptr), "thread pool worker shutdown");
}
void ThreadPoolWorker::Run() {
Thread* self = Thread::Current();
- Task* task = NULL;
+ Task* task = nullptr;
thread_pool_->creation_barier_.Wait(self);
- while ((task = thread_pool_->GetTask(self)) != NULL) {
+ while ((task = thread_pool_->GetTask(self)) != nullptr) {
task->Run(self);
task->Finalize();
}
@@ -58,11 +58,11 @@ void ThreadPoolWorker::Run() {
void* ThreadPoolWorker::Callback(void* arg) {
ThreadPoolWorker* worker = reinterpret_cast<ThreadPoolWorker*>(arg);
Runtime* runtime = Runtime::Current();
- CHECK(runtime->AttachCurrentThread(worker->name_.c_str(), true, NULL, false));
+ CHECK(runtime->AttachCurrentThread(worker->name_.c_str(), true, nullptr, false));
// Do work until it's time to shut down.
worker->Run();
runtime->DetachCurrentThread();
- return NULL;
+ return nullptr;
}
void ThreadPool::AddTask(Thread* self, Task* task) {
@@ -137,8 +137,8 @@ Task* ThreadPool::GetTask(Thread* self) {
const size_t active_threads = thread_count - waiting_count_;
// <= since self is considered an active worker.
if (active_threads <= max_active_workers_) {
- Task* task = TryGetTaskLocked(self);
- if (task != NULL) {
+ Task* task = TryGetTaskLocked();
+ if (task != nullptr) {
return task;
}
}
@@ -157,28 +157,28 @@ Task* ThreadPool::GetTask(Thread* self) {
--waiting_count_;
}
- // We are shutting down, return NULL to tell the worker thread to stop looping.
- return NULL;
+ // We are shutting down, return nullptr to tell the worker thread to stop looping.
+ return nullptr;
}
Task* ThreadPool::TryGetTask(Thread* self) {
MutexLock mu(self, task_queue_lock_);
- return TryGetTaskLocked(self);
+ return TryGetTaskLocked();
}
-Task* ThreadPool::TryGetTaskLocked(Thread* self) {
+Task* ThreadPool::TryGetTaskLocked() {
if (started_ && !tasks_.empty()) {
Task* task = tasks_.front();
tasks_.pop_front();
return task;
}
- return NULL;
+ return nullptr;
}
void ThreadPool::Wait(Thread* self, bool do_work, bool may_hold_locks) {
if (do_work) {
- Task* task = NULL;
- while ((task = TryGetTask(self)) != NULL) {
+ Task* task = nullptr;
+ while ((task = TryGetTask(self)) != nullptr) {
task->Run(self);
task->Finalize();
}
@@ -201,17 +201,17 @@ size_t ThreadPool::GetTaskCount(Thread* self) {
WorkStealingWorker::WorkStealingWorker(ThreadPool* thread_pool, const std::string& name,
size_t stack_size)
- : ThreadPoolWorker(thread_pool, name, stack_size), task_(NULL) {}
+ : ThreadPoolWorker(thread_pool, name, stack_size), task_(nullptr) {}
void WorkStealingWorker::Run() {
Thread* self = Thread::Current();
- Task* task = NULL;
+ Task* task = nullptr;
WorkStealingThreadPool* thread_pool = down_cast<WorkStealingThreadPool*>(thread_pool_);
- while ((task = thread_pool_->GetTask(self)) != NULL) {
+ while ((task = thread_pool_->GetTask(self)) != nullptr) {
WorkStealingTask* stealing_task = down_cast<WorkStealingTask*>(task);
{
- CHECK(task_ == NULL);
+ CHECK(task_ == nullptr);
MutexLock mu(self, thread_pool->work_steal_lock_);
// Register that we are running the task
++stealing_task->ref_count_;
@@ -221,7 +221,7 @@ void WorkStealingWorker::Run() {
// Mark ourselves as not running a task so that nobody tries to steal from us.
// There is a race condition that someone starts stealing from us at this point. This is okay
// due to the reference counting.
- task_ = NULL;
+ task_ = nullptr;
bool finalize;
@@ -229,13 +229,13 @@ void WorkStealingWorker::Run() {
// all that happens when the race occurs is that we steal some work instead of processing a
// task from the queue.
while (thread_pool->GetTaskCount(self) == 0) {
- WorkStealingTask* steal_from_task = NULL;
+ WorkStealingTask* steal_from_task = nullptr;
{
MutexLock mu(self, thread_pool->work_steal_lock_);
// Try finding a task to steal from.
- steal_from_task = thread_pool->FindTaskToStealFrom(self);
- if (steal_from_task != NULL) {
+ steal_from_task = thread_pool->FindTaskToStealFrom();
+ if (steal_from_task != nullptr) {
CHECK_NE(stealing_task, steal_from_task)
<< "Attempting to steal from completed self task";
steal_from_task->ref_count_++;
@@ -244,7 +244,7 @@ void WorkStealingWorker::Run() {
}
}
- if (steal_from_task != NULL) {
+ if (steal_from_task != nullptr) {
// Task which completed earlier is going to steal some work.
stealing_task->StealFrom(self, steal_from_task);
@@ -284,7 +284,7 @@ WorkStealingThreadPool::WorkStealingThreadPool(const char* name, size_t num_thre
}
}
-WorkStealingTask* WorkStealingThreadPool::FindTaskToStealFrom(Thread* self) {
+WorkStealingTask* WorkStealingThreadPool::FindTaskToStealFrom() {
const size_t thread_count = GetThreadCount();
for (size_t i = 0; i < thread_count; ++i) {
// TODO: Use CAS instead of lock.
@@ -301,7 +301,7 @@ WorkStealingTask* WorkStealingThreadPool::FindTaskToStealFrom(Thread* self) {
}
}
// Couldn't find something to steal.
- return NULL;
+ return nullptr;
}
WorkStealingThreadPool::~WorkStealingThreadPool() {}
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index c816c84..d6330c8 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -101,7 +101,7 @@ class ThreadPool {
// Try to get a task, returning NULL if there is none available.
Task* TryGetTask(Thread* self);
- Task* TryGetTaskLocked(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_);
+ Task* TryGetTaskLocked() EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_);
// Are we shutting down?
bool IsShuttingDown() const EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_) {
@@ -178,7 +178,7 @@ class WorkStealingThreadPool : public ThreadPool {
size_t steal_index_;
// Find a task to steal from
- WorkStealingTask* FindTaskToStealFrom(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(work_steal_lock_);
+ WorkStealingTask* FindTaskToStealFrom() EXCLUSIVE_LOCKS_REQUIRED(work_steal_lock_);
friend class WorkStealingWorker;
};
diff --git a/runtime/thread_state.h b/runtime/thread_state.h
index 0e47d21..6e5deeb 100644
--- a/runtime/thread_state.h
+++ b/runtime/thread_state.h
@@ -17,6 +17,8 @@
#ifndef ART_RUNTIME_THREAD_STATE_H_
#define ART_RUNTIME_THREAD_STATE_H_
+#include <ostream>
+
namespace art {
enum ThreadState {
@@ -43,6 +45,7 @@ enum ThreadState {
kNative, // RUNNABLE TS_RUNNING running in a JNI native method
kSuspended, // RUNNABLE TS_RUNNING suspended by GC or debugger
};
+std::ostream& operator<<(std::ostream& os, const ThreadState& rhs);
} // namespace art
diff --git a/runtime/trace.cc b/runtime/trace.cc
index b3158a4..29c01e4 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -244,7 +244,8 @@ static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mu
the_trace->CompareAndUpdateStackTrace(thread, stack_trace);
}
-static void ClearThreadStackTraceAndClockBase(Thread* thread, void* arg) {
+static void ClearThreadStackTraceAndClockBase(Thread* thread, void* arg ATTRIBUTE_UNUSED) {
thread->SetTraceClockBase(0);
std::vector<mirror::ArtMethod*>* stack_trace = thread->GetStackTraceSample();
thread->SetStackTraceSample(NULL);
@@ -561,27 +562,30 @@ void Trace::FinishTracing() {
void Trace::DexPcMoved(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t new_dex_pc) {
+ UNUSED(thread, this_object, method, new_dex_pc);
// We're not registered to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected dex PC event in tracing " << PrettyMethod(method) << " " << new_dex_pc;
}
-void Trace::FieldRead(Thread* /*thread*/, mirror::Object* this_object,
+void Trace::FieldRead(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(thread, this_object, method, dex_pc, field);
// We're not registered to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected field read event in tracing " << PrettyMethod(method) << " " << dex_pc;
}
-void Trace::FieldWritten(Thread* /*thread*/, mirror::Object* this_object,
+void Trace::FieldWritten(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field,
const JValue& field_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(thread, this_object, method, dex_pc, field, field_value);
// We're not registered to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected field write event in tracing " << PrettyMethod(method) << " " << dex_pc;
}
-void Trace::MethodEntered(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc) {
+void Trace::MethodEntered(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED) {
uint32_t thread_clock_diff = 0;
uint32_t wall_clock_diff = 0;
ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
@@ -589,10 +593,9 @@ void Trace::MethodEntered(Thread* thread, mirror::Object* this_object,
thread_clock_diff, wall_clock_diff);
}
-void Trace::MethodExited(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc,
- const JValue& return_value) {
- UNUSED(return_value);
+void Trace::MethodExited(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED,
+ const JValue& return_value ATTRIBUTE_UNUSED) {
uint32_t thread_clock_diff = 0;
uint32_t wall_clock_diff = 0;
ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
@@ -600,8 +603,8 @@ void Trace::MethodExited(Thread* thread, mirror::Object* this_object,
thread_clock_diff, wall_clock_diff);
}
-void Trace::MethodUnwind(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc) {
+void Trace::MethodUnwind(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED) {
uint32_t thread_clock_diff = 0;
uint32_t wall_clock_diff = 0;
ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
@@ -613,6 +616,7 @@ void Trace::ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
mirror::Throwable* exception_object)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(thread, throw_location, catch_method, catch_dex_pc, exception_object);
LOG(ERROR) << "Unexpected exception caught event in tracing";
}
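The trace.cc changes use the two complementary idioms for silencing -Wunused-parameter: ATTRIBUTE_UNUSED on individual parameters the body never touches, and a single UNUSED(...) call when the body ignores the whole list. A standalone sketch, assuming the ATTRIBUTE_UNUSED and UNUSED macros from base/macros.h and the LOG macro; the names OnEvent, StubListener, and Notify are hypothetical:

    static void OnEvent(Thread* thread, void* context ATTRIBUTE_UNUSED) {
      thread->Notify();  // 'thread' is used, so only 'context' is annotated
    }

    void StubListener::FieldRead(Thread* thread, uint32_t dex_pc) {
      UNUSED(thread, dex_pc);  // body uses neither parameter
      LOG(ERROR) << "unexpected field read event";
    }

Parameters that are still referenced later in the body, such as `method` in the LOG statements above, need no annotation at all.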
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index b496f25..478066f 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -144,7 +144,7 @@ void Transaction::RecordWeakStringRemoval(mirror::String* s) {
LogInternedString(log);
}
-void Transaction::LogInternedString(InternStringLog& log) {
+void Transaction::LogInternedString(const InternStringLog& log) {
Locks::intern_table_lock_->AssertExclusiveHeld(Thread::Current());
MutexLock mu(Thread::Current(), log_lock_);
intern_string_logs_.push_front(log);
@@ -384,7 +384,7 @@ void Transaction::ObjectLog::UndoFieldWrite(mirror::Object* obj, MemberOffset fi
}
break;
default:
- LOG(FATAL) << "Unknown value kind " << field_value.kind;
+ LOG(FATAL) << "Unknown value kind " << static_cast<int>(field_value.kind);
break;
}
}
@@ -406,38 +406,38 @@ void Transaction::ObjectLog::VisitRoots(RootCallback* callback, void* arg) {
void Transaction::InternStringLog::Undo(InternTable* intern_table) {
DCHECK(intern_table != nullptr);
switch (string_op_) {
- case InternStringLog::kInsert: {
- switch (string_kind_) {
- case InternStringLog::kStrongString:
- intern_table->RemoveStrongFromTransaction(str_);
- break;
- case InternStringLog::kWeakString:
- intern_table->RemoveWeakFromTransaction(str_);
- break;
- default:
- LOG(FATAL) << "Unknown interned string kind";
- break;
- }
- break;
+ case InternStringLog::kInsert: {
+ switch (string_kind_) {
+ case InternStringLog::kStrongString:
+ intern_table->RemoveStrongFromTransaction(str_);
+ break;
+ case InternStringLog::kWeakString:
+ intern_table->RemoveWeakFromTransaction(str_);
+ break;
+ default:
+ LOG(FATAL) << "Unknown interned string kind";
+ break;
}
- case InternStringLog::kRemove: {
- switch (string_kind_) {
- case InternStringLog::kStrongString:
- intern_table->InsertStrongFromTransaction(str_);
- break;
- case InternStringLog::kWeakString:
- intern_table->InsertWeakFromTransaction(str_);
- break;
- default:
- LOG(FATAL) << "Unknown interned string kind";
- break;
- }
- break;
+ break;
+ }
+ case InternStringLog::kRemove: {
+ switch (string_kind_) {
+ case InternStringLog::kStrongString:
+ intern_table->InsertStrongFromTransaction(str_);
+ break;
+ case InternStringLog::kWeakString:
+ intern_table->InsertWeakFromTransaction(str_);
+ break;
+ default:
+ LOG(FATAL) << "Unknown interned string kind";
+ break;
}
- default:
- LOG(FATAL) << "Unknown interned string op";
- break;
+ break;
}
+ default:
+ LOG(FATAL) << "Unknown interned string op";
+ break;
+ }
}
void Transaction::InternStringLog::VisitRoots(RootCallback* callback, void* arg) {
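The static_cast added to the ObjectLog hunk exists because FieldValueKind has no operator<<: streaming the raw enum relies on implicit promotion to int, which the now-enabled -Wsign-promo can flag. A self-contained illustration (the enumerators here are a hypothetical subset):

    #include <iostream>

    enum FieldValueKind { kBoolean, kByte, k64Bits };

    int main() {
      FieldValueKind kind = k64Bits;
      std::cout << static_cast<int>(kind) << std::endl;  // explicit, warning-free
      return 0;
    }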
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 21d3c98..566f231 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -19,6 +19,7 @@
#include "base/macros.h"
#include "base/mutex.h"
+#include "base/value_object.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "primitive.h"
@@ -35,7 +36,7 @@ class String;
}
class InternTable;
-class Transaction {
+class Transaction FINAL {
public:
Transaction();
~Transaction();
@@ -92,7 +93,7 @@ class Transaction {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
- class ObjectLog {
+ class ObjectLog : public ValueObject {
public:
void LogBooleanValue(MemberOffset offset, uint8_t value, bool is_volatile);
void LogByteValue(MemberOffset offset, int8_t value, bool is_volatile);
@@ -119,7 +120,7 @@ class Transaction {
k64Bits,
kReference
};
- struct FieldValue {
+ struct FieldValue : public ValueObject {
       // TODO: use JValue instead?
uint64_t value;
FieldValueKind kind;
@@ -134,7 +135,7 @@ class Transaction {
std::map<uint32_t, FieldValue> field_values_;
};
- class ArrayLog {
+ class ArrayLog : public ValueObject {
public:
void LogValue(size_t index, uint64_t value);
@@ -153,7 +154,7 @@ class Transaction {
std::map<size_t, uint64_t> array_values_;
};
- class InternStringLog {
+ class InternStringLog : public ValueObject {
public:
enum StringKind {
kStrongString,
@@ -175,11 +176,11 @@ class Transaction {
private:
mirror::String* str_;
- StringKind string_kind_;
- StringOp string_op_;
+ const StringKind string_kind_;
+ const StringOp string_op_;
};
- void LogInternedString(InternStringLog& log)
+ void LogInternedString(const InternStringLog& log)
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
LOCKS_EXCLUDED(log_lock_);
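The log classes now derive from ValueObject, the marker base pulled in via base/value_object.h. A minimal sketch of the idea (the real class may differ in detail): deleting operator new turns accidental heap allocation of these log records into a compile-time error, documenting that they live on the stack or inline in the Transaction's maps.

    #include <cstddef>

    class ValueObject {
     public:
      void* operator new(std::size_t) = delete;
      void operator delete(void*) = delete;
    };

    class InternStringLog : public ValueObject { /* fields elided */ };

    // Stack or by-value use is fine; 'new InternStringLog()' no longer compiles.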
diff --git a/runtime/utils.h b/runtime/utils.h
index b7daa64..39011e2 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -467,15 +467,12 @@ class VoidFunctor {
template <typename A, typename B>
inline void operator() (A a, B b) const {
- UNUSED(a);
- UNUSED(b);
+ UNUSED(a, b);
}
template <typename A, typename B, typename C>
inline void operator() (A a, B b, C c) const {
- UNUSED(a);
- UNUSED(b);
- UNUSED(c);
+ UNUSED(a, b, c);
}
};
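The collapsed UNUSED(a, b) and UNUSED(a, b, c) calls above rely on UNUSED being variadic. One common way to write such a macro, shown as a sketch (ART's actual definition lives in base/macros.h and may differ; ignore_and_discard is a hypothetical name): a function template that accepts any arguments by const reference and does nothing with them, so every argument counts as used.

    template <typename... T>
    inline void ignore_and_discard(const T&...) {}

    #define UNUSED(...) ignore_and_discard(__VA_ARGS__)

Because the arguments are only bound to references, the call compiles away entirely while still suppressing -Wunused-parameter for each name it mentions.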