author      Andreas Gampe <agampe@google.com>   2014-11-24 13:42:22 -0800
committer   Andreas Gampe <agampe@google.com>   2014-11-24 13:43:11 -0800
commit      9f612ffab2b188d80027d961d7118eb2c461b5ad (patch)
tree        aa5a5c83810b13f726b16670bc4442a4f271ee55
parent      c6c88d2079f0d47eb86a2e2e2f53cab25b0503c1 (diff)
ART: Fix unused variables and functions
Change-Id: Icbab884d2dfd71656347368b424cb35cbf524051
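
A note on the pattern: most of this change drops `static` from inline functions that are declared in entrypoint_utils.h and defined in entrypoint_utils-inl.h. A `static` function has internal linkage, so every translation unit that includes the header owns a private copy, and a copy that a given TU never calls can trip Clang's unused-function diagnostics. A plain `inline` function has external linkage: the linker merges the duplicate definitions, and an uncalled copy draws no warning. A minimal sketch of the distinction, with hypothetical names not taken from this patch (exact warning behavior varies by compiler and flags):

    // scale.h -- illustrative sketch only, not part of this change.
    #ifndef SCALE_H_
    #define SCALE_H_

    // Internal linkage: every .cc that includes this header gets its own
    // copy of Half(). A TU that never calls it may be flagged by
    // -Wunused-function (compiler- and version-dependent).
    static inline int Half(int x) {
      return x / 2;
    }

    // External linkage: duplicate inline definitions across TUs are
    // merged at link time; an uncalled copy is not flagged.
    inline int Twice(int x) {
      return x * 2;
    }

    #endif  // SCALE_H_

The header/-inl.h split makes internal linkage especially awkward here: entrypoint_utils.h carries only declarations while entrypoint_utils-inl.h carries the definitions, so with `static` a TU that includes just the header sees internal-linkage declarations with no definition in that TU, which Clang can also flag. The ALWAYS_INLINE attribute stays; only the linkage changes.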
-rw-r--r--  compiler/optimizing/graph_visualizer.h      |  1
-rw-r--r--  compiler/optimizing/optimization.h          |  1
-rw-r--r--  compiler/utils/arm/assembler_arm.cc         |  3
-rw-r--r--  runtime/entrypoints/entrypoint_utils-inl.h  | 94
-rw-r--r--  runtime/entrypoints/entrypoint_utils.h      | 98
-rw-r--r--  runtime/native/java_lang_Thread.cc          |  2
6 files changed, 97 insertions, 102 deletions
diff --git a/compiler/optimizing/graph_visualizer.h b/compiler/optimizing/graph_visualizer.h
index 4d8bec2..60d996b 100644
--- a/compiler/optimizing/graph_visualizer.h
+++ b/compiler/optimizing/graph_visualizer.h
@@ -30,7 +30,6 @@ class HGraph;
// TODO: Create an analysis/optimization abstraction.
static const char* kLivenessPassName = "liveness";
static const char* kRegisterAllocatorPassName = "register";
-static const char* kGVNPassName = "gvn";
/**
* If enabled, emits compilation information suitable for the c1visualizer tool
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
index d281248..e36ef19 100644
--- a/compiler/optimizing/optimization.h
+++ b/compiler/optimizing/optimization.h
@@ -17,7 +17,6 @@
#ifndef ART_COMPILER_OPTIMIZING_OPTIMIZATION_H_
#define ART_COMPILER_OPTIMIZING_OPTIMIZATION_H_
-#include "graph_visualizer.h"
#include "nodes.h"
namespace art {
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 9c84bc1..0f28591 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -205,10 +205,9 @@ uint32_t Address::encodingArm() const {
encoding = am_ | offset_;
}
} else {
- uint32_t imm5 = offset_;
uint32_t shift = shift_;
if (shift == RRX) {
- imm5 = 0;
+ CHECK_EQ(offset_, 0);
shift = ROR;
}
encoding = am_ | static_cast<uint32_t>(rm_) | shift << 5 | offset_ << 7 | B25;
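
In the hunk above, `imm5` was a set-but-never-read local: the encoding expression already reads `offset_` directly, so the variable existed only to be zeroed on the RRX path and drew an unused-variable warning. Rather than silently dropping that zeroing, the patch promotes the invariant to an assertion: RRX takes no shift amount, so the offset field must already be zero, hence CHECK_EQ(offset_, 0).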
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 1a8ca02..67265a2 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -37,9 +37,9 @@ namespace art {
template <const bool kAccessCheck>
ALWAYS_INLINE
-static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
- mirror::ArtMethod* method,
- Thread* self, bool* slow_path) {
+inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
+ mirror::ArtMethod* method,
+ Thread* self, bool* slow_path) {
mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx);
if (UNLIKELY(klass == NULL)) {
klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method);
@@ -90,9 +90,9 @@ static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
}
ALWAYS_INLINE
-static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
- Thread* self,
- bool* slow_path) {
+inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
+ Thread* self,
+ bool* slow_path) {
if (UNLIKELY(!klass->IsInitialized())) {
StackHandleScope<1> hs(self);
Handle<mirror::Class> h_class(hs.NewHandle(klass));
@@ -120,10 +120,10 @@ static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class*
// check.
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE
-static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
- mirror::ArtMethod* method,
- Thread* self,
- gc::AllocatorType allocator_type) {
+inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
+ mirror::ArtMethod* method,
+ Thread* self,
+ gc::AllocatorType allocator_type) {
bool slow_path = false;
mirror::Class* klass = CheckObjectAlloc<kAccessCheck>(type_idx, method, self, &slow_path);
if (UNLIKELY(slow_path)) {
@@ -139,9 +139,9 @@ static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
// Given the context of a calling Method and a resolved class, create an instance.
template <bool kInstrumented>
ALWAYS_INLINE
-static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
- Thread* self,
- gc::AllocatorType allocator_type) {
+inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
+ Thread* self,
+ gc::AllocatorType allocator_type) {
DCHECK(klass != nullptr);
bool slow_path = false;
klass = CheckClassInitializedForObjectAlloc(klass, self, &slow_path);
@@ -160,9 +160,9 @@ static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
// Given the context of a calling Method and an initialized class, create an instance.
template <bool kInstrumented>
ALWAYS_INLINE
-static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
- Thread* self,
- gc::AllocatorType allocator_type) {
+inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
+ Thread* self,
+ gc::AllocatorType allocator_type) {
DCHECK(klass != nullptr);
// Pass in false since the object can not be finalizable.
return klass->Alloc<kInstrumented, false>(self, allocator_type);
@@ -171,10 +171,10 @@ static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klas
template <bool kAccessCheck>
ALWAYS_INLINE
-static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
- mirror::ArtMethod* method,
- int32_t component_count,
- bool* slow_path) {
+inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
+ mirror::ArtMethod* method,
+ int32_t component_count,
+ bool* slow_path) {
if (UNLIKELY(component_count < 0)) {
ThrowNegativeArraySizeException(component_count);
*slow_path = true;
@@ -207,11 +207,11 @@ static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
// check.
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE
-static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
- mirror::ArtMethod* method,
- int32_t component_count,
- Thread* self,
- gc::AllocatorType allocator_type) {
+inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
+ mirror::ArtMethod* method,
+ int32_t component_count,
+ Thread* self,
+ gc::AllocatorType allocator_type) {
bool slow_path = false;
mirror::Class* klass = CheckArrayAlloc<kAccessCheck>(type_idx, method, component_count,
&slow_path);
@@ -230,11 +230,11 @@ static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE
-static inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
- mirror::ArtMethod* method,
- int32_t component_count,
- Thread* self,
- gc::AllocatorType allocator_type) {
+inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
+ mirror::ArtMethod* method,
+ int32_t component_count,
+ Thread* self,
+ gc::AllocatorType allocator_type) {
DCHECK(klass != nullptr);
if (UNLIKELY(component_count < 0)) {
ThrowNegativeArraySizeException(component_count);
@@ -254,8 +254,8 @@ static inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
}
template<FindFieldType type, bool access_check>
-static inline mirror::ArtField* FindFieldFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
- Thread* self, size_t expected_size) {
+inline mirror::ArtField* FindFieldFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
+ Thread* self, size_t expected_size) {
bool is_primitive;
bool is_set;
bool is_static;
@@ -349,9 +349,9 @@ EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(StaticPrimitiveWrite);
#undef EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL
template<InvokeType type, bool access_check>
-static inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx,
- mirror::Object** this_object,
- mirror::ArtMethod** referrer, Thread* self) {
+inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx,
+ mirror::Object** this_object,
+ mirror::ArtMethod** referrer, Thread* self) {
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
mirror::ArtMethod* resolved_method = class_linker->GetResolvedMethod(method_idx, *referrer);
if (resolved_method == nullptr) {
@@ -475,9 +475,9 @@ EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(kInterface);
#undef EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL
// Fast path field resolution that can't initialize classes or throw exceptions.
-static inline mirror::ArtField* FindFieldFast(uint32_t field_idx,
- mirror::ArtMethod* referrer,
- FindFieldType type, size_t expected_size) {
+inline mirror::ArtField* FindFieldFast(uint32_t field_idx,
+ mirror::ArtMethod* referrer,
+ FindFieldType type, size_t expected_size) {
mirror::ArtField* resolved_field =
referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx);
if (UNLIKELY(resolved_field == nullptr)) {
@@ -528,10 +528,10 @@ static inline mirror::ArtField* FindFieldFast(uint32_t field_idx,
}
// Fast path method resolution that can't throw exceptions.
-static inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx,
- mirror::Object* this_object,
- mirror::ArtMethod* referrer,
- bool access_check, InvokeType type) {
+inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::ArtMethod* referrer,
+ bool access_check, InvokeType type) {
if (UNLIKELY(this_object == NULL && type != kStatic)) {
return NULL;
}
@@ -568,7 +568,7 @@ static inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx,
}
}
-static inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx,
+inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx,
mirror::ArtMethod* referrer,
Thread* self, bool can_run_clinit,
bool verify_access) {
@@ -604,13 +604,13 @@ static inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx,
return h_class.Get();
}
-static inline mirror::String* ResolveStringFromCode(mirror::ArtMethod* referrer,
- uint32_t string_idx) {
+inline mirror::String* ResolveStringFromCode(mirror::ArtMethod* referrer,
+ uint32_t string_idx) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
return class_linker->ResolveString(string_idx, referrer);
}
-static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) {
+inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) {
// Save any pending exception over monitor exit call.
mirror::Throwable* saved_exception = NULL;
ThrowLocation saved_throw_location;
@@ -635,7 +635,7 @@ static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) {
}
template <typename INT_TYPE, typename FLOAT_TYPE>
-static inline INT_TYPE art_float_to_integral(FLOAT_TYPE f) {
+inline INT_TYPE art_float_to_integral(FLOAT_TYPE f) {
const INT_TYPE kMaxInt = static_cast<INT_TYPE>(std::numeric_limits<INT_TYPE>::max());
const INT_TYPE kMinInt = static_cast<INT_TYPE>(std::numeric_limits<INT_TYPE>::min());
const FLOAT_TYPE kMaxIntAsFloat = static_cast<FLOAT_TYPE>(kMaxInt);
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 311cafa..0531122 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -42,13 +42,14 @@ class ScopedObjectAccessAlreadyRunnable;
class Thread;
template <const bool kAccessCheck>
-ALWAYS_INLINE static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
- mirror::ArtMethod* method,
- Thread* self, bool* slow_path)
+ALWAYS_INLINE inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
+ mirror::ArtMethod* method,
+ Thread* self, bool* slow_path)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-ALWAYS_INLINE static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
- Thread* self, bool* slow_path)
+ALWAYS_INLINE inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
+ Thread* self,
+ bool* slow_path)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it
@@ -56,32 +57,32 @@ ALWAYS_INLINE static inline mirror::Class* CheckClassInitializedForObjectAlloc(m
// When verification/compiler hasn't been able to verify access, optionally perform an access
// check.
template <bool kAccessCheck, bool kInstrumented>
-ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
- mirror::ArtMethod* method,
- Thread* self,
- gc::AllocatorType allocator_type)
+ALWAYS_INLINE inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
+ mirror::ArtMethod* method,
+ Thread* self,
+ gc::AllocatorType allocator_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Given the context of a calling Method and a resolved class, create an instance.
template <bool kInstrumented>
-ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
- Thread* self,
- gc::AllocatorType allocator_type)
+ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
+ Thread* self,
+ gc::AllocatorType allocator_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Given the context of a calling Method and an initialized class, create an instance.
template <bool kInstrumented>
-ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
- Thread* self,
- gc::AllocatorType allocator_type)
+ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
+ Thread* self,
+ gc::AllocatorType allocator_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template <bool kAccessCheck>
-ALWAYS_INLINE static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
- mirror::ArtMethod* method,
- int32_t component_count,
- bool* slow_path)
+ALWAYS_INLINE inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
+ mirror::ArtMethod* method,
+ int32_t component_count,
+ bool* slow_path)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Given the context of a calling Method, use its DexCache to resolve a type to an array Class. If
@@ -89,19 +90,19 @@ ALWAYS_INLINE static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
// When verification/compiler hasn't been able to verify access, optionally perform an access
// check.
template <bool kAccessCheck, bool kInstrumented>
-ALWAYS_INLINE static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
- mirror::ArtMethod* method,
- int32_t component_count,
- Thread* self,
- gc::AllocatorType allocator_type)
+ALWAYS_INLINE inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
+ mirror::ArtMethod* method,
+ int32_t component_count,
+ Thread* self,
+ gc::AllocatorType allocator_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template <bool kAccessCheck, bool kInstrumented>
-ALWAYS_INLINE static inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
- mirror::ArtMethod* method,
- int32_t component_count,
- Thread* self,
- gc::AllocatorType allocator_type)
+ALWAYS_INLINE inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
+ mirror::ArtMethod* method,
+ int32_t component_count,
+ Thread* self,
+ gc::AllocatorType allocator_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::ArtMethod* method,
@@ -130,43 +131,42 @@ enum FindFieldType {
};
template<FindFieldType type, bool access_check>
-static inline mirror::ArtField* FindFieldFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
- Thread* self, size_t expected_size)
+inline mirror::ArtField* FindFieldFromCode(uint32_t field_idx, mirror::ArtMethod* referrer,
+ Thread* self, size_t expected_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<InvokeType type, bool access_check>
-static inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx,
- mirror::Object** this_object,
- mirror::ArtMethod** referrer, Thread* self)
+inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx,
+ mirror::Object** this_object,
+ mirror::ArtMethod** referrer, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Fast path field resolution that can't initialize classes or throw exceptions.
-static inline mirror::ArtField* FindFieldFast(uint32_t field_idx,
- mirror::ArtMethod* referrer,
- FindFieldType type, size_t expected_size)
+inline mirror::ArtField* FindFieldFast(uint32_t field_idx,
+ mirror::ArtMethod* referrer,
+ FindFieldType type, size_t expected_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Fast path method resolution that can't throw exceptions.
-static inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx,
- mirror::Object* this_object,
- mirror::ArtMethod* referrer,
- bool access_check, InvokeType type)
+inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::ArtMethod* referrer,
+ bool access_check, InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-static inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx,
- mirror::ArtMethod* referrer,
- Thread* self, bool can_run_clinit,
- bool verify_access)
+inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx,
+ mirror::ArtMethod* referrer,
+ Thread* self, bool can_run_clinit,
+ bool verify_access)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
extern void ThrowStackOverflowError(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-static inline mirror::String* ResolveStringFromCode(mirror::ArtMethod* referrer,
- uint32_t string_idx)
+inline mirror::String* ResolveStringFromCode(mirror::ArtMethod* referrer, uint32_t string_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// TODO: annotalysis disabled as monitor semantics are maintained in Java code.
-static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self)
+inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self)
NO_THREAD_SAFETY_ANALYSIS;
void CheckReferenceResult(mirror::Object* o, Thread* self)
@@ -181,7 +181,7 @@ bool FillArrayData(mirror::Object* obj, const Instruction::ArrayDataPayload* pay
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template <typename INT_TYPE, typename FLOAT_TYPE>
-static inline INT_TYPE art_float_to_integral(FLOAT_TYPE f);
+inline INT_TYPE art_float_to_integral(FLOAT_TYPE f);
} // namespace art
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index 420e9df..760eb9b 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -118,14 +118,12 @@ static void Thread_nativeInterrupt(JNIEnv* env, jobject java_thread) {
static void Thread_nativeSetName(JNIEnv* env, jobject peer, jstring java_name) {
ScopedUtfChars name(env, java_name);
- Thread* self;
{
ScopedObjectAccess soa(env);
if (soa.Decode<mirror::Object*>(peer) == soa.Self()->GetPeer()) {
soa.Self()->SetThreadName(name.c_str());
return;
}
- self = soa.Self();
}
// Suspend thread to avoid it from killing itself while we set its name. We don't just hold the
// thread list lock to avoid this, as setting the thread name causes mutator to lock/unlock
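
The deleted `self` in this last hunk is the same story in miniature: it was assigned inside the ScopedObjectAccess block but never read afterwards, so the set-but-unused local goes away; the suspension logic that follows (not shown in this hunk) evidently no longer needs it.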