author     Bernhard Rosenkränzer <Bernhard.Rosenkranzer@linaro.org>  2013-12-12 02:15:52 +0100
committer  Bernhard Rosenkränzer <Bernhard.Rosenkranzer@linaro.org>  2013-12-12 13:08:03 +0100
commit     460536209b741bc469f1b0857775449abb2102fb (patch)
tree       4bc7f636b86ab2531811827d0fd0c33011a7280e
parent     19031d51ab9edfc3b1996891d654a6b86ca937c0 (diff)
Don't rely on gcc extensions
Make the code more compatible with different compilers. clang doesn't allow
extra static qualifiers on template specializations, const qualifiers on
function types, or inline attributes on lambda functions, and is more picky
about casting away constness with reinterpret_cast. These modifications are
compatible with both gcc and clang.

Change-Id: I739b10df2780bec537827a13679fd2bcc2cc7188
Signed-off-by: Bernhard Rosenkränzer <Bernhard.Rosenkranzer@linaro.org>
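For context, the four gcc-only constructs the commit message names can be reduced to a small standalone sketch. This is not ART code; the names (Helper, Twice, kNegate, ALWAYS_INLINE_LAMBDA's local redefinition) are illustrative, and the block only shows the shape of the incompatibilities and the portable replacements the patch switches to.

```cpp
// Minimal, hypothetical illustration (not ART code) of the gcc-only
// constructs the patch removes and the portable forms it adopts.
#include <cstdio>

template <bool kNegate>
int Helper(int x) { return kNegate ? -x : x; }

// 1. Explicit instantiation: gcc tolerates a redundant 'static' here,
//    clang rejects it ("explicit instantiation cannot be 'static'").
// template static int Helper<true>(int);   // gcc extension, rejected by clang
template int Helper<true>(int);             // portable form used by the patch

// 2./3. Function types and const-correct casts: 'const fnptr*' puts const on
//    a function type, and clang refuses to let reinterpret_cast drop
//    constness, so the patch uses a plain function pointer and const_casts
//    the underlying pointer first.
typedef int (fntype)(int);                  // a function *type*, not a pointer
int Twice(int x) { return 2 * x; }

int main() {
  // Stand-in for a stored, const native entry point (e.g. a const void*).
  const void* native = reinterpret_cast<const void*>(&Twice);
  fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(native));

  // 4. Attributes on lambdas: clang rejects __attribute__((always_inline))
  //    on a lambda, hence the new ALWAYS_INLINE_LAMBDA macro that expands
  //    to nothing when __clang__ is defined.
#ifdef __clang__
#define ALWAYS_INLINE_LAMBDA
#else
#define ALWAYS_INLINE_LAMBDA __attribute__((always_inline))
#endif
  auto add = [](int a, int b) ALWAYS_INLINE_LAMBDA { return a + b; };

  std::printf("%d\n", add(fn(3), Helper<true>(4)));  // prints 2
  return 0;
}
```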
-rw-r--r--  compiler/dex/portable/mir_to_gbc.cc                            1
-rw-r--r--  compiler/driver/compiler_driver.cc                             2
-rw-r--r--  compiler/image_writer.cc                                       4
-rw-r--r--  runtime/base/macros.h                                          7
-rw-r--r--  runtime/entrypoints/entrypoint_utils.h                        18
-rw-r--r--  runtime/entrypoints/portable/portable_invoke_entrypoints.cc  16
-rw-r--r--  runtime/entrypoints/quick/quick_invoke_entrypoints.cc        18
-rw-r--r--  runtime/gc/collector/mark_sweep.cc                             2
-rw-r--r--  runtime/gc/collector/semi_space.cc                             2
-rw-r--r--  runtime/interpreter/interpreter.cc                            72
-rw-r--r--  runtime/interpreter/interpreter_common.h                      52
11 files changed, 100 insertions, 94 deletions
diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc
index e5b4876..70b660b 100644
--- a/compiler/dex/portable/mir_to_gbc.cc
+++ b/compiler/dex/portable/mir_to_gbc.cc
@@ -1660,7 +1660,6 @@ bool MirConverter::BlockBitcodeConversion(BasicBlock* bb) {
uint16_t arg_reg = cu_->num_regs;
::llvm::Function::arg_iterator arg_iter(func_->arg_begin());
- ::llvm::Function::arg_iterator arg_end(func_->arg_end());
const char* shorty = cu_->shorty;
uint32_t shorty_size = strlen(shorty);
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 8e666dd..43ed28c 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1430,7 +1430,7 @@ class ParallelCompilationManager {
private:
ParallelCompilationManager* const manager_;
const size_t end_;
- const Callback* const callback_;
+ Callback* const callback_;
};
AtomicInteger index_;
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 90e2c65..02654ad 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -630,10 +630,10 @@ void ImageWriter::FixupMethod(const ArtMethod* orig, ArtMethod* copy) {
copy->SetEntryPointFromCompiledCode(GetOatAddress(quick_to_interpreter_bridge_offset_));
#endif
copy->SetEntryPointFromInterpreter(reinterpret_cast<EntryPointFromInterpreter*>
- (GetOatAddress(interpreter_to_interpreter_bridge_offset_)));
+ (const_cast<byte*>(GetOatAddress(interpreter_to_interpreter_bridge_offset_))));
} else {
copy->SetEntryPointFromInterpreter(reinterpret_cast<EntryPointFromInterpreter*>
- (GetOatAddress(interpreter_to_compiled_code_bridge_offset_)));
+ (const_cast<byte*>(GetOatAddress(interpreter_to_compiled_code_bridge_offset_))));
// Use original code if it exists. Otherwise, set the code pointer to the resolution
// trampoline.
const byte* code = GetOatAddress(orig->GetOatCodeOffset());
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index 00a530a..cf7029a 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -140,6 +140,13 @@ char (&ArraySizeHelper(T (&array)[N]))[N];
#define ALWAYS_INLINE __attribute__ ((always_inline))
#endif
+#ifdef __clang__
+/* clang doesn't like attributes on lambda functions */
+#define ALWAYS_INLINE_LAMBDA
+#else
+#define ALWAYS_INLINE_LAMBDA ALWAYS_INLINE
+#endif
+
#if defined (__APPLE__)
#define HOT_ATTR
#define COLD_ATTR
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index bfdbd74..a60446c 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -262,9 +262,9 @@ static inline mirror::ArtField* FindFieldFromCode(uint32_t field_idx, const mirr
// Explicit template declarations of FindFieldFromCode for all field access types.
#define EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \
template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
-static mirror::ArtField* FindFieldFromCode<_type, _access_check>(uint32_t field_idx, \
- const mirror::ArtMethod* referrer, \
- Thread* self, size_t expected_size) \
+mirror::ArtField* FindFieldFromCode<_type, _access_check>(uint32_t field_idx, \
+ const mirror::ArtMethod* referrer, \
+ Thread* self, size_t expected_size) \
#define EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(_type) \
EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(_type, false); \
@@ -393,12 +393,12 @@ static inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror:
}
// Explicit template declarations of FindMethodFromCode for all invoke types.
-#define EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \
- template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
- static mirror::ArtMethod* FindMethodFromCode<_type, _access_check>(uint32_t method_idx, \
- mirror::Object* this_object, \
- mirror::ArtMethod* referrer, \
- Thread* self)
+#define EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \
+ template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
+ mirror::ArtMethod* FindMethodFromCode<_type, _access_check>(uint32_t method_idx, \
+ mirror::Object* this_object, \
+ mirror::ArtMethod* referrer, \
+ Thread* self)
#define EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(_type) \
EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, false); \
EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, true)
diff --git a/runtime/entrypoints/portable/portable_invoke_entrypoints.cc b/runtime/entrypoints/portable/portable_invoke_entrypoints.cc
index e2a0cc2..47ccbb1 100644
--- a/runtime/entrypoints/portable/portable_invoke_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_invoke_entrypoints.cc
@@ -22,8 +22,8 @@
namespace art {
template<InvokeType type, bool access_check>
-static mirror::ArtMethod* FindMethodHelper(uint32_t method_idx, mirror::Object* this_object,
- mirror::ArtMethod* caller_method, Thread* thread) {
+mirror::ArtMethod* FindMethodHelper(uint32_t method_idx, mirror::Object* this_object,
+ mirror::ArtMethod* caller_method, Thread* thread) {
mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method,
access_check, type);
if (UNLIKELY(method == NULL)) {
@@ -46,12 +46,12 @@ static mirror::ArtMethod* FindMethodHelper(uint32_t method_idx, mirror::Object*
}
// Explicit template declarations of FindMethodHelper for all invoke types.
-#define EXPLICIT_FIND_METHOD_HELPER_TEMPLATE_DECL(_type, _access_check) \
- template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \
- static mirror::ArtMethod* FindMethodHelper<_type, _access_check>(uint32_t method_idx, \
- mirror::Object* this_object, \
- mirror::ArtMethod* caller_method, \
- Thread* thread)
+#define EXPLICIT_FIND_METHOD_HELPER_TEMPLATE_DECL(_type, _access_check) \
+ template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \
+ mirror::ArtMethod* FindMethodHelper<_type, _access_check>(uint32_t method_idx, \
+ mirror::Object* this_object, \
+ mirror::ArtMethod* caller_method, \
+ Thread* thread)
#define EXPLICIT_FIND_METHOD_HELPER_TYPED_TEMPLATE_DECL(_type) \
EXPLICIT_FIND_METHOD_HELPER_TEMPLATE_DECL(_type, false); \
EXPLICIT_FIND_METHOD_HELPER_TEMPLATE_DECL(_type, true)
diff --git a/runtime/entrypoints/quick/quick_invoke_entrypoints.cc b/runtime/entrypoints/quick/quick_invoke_entrypoints.cc
index b852a32..5a1b3e8 100644
--- a/runtime/entrypoints/quick/quick_invoke_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_invoke_entrypoints.cc
@@ -142,9 +142,9 @@ extern "C" uint64_t artInvokeInterfaceTrampoline(mirror::ArtMethod* interface_me
}
template<InvokeType type, bool access_check>
-static uint64_t artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
- mirror::ArtMethod* caller_method,
- Thread* self, mirror::ArtMethod** sp) {
+uint64_t artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
+ mirror::ArtMethod* caller_method,
+ Thread* self, mirror::ArtMethod** sp) {
mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method,
access_check, type);
if (UNLIKELY(method == NULL)) {
@@ -174,12 +174,12 @@ static uint64_t artInvokeCommon(uint32_t method_idx, mirror::Object* this_object
}
// Explicit template declarations of artInvokeCommon for all invoke types.
-#define EXPLICIT_ART_INVOKE_COMMON_TEMPLATE_DECL(_type, _access_check) \
- template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \
- static uint64_t artInvokeCommon<_type, _access_check>(uint32_t method_idx, \
- mirror::Object* this_object, \
- mirror::ArtMethod* caller_method, \
- Thread* self, mirror::ArtMethod** sp)
+#define EXPLICIT_ART_INVOKE_COMMON_TEMPLATE_DECL(_type, _access_check) \
+ template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \
+ uint64_t artInvokeCommon<_type, _access_check>(uint32_t method_idx, \
+ mirror::Object* this_object, \
+ mirror::ArtMethod* caller_method, \
+ Thread* self, mirror::ArtMethod** sp)
#define EXPLICIT_ART_INVOKE_COMMON_TYPED_TEMPLATE_DECL(_type) \
EXPLICIT_ART_INVOKE_COMMON_TEMPLATE_DECL(_type, false); \
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 0697a65..28cc510 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -669,7 +669,7 @@ class MarkStackTask : public Task {
MarkSweep* mark_sweep = chunk_task_->mark_sweep_;
mark_sweep->ScanObjectVisit(obj,
[mark_sweep, this](Object* /* obj */, Object* ref, const MemberOffset& /* offset */,
- bool /* is_static */) ALWAYS_INLINE {
+ bool /* is_static */) ALWAYS_INLINE_LAMBDA {
if (ref != nullptr && mark_sweep->MarkObjectParallel(ref)) {
if (kUseFinger) {
android_memory_barrier();
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index b75b493..923560e 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -558,7 +558,7 @@ void SemiSpace::ScanObject(Object* obj) {
DCHECK(obj != NULL);
DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
MarkSweep::VisitObjectReferences(obj, [this](Object* obj, Object* ref, const MemberOffset& offset,
- bool /* is_static */) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
+ bool /* is_static */) ALWAYS_INLINE_LAMBDA NO_THREAD_SAFETY_ANALYSIS {
mirror::Object* new_address = MarkObject(ref);
if (new_address != ref) {
DCHECK(new_address != nullptr);
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 02c9012..f574a0f 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -90,8 +90,8 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedObjectAccessUnchecked soa(self);
if (method->IsStatic()) {
if (shorty == "L") {
- typedef jobject (fnptr)(JNIEnv*, jclass);
- const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+ typedef jobject (fntype)(JNIEnv*, jclass);
+ fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
jobject jresult;
@@ -101,36 +101,36 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
}
result->SetL(soa.Decode<Object*>(jresult));
} else if (shorty == "V") {
- typedef void (fnptr)(JNIEnv*, jclass);
- const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+ typedef void (fntype)(JNIEnv*, jclass);
+ fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
fn(soa.Env(), klass.get());
} else if (shorty == "Z") {
- typedef jboolean (fnptr)(JNIEnv*, jclass);
- const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+ typedef jboolean (fntype)(JNIEnv*, jclass);
+ fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
result->SetZ(fn(soa.Env(), klass.get()));
} else if (shorty == "BI") {
- typedef jbyte (fnptr)(JNIEnv*, jclass, jint);
- const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+ typedef jbyte (fntype)(JNIEnv*, jclass, jint);
+ fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
result->SetB(fn(soa.Env(), klass.get(), args[0]));
} else if (shorty == "II") {
- typedef jint (fnptr)(JNIEnv*, jclass, jint);
- const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+ typedef jint (fntype)(JNIEnv*, jclass, jint);
+ fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
result->SetI(fn(soa.Env(), klass.get(), args[0]));
} else if (shorty == "LL") {
- typedef jobject (fnptr)(JNIEnv*, jclass, jobject);
- const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+ typedef jobject (fntype)(JNIEnv*, jclass, jobject);
+ fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -142,15 +142,15 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
}
result->SetL(soa.Decode<Object*>(jresult));
} else if (shorty == "IIZ") {
- typedef jint (fnptr)(JNIEnv*, jclass, jint, jboolean);
- const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+ typedef jint (fntype)(JNIEnv*, jclass, jint, jboolean);
+ fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
result->SetI(fn(soa.Env(), klass.get(), args[0], args[1]));
} else if (shorty == "ILI") {
- typedef jint (fnptr)(JNIEnv*, jclass, jobject, jint);
- const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+ typedef jint (fntype)(JNIEnv*, jclass, jobject, jint);
+ fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -158,22 +158,22 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedThreadStateChange tsc(self, kNative);
result->SetI(fn(soa.Env(), klass.get(), arg0.get(), args[1]));
} else if (shorty == "SIZ") {
- typedef jshort (fnptr)(JNIEnv*, jclass, jint, jboolean);
- const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+ typedef jshort (fntype)(JNIEnv*, jclass, jint, jboolean);
+ fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
result->SetS(fn(soa.Env(), klass.get(), args[0], args[1]));
} else if (shorty == "VIZ") {
- typedef void (fnptr)(JNIEnv*, jclass, jint, jboolean);
- const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+ typedef void (fntype)(JNIEnv*, jclass, jint, jboolean);
+ fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
fn(soa.Env(), klass.get(), args[0], args[1]);
} else if (shorty == "ZLL") {
- typedef jboolean (fnptr)(JNIEnv*, jclass, jobject, jobject);
- const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+ typedef jboolean (fntype)(JNIEnv*, jclass, jobject, jobject);
+ fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -183,8 +183,8 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedThreadStateChange tsc(self, kNative);
result->SetZ(fn(soa.Env(), klass.get(), arg0.get(), arg1.get()));
} else if (shorty == "ZILL") {
- typedef jboolean (fnptr)(JNIEnv*, jclass, jint, jobject, jobject);
- const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+ typedef jboolean (fntype)(JNIEnv*, jclass, jint, jobject, jobject);
+ fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg1(soa.Env(),
@@ -194,8 +194,8 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedThreadStateChange tsc(self, kNative);
result->SetZ(fn(soa.Env(), klass.get(), args[0], arg1.get(), arg2.get()));
} else if (shorty == "VILII") {
- typedef void (fnptr)(JNIEnv*, jclass, jint, jobject, jint, jint);
- const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+ typedef void (fntype)(JNIEnv*, jclass, jint, jobject, jint, jint);
+ fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg1(soa.Env(),
@@ -203,8 +203,8 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedThreadStateChange tsc(self, kNative);
fn(soa.Env(), klass.get(), args[0], arg1.get(), args[2], args[3]);
} else if (shorty == "VLILII") {
- typedef void (fnptr)(JNIEnv*, jclass, jobject, jint, jobject, jint, jint);
- const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+ typedef void (fntype)(JNIEnv*, jclass, jobject, jint, jobject, jint, jint);
+ fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -219,8 +219,8 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
}
} else {
if (shorty == "L") {
- typedef jobject (fnptr)(JNIEnv*, jobject);
- const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+ typedef jobject (fntype)(JNIEnv*, jobject);
+ fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
ScopedLocalRef<jobject> rcvr(soa.Env(),
soa.AddLocalReference<jobject>(receiver));
jobject jresult;
@@ -230,15 +230,15 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
}
result->SetL(soa.Decode<Object*>(jresult));
} else if (shorty == "V") {
- typedef void (fnptr)(JNIEnv*, jobject);
- const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+ typedef void (fntype)(JNIEnv*, jobject);
+ fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
ScopedLocalRef<jobject> rcvr(soa.Env(),
soa.AddLocalReference<jobject>(receiver));
ScopedThreadStateChange tsc(self, kNative);
fn(soa.Env(), rcvr.get());
} else if (shorty == "LL") {
- typedef jobject (fnptr)(JNIEnv*, jobject, jobject);
- const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+ typedef jobject (fntype)(JNIEnv*, jobject, jobject);
+ fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
ScopedLocalRef<jobject> rcvr(soa.Env(),
soa.AddLocalReference<jobject>(receiver));
ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -251,8 +251,8 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
result->SetL(soa.Decode<Object*>(jresult));
ScopedThreadStateChange tsc(self, kNative);
} else if (shorty == "III") {
- typedef jint (fnptr)(JNIEnv*, jobject, jint, jint);
- const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+ typedef jint (fntype)(JNIEnv*, jobject, jint, jint);
+ fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
ScopedLocalRef<jobject> rcvr(soa.Env(),
soa.AddLocalReference<jobject>(receiver));
ScopedThreadStateChange tsc(self, kNative);
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 3b8d50b..a9b8909 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -557,11 +557,11 @@ static inline bool IsBackwardBranch(int32_t branch_offset) {
}
// Explicitly instantiate all DoInvoke functions.
-#define EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, _is_range, _do_check) \
- template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
- static bool DoInvoke<_type, _is_range, _do_check>(Thread* self, ShadowFrame& shadow_frame, \
- const Instruction* inst, uint16_t inst_data, \
- JValue* result)
+#define EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, _is_range, _do_check) \
+ template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
+ bool DoInvoke<_type, _is_range, _do_check>(Thread* self, ShadowFrame& shadow_frame, \
+ const Instruction* inst, uint16_t inst_data, \
+ JValue* result)
#define EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(_type) \
EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, false, false); \
@@ -578,10 +578,10 @@ EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(kInterface); // invoke-interface/range.
#undef EXPLICIT_DO_INVOKE_TEMPLATE_DECL
// Explicitly instantiate all DoFieldGet functions.
-#define EXPLICIT_DO_FIELD_GET_TEMPLATE_DECL(_find_type, _field_type, _do_check) \
- template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
- static bool DoFieldGet<_find_type, _field_type, _do_check>(Thread* self, ShadowFrame& shadow_frame, \
- const Instruction* inst, uint16_t inst_data)
+#define EXPLICIT_DO_FIELD_GET_TEMPLATE_DECL(_find_type, _field_type, _do_check) \
+ template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
+ bool DoFieldGet<_find_type, _field_type, _do_check>(Thread* self, ShadowFrame& shadow_frame, \
+ const Instruction* inst, uint16_t inst_data)
#define EXPLICIT_DO_FIELD_GET_ALL_TEMPLATE_DECL(_find_type, _field_type) \
EXPLICIT_DO_FIELD_GET_TEMPLATE_DECL(_find_type, _field_type, false); \
@@ -609,10 +609,10 @@ EXPLICIT_DO_FIELD_GET_ALL_TEMPLATE_DECL(StaticObjectRead, Primitive::kPrimNot);
#undef EXPLICIT_DO_FIELD_GET_TEMPLATE_DECL
// Explicitly instantiate all DoFieldPut functions.
-#define EXPLICIT_DO_FIELD_PUT_TEMPLATE_DECL(_find_type, _field_type, _do_check) \
- template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
- static bool DoFieldPut<_find_type, _field_type, _do_check>(Thread* self, const ShadowFrame& shadow_frame, \
- const Instruction* inst, uint16_t inst_data)
+#define EXPLICIT_DO_FIELD_PUT_TEMPLATE_DECL(_find_type, _field_type, _do_check) \
+ template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
+ bool DoFieldPut<_find_type, _field_type, _do_check>(Thread* self, const ShadowFrame& shadow_frame, \
+ const Instruction* inst, uint16_t inst_data)
#define EXPLICIT_DO_FIELD_PUT_ALL_TEMPLATE_DECL(_find_type, _field_type) \
EXPLICIT_DO_FIELD_PUT_TEMPLATE_DECL(_find_type, _field_type, false); \
@@ -640,21 +640,21 @@ EXPLICIT_DO_FIELD_PUT_ALL_TEMPLATE_DECL(StaticObjectWrite, Primitive::kPrimNot);
#undef EXPLICIT_DO_FIELD_PUT_TEMPLATE_DECL
// Explicitly instantiate all DoInvokeVirtualQuick functions.
-#define EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(_is_range) \
- template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
- static bool DoInvokeVirtualQuick<_is_range>(Thread* self, ShadowFrame& shadow_frame, \
- const Instruction* inst, uint16_t inst_data, \
- JValue* result)
+#define EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(_is_range) \
+ template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
+ bool DoInvokeVirtualQuick<_is_range>(Thread* self, ShadowFrame& shadow_frame, \
+ const Instruction* inst, uint16_t inst_data, \
+ JValue* result)
EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(false); // invoke-virtual-quick.
EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(true); // invoke-virtual-quick-range.
#undef EXPLICIT_INSTANTIATION_DO_INVOKE_VIRTUAL_QUICK
// Explicitly instantiate all DoIGetQuick functions.
-#define EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(_field_type) \
- template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
- static bool DoIGetQuick<_field_type>(ShadowFrame& shadow_frame, const Instruction* inst, \
- uint16_t inst_data)
+#define EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(_field_type) \
+ template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
+ bool DoIGetQuick<_field_type>(ShadowFrame& shadow_frame, const Instruction* inst, \
+ uint16_t inst_data)
EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimInt); // iget-quick.
EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimLong); // iget-wide-quick.
@@ -662,10 +662,10 @@ EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimNot); // iget-object-qui
#undef EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL
// Explicitly instantiate all DoIPutQuick functions.
-#define EXPLICIT_DO_IPUT_QUICK_TEMPLATE_DECL(_field_type) \
- template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
- static bool DoIPutQuick<_field_type>(const ShadowFrame& shadow_frame, const Instruction* inst, \
- uint16_t inst_data)
+#define EXPLICIT_DO_IPUT_QUICK_TEMPLATE_DECL(_field_type) \
+ template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
+ bool DoIPutQuick<_field_type>(const ShadowFrame& shadow_frame, const Instruction* inst, \
+ uint16_t inst_data)
EXPLICIT_DO_IPUT_QUICK_TEMPLATE_DECL(Primitive::kPrimInt); // iget-quick.
EXPLICIT_DO_IPUT_QUICK_TEMPLATE_DECL(Primitive::kPrimLong); // iget-wide-quick.