summaryrefslogtreecommitdiffstats
path: root/runtime
diff options
context:
space:
mode:
Diffstat (limited to 'runtime')
-rw-r--r--runtime/Android.mk4
-rw-r--r--runtime/base/logging.h1
-rw-r--r--runtime/base/mutex.h1
-rw-r--r--runtime/class_linker.cc102
-rw-r--r--runtime/debugger.cc7
-rw-r--r--runtime/dex_file.h7
-rw-r--r--runtime/dex_instruction_utils.h233
-rw-r--r--runtime/gc/heap.cc2
-rw-r--r--runtime/gc/space/image_space.cc2
-rw-r--r--runtime/instrumentation.cc27
-rw-r--r--runtime/instrumentation.h37
-rw-r--r--runtime/interpreter/interpreter_goto_table_impl.cc25
-rw-r--r--runtime/java_vm_ext.cc3
-rw-r--r--runtime/jit/jit.cc160
-rw-r--r--runtime/jit/jit.h102
-rw-r--r--runtime/jit/jit_code_cache.cc121
-rw-r--r--runtime/jit/jit_code_cache.h118
-rw-r--r--runtime/jit/jit_instrumentation.cc117
-rw-r--r--runtime/jit/jit_instrumentation.h107
-rw-r--r--runtime/mirror/art_field.cc2
-rw-r--r--runtime/mirror/art_method-inl.h5
-rw-r--r--runtime/mirror/art_method.cc17
-rw-r--r--runtime/mirror/art_method.h31
-rw-r--r--runtime/oat_file.cc8
-rw-r--r--runtime/oat_file.h4
-rw-r--r--runtime/object_lock.cc1
-rw-r--r--runtime/parsed_options.cc46
-rw-r--r--runtime/profiler.cc2
-rw-r--r--runtime/quick/inline_method_analyser.cc12
-rw-r--r--runtime/runtime.cc101
-rw-r--r--runtime/runtime.h31
-rw-r--r--runtime/runtime_options.def3
-rw-r--r--runtime/runtime_options.h2
-rw-r--r--runtime/signal_catcher.cc2
-rw-r--r--runtime/thread.cc2
-rw-r--r--runtime/trace.cc8
-rw-r--r--runtime/trace.h3
-rw-r--r--runtime/transaction.cc2
-rw-r--r--runtime/utils.cc6
-rw-r--r--runtime/verifier/method_verifier.cc47
-rw-r--r--runtime/verifier/method_verifier.h18
41 files changed, 1364 insertions, 165 deletions
diff --git a/runtime/Android.mk b/runtime/Android.mk
index c647cc2..ab346e3 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -88,6 +88,9 @@ LIBART_COMMON_SRC_FILES := \
jdwp/jdwp_socket.cc \
jdwp/object_registry.cc \
jni_env_ext.cc \
+ jit/jit.cc \
+ jit/jit_code_cache.cc \
+ jit/jit_instrumentation.cc \
jni_internal.cc \
jobject_comparator.cc \
mem_map.cc \
@@ -298,6 +301,7 @@ LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \
base/unix_file/fd_file.h \
dex_file.h \
dex_instruction.h \
+ dex_instruction_utils.h \
gc_root.h \
gc/allocator/rosalloc.h \
gc/collector/gc_type.h \
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index cc1a4a1..3d007ba 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -42,6 +42,7 @@ struct LogVerbosity {
bool gc;
bool heap;
bool jdwp;
+ bool jit;
bool jni;
bool monitor;
bool profiler;
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 745b209..45d2347 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -67,6 +67,7 @@ enum LockLevel {
kReferenceQueueWeakReferencesLock,
kReferenceQueueClearedReferencesLock,
kReferenceProcessorLock,
+ kJitCodeCacheLock,
kRosAllocGlobalLock,
kRosAllocBracketLock,
kRosAllocBulkFreeLock,
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 3278751..f28253a 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -43,6 +43,8 @@
#include "handle_scope.h"
#include "intern_table.h"
#include "interpreter/interpreter.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
#include "leb128.h"
#include "oat.h"
#include "oat_file.h"
@@ -91,15 +93,14 @@ static void ThrowEarlierClassFailure(mirror::Class* c)
// a NoClassDefFoundError (v2 2.17.5). The exception to this rule is if we
// failed in verification, in which case v2 5.4.1 says we need to re-throw
// the previous error.
- Runtime* runtime = Runtime::Current();
- bool is_compiler = runtime->IsCompiler();
- if (!is_compiler) { // Give info if this occurs at runtime.
+ Runtime* const runtime = Runtime::Current();
+ if (!runtime->IsAotCompiler()) { // Give info if this occurs at runtime.
LOG(INFO) << "Rejecting re-init on previously-failed class " << PrettyClass(c);
}
CHECK(c->IsErroneous()) << PrettyClass(c) << " " << c->GetStatus();
Thread* self = Thread::Current();
- if (is_compiler) {
+ if (runtime->IsAotCompiler()) {
// At compile time, accurate errors and NCDFE are disabled to speed compilation.
mirror::Throwable* pre_allocated = runtime->GetPreAllocatedNoClassDefFoundError();
self->SetException(ThrowLocation(), pre_allocated);
@@ -428,7 +429,7 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
// Set up GenericJNI entrypoint. That is mainly a hack for common_compiler_test.h so that
// we do not need friend classes or a publicly exposed setter.
quick_generic_jni_trampoline_ = GetQuickGenericJniStub();
- if (!runtime->IsCompiler()) {
+ if (!runtime->IsAotCompiler()) {
// We need to set up the generic trampolines since we don't have an image.
quick_resolution_trampoline_ = GetQuickResolutionStub();
quick_imt_conflict_trampoline_ = GetQuickImtConflictStub();
@@ -1037,8 +1038,7 @@ const OatFile* ClassLinker::FindOatFileInOatLocationForDexFile(const char* dex_l
const char* oat_location,
std::string* error_msg) {
std::unique_ptr<OatFile> oat_file(OatFile::Open(oat_location, oat_location, nullptr, nullptr,
- !Runtime::Current()->IsCompiler(),
- error_msg));
+ !Runtime::Current()->IsAotCompiler(), error_msg));
if (oat_file.get() == nullptr) {
*error_msg = StringPrintf("Failed to find existing oat file at %s: %s", oat_location,
error_msg->c_str());
@@ -1109,8 +1109,8 @@ const OatFile* ClassLinker::CreateOatFileForDexLocation(const char* dex_location
return nullptr;
}
std::unique_ptr<OatFile> oat_file(OatFile::Open(oat_location, oat_location, nullptr, nullptr,
- !Runtime::Current()->IsCompiler(),
- &error_msg));
+ !Runtime::Current()->IsAotCompiler(),
+ &error_msg));
if (oat_file.get() == nullptr) {
std::string compound_msg = StringPrintf("\nFailed to open generated oat file '%s': %s",
oat_location, error_msg.c_str());
@@ -1350,7 +1350,7 @@ const OatFile* ClassLinker::OpenOatFileFromDexLocation(const std::string& dex_lo
*already_opened = false;
const Runtime* runtime = Runtime::Current();
CHECK(runtime != nullptr);
- bool executable = !runtime->IsCompiler();
+ bool executable = !runtime->IsAotCompiler();
std::string odex_error_msg;
bool should_patch_system = false;
@@ -1518,7 +1518,7 @@ const OatFile* ClassLinker::PatchAndRetrieveOat(const std::string& input_oat,
bool success = Exec(argv, error_msg);
if (success) {
std::unique_ptr<OatFile> output(OatFile::Open(output_oat, output_oat, nullptr, nullptr,
- !runtime->IsCompiler(), error_msg));
+ !runtime->IsAotCompiler(), error_msg));
bool checksum_verified = false;
if (output.get() != nullptr && CheckOatFile(runtime, output.get(), isa, &checksum_verified,
error_msg)) {
@@ -1532,7 +1532,7 @@ const OatFile* ClassLinker::PatchAndRetrieveOat(const std::string& input_oat,
"but was unable to open output file '%s': %s",
input_oat.c_str(), output_oat.c_str(), error_msg->c_str());
}
- } else if (!runtime->IsCompiler()) {
+ } else if (!runtime->IsAotCompiler()) {
// patchoat failed which means we probably don't have enough room to place the output oat file,
// instead of failing we should just run the interpreter from the dex files in the input oat.
LOG(WARNING) << "Patching of oat file '" << input_oat << "' failed. Attempting to use oat file "
@@ -1619,22 +1619,20 @@ const OatFile* ClassLinker::FindOatFileFromOatLocation(const std::string& oat_lo
if (oat_file != nullptr) {
return oat_file;
}
-
- return OatFile::Open(oat_location, oat_location, nullptr, nullptr, !Runtime::Current()->IsCompiler(),
- error_msg);
+ return OatFile::Open(oat_location, oat_location, nullptr, nullptr,
+ !Runtime::Current()->IsAotCompiler(), error_msg);
}
void ClassLinker::InitFromImageInterpretOnlyCallback(mirror::Object* obj, void* arg) {
ClassLinker* class_linker = reinterpret_cast<ClassLinker*>(arg);
DCHECK(obj != nullptr);
DCHECK(class_linker != nullptr);
- size_t pointer_size = class_linker->image_pointer_size_;
-
if (obj->IsArtMethod()) {
mirror::ArtMethod* method = obj->AsArtMethod();
if (!method->IsNative()) {
+ const size_t pointer_size = class_linker->image_pointer_size_;
method->SetEntryPointFromInterpreterPtrSize(artInterpreterToInterpreterBridge, pointer_size);
- if (method != Runtime::Current()->GetResolutionMethod()) {
+ if (!method->IsRuntimeMethod() && method != Runtime::Current()->GetResolutionMethod()) {
method->SetEntryPointFromQuickCompiledCodePtrSize(GetQuickToInterpreterBridge(),
pointer_size);
}
@@ -1703,8 +1701,8 @@ void ClassLinker::InitFromImage() {
// bitmap walk.
mirror::ArtMethod::SetClass(GetClassRoot(kJavaLangReflectArtMethod));
size_t art_method_object_size = mirror::ArtMethod::GetJavaLangReflectArtMethod()->GetObjectSize();
- if (!Runtime::Current()->IsCompiler()) {
- // Compiler supports having an image with a different pointer size than the runtime. This
+ if (!Runtime::Current()->IsAotCompiler()) {
+ // Aot compiler supports having an image with a different pointer size than the runtime. This
// happens on the host for compile 32 bit tests since we use a 64 bit libart compiler. We may
// also use 32 bit dex2oat on a system with 64 bit apps.
CHECK_EQ(art_method_object_size, mirror::ArtMethod::InstanceSize(sizeof(void*)))
@@ -1719,7 +1717,7 @@ void ClassLinker::InitFromImage() {
// Set entry point to interpreter if in InterpretOnly mode.
Runtime* runtime = Runtime::Current();
- if (!runtime->IsCompiler() && runtime->GetInstrumentation()->InterpretOnly()) {
+ if (!runtime->IsAotCompiler() && runtime->GetInstrumentation()->InterpretOnly()) {
heap->VisitObjects(InitFromImageInterpretOnlyCallback, this);
}
@@ -2522,31 +2520,44 @@ const void* ClassLinker::GetQuickOatCodeFor(mirror::ArtMethod* method) {
return GetQuickProxyInvokeHandler();
}
bool found;
+ jit::Jit* const jit = Runtime::Current()->GetJit();
+ if (jit != nullptr) {
+ auto* code = jit->GetCodeCache()->GetCodeFor(method);
+ if (code != nullptr) {
+ return code;
+ }
+ }
OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
- const void* result = nullptr;
if (found) {
- result = oat_method.GetQuickCode();
- }
-
- if (result == nullptr) {
- if (method->IsNative()) {
- // No code and native? Use generic trampoline.
- result = GetQuickGenericJniStub();
- } else {
- // No code? You must mean to go into the interpreter.
- result = GetQuickToInterpreterBridge();
+ auto* code = oat_method.GetQuickCode();
+ if (code != nullptr) {
+ return code;
}
}
- return result;
+ if (method->IsNative()) {
+ // No code and native? Use generic trampoline.
+ return GetQuickGenericJniStub();
+ }
+ return GetQuickToInterpreterBridge();
}
const void* ClassLinker::GetOatMethodQuickCodeFor(mirror::ArtMethod* method) {
if (method->IsNative() || method->IsAbstract() || method->IsProxyMethod()) {
return nullptr;
}
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr) {
+ auto* code = jit->GetCodeCache()->GetCodeFor(method);
+ if (code != nullptr) {
+ return code;
+ }
+ }
bool found;
OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
- return found ? oat_method.GetQuickCode() : nullptr;
+ if (found) {
+ return oat_method.GetQuickCode();
+ }
+ return nullptr;
}
const void* ClassLinker::GetQuickOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx,
@@ -2582,7 +2593,7 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) {
}
Runtime* runtime = Runtime::Current();
if (!runtime->IsStarted() || runtime->UseCompileTimeClassPath()) {
- if (runtime->IsCompiler() || runtime->GetHeap()->HasImageSpace()) {
+ if (runtime->IsAotCompiler() || runtime->GetHeap()->HasImageSpace()) {
return; // OAT file unavailable.
}
}
@@ -2635,7 +2646,7 @@ void ClassLinker::LinkCode(Handle<mirror::ArtMethod> method,
const OatFile::OatClass* oat_class,
uint32_t class_def_method_index) {
Runtime* runtime = Runtime::Current();
- if (runtime->IsCompiler()) {
+ if (runtime->IsAotCompiler()) {
// The following code only applies to a non-compiler runtime.
return;
}
@@ -3474,7 +3485,7 @@ void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) {
EnsurePreverifiedMethods(klass);
return;
}
- if (klass->IsCompileTimeVerified() && Runtime::Current()->IsCompiler()) {
+ if (klass->IsCompileTimeVerified() && Runtime::Current()->IsAotCompiler()) {
return;
}
@@ -3490,7 +3501,7 @@ void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) {
} else {
CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusRetryVerificationAtRuntime)
<< PrettyClass(klass.Get());
- CHECK(!Runtime::Current()->IsCompiler());
+ CHECK(!Runtime::Current()->IsAotCompiler());
klass->SetStatus(mirror::Class::kStatusVerifyingAtRuntime, self);
}
@@ -3526,7 +3537,7 @@ void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) {
self->GetException(nullptr)->SetCause(cause.Get());
}
ClassReference ref(klass->GetDexCache()->GetDexFile(), klass->GetDexClassDefIndex());
- if (Runtime::Current()->IsCompiler()) {
+ if (Runtime::Current()->IsAotCompiler()) {
Runtime::Current()->GetCompilerCallbacks()->ClassRejected(ref);
}
klass->SetStatus(mirror::Class::kStatusError, self);
@@ -3551,7 +3562,7 @@ void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) {
std::string error_msg;
if (!preverified) {
verifier_failure = verifier::MethodVerifier::VerifyClass(self, klass.Get(),
- Runtime::Current()->IsCompiler(),
+ Runtime::Current()->IsAotCompiler(),
&error_msg);
}
if (preverified || verifier_failure != verifier::MethodVerifier::kHardFailure) {
@@ -3579,7 +3590,7 @@ void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) {
// Soft failures at compile time should be retried at runtime. Soft
// failures at runtime will be handled by slow paths in the generated
// code. Set status accordingly.
- if (Runtime::Current()->IsCompiler()) {
+ if (Runtime::Current()->IsAotCompiler()) {
klass->SetStatus(mirror::Class::kStatusRetryVerificationAtRuntime, self);
} else {
klass->SetStatus(mirror::Class::kStatusVerified, self);
@@ -3620,7 +3631,7 @@ bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class
// we are not compiling the image or if the class we're verifying is not part of
// the app. In other words, we will only check for preverification of bootclasspath
// classes.
- if (Runtime::Current()->IsCompiler()) {
+ if (Runtime::Current()->IsAotCompiler()) {
// Are we compiling the bootclasspath?
if (!Runtime::Current()->UseCompileTimeClassPath()) {
return false;
@@ -3646,7 +3657,7 @@ bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class
// image (that we just failed loading), and the verifier can't be run on quickened opcodes when
// the runtime isn't started. On the other hand, app classes can be re-verified even if they are
// already pre-opted, as then the runtime is started.
- if (!Runtime::Current()->IsCompiler() &&
+ if (!Runtime::Current()->IsAotCompiler() &&
!Runtime::Current()->GetHeap()->HasImageSpace() &&
klass->GetClassLoader() != nullptr) {
return false;
@@ -4094,7 +4105,7 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
CHECK(self->IsExceptionPending());
VlogClassInitializationFailure(klass);
} else {
- CHECK(Runtime::Current()->IsCompiler());
+ CHECK(Runtime::Current()->IsAotCompiler());
CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusRetryVerificationAtRuntime);
}
return false;
@@ -4275,7 +4286,8 @@ bool ClassLinker::WaitForInitializeClass(Handle<mirror::Class> klass, Thread* se
if (klass->GetStatus() == mirror::Class::kStatusInitializing) {
continue;
}
- if (klass->GetStatus() == mirror::Class::kStatusVerified && Runtime::Current()->IsCompiler()) {
+ if (klass->GetStatus() == mirror::Class::kStatusVerified &&
+ Runtime::Current()->IsAotCompiler()) {
// Compile time initialization failed.
return false;
}
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index a3d3b47..f33c0f8 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -287,6 +287,13 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
Dbg::PostException(throw_location, catch_method, catch_dex_pc, exception_object);
}
+ // We only care about how many backward branches were executed in the Jit.
+ void BackwardBranch(Thread* /*thread*/, mirror::ArtMethod* method, int32_t dex_pc_offset)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ LOG(ERROR) << "Unexpected backward branch event in debugger " << PrettyMethod(method)
+ << " " << dex_pc_offset;
+ }
+
private:
DISALLOW_COPY_AND_ASSIGN(DebugInstrumentationListener);
} gDebugInstrumentationListener;
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index e121a08..c8ede48 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -1028,6 +1028,13 @@ class DexFile {
// from an oat file, e.g., directly from an apk.
const OatFile* oat_file_;
};
+
+struct DexFileReference {
+ DexFileReference(const DexFile* file, uint32_t idx) : dex_file(file), index(idx) { }
+ const DexFile* dex_file;
+ uint32_t index;
+};
+
std::ostream& operator<<(std::ostream& os, const DexFile& dex_file);
// Iterate over a dex file's ProtoId's paramters
diff --git a/runtime/dex_instruction_utils.h b/runtime/dex_instruction_utils.h
new file mode 100644
index 0000000..1a671c5
--- /dev/null
+++ b/runtime/dex_instruction_utils.h
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_DEX_INSTRUCTION_UTILS_H_
+#define ART_RUNTIME_DEX_INSTRUCTION_UTILS_H_
+
+#include "dex_instruction.h"
+
+namespace art {
+
+// Dex invoke type corresponds to the ordering of INVOKE instructions;
+// this order is the same for range and non-range invokes.
+enum DexInvokeType : uint8_t {
+ kDexInvokeVirtual = 0, // invoke-virtual, invoke-virtual-range
+ kDexInvokeSuper, // invoke-super, invoke-super-range
+ kDexInvokeDirect, // invoke-direct, invoke-direct-range
+ kDexInvokeStatic, // invoke-static, invoke-static-range
+ kDexInvokeInterface, // invoke-interface, invoke-interface-range
+ kDexInvokeTypeCount
+};
+
+// Dex instruction memory access types correspond to the ordering of GET/PUT instructions;
+// this order is the same for IGET, IPUT, SGET, SPUT, AGET and APUT.
+enum DexMemAccessType : uint8_t {
+ kDexMemAccessWord = 0, // op 0; int or float, the actual type is not encoded.
+ kDexMemAccessWide, // op_WIDE 1; long or double, the actual type is not encoded.
+ kDexMemAccessObject, // op_OBJECT 2; the actual reference type is not encoded.
+ kDexMemAccessBoolean, // op_BOOLEAN 3
+ kDexMemAccessByte, // op_BYTE 4
+ kDexMemAccessChar, // op_CHAR 5
+ kDexMemAccessShort, // op_SHORT 6
+ kDexMemAccessTypeCount
+};
+
+std::ostream& operator<<(std::ostream& os, const DexMemAccessType& type);
+
+// NOTE: The following functions disregard quickened instructions.
+
+constexpr bool IsInstructionReturn(Instruction::Code opcode) {
+ return Instruction::RETURN_VOID <= opcode && opcode <= Instruction::RETURN_OBJECT;
+}
+
+constexpr bool IsInstructionInvoke(Instruction::Code opcode) {
+ return Instruction::INVOKE_VIRTUAL <= opcode && opcode <= Instruction::INVOKE_INTERFACE_RANGE &&
+ opcode != Instruction::RETURN_VOID_BARRIER;
+}
+
+constexpr bool IsInstructionQuickInvoke(Instruction::Code opcode) {
+ return opcode == Instruction::INVOKE_VIRTUAL_QUICK ||
+ opcode == Instruction::INVOKE_VIRTUAL_RANGE_QUICK;
+}
+
+constexpr bool IsInstructionInvokeStatic(Instruction::Code opcode) {
+ return opcode == Instruction::INVOKE_STATIC || opcode == Instruction::INVOKE_STATIC_RANGE;
+}
+
+constexpr bool IsInstructionGoto(Instruction::Code opcode) {
+ return Instruction::GOTO <= opcode && opcode <= Instruction::GOTO_32;
+}
+
+constexpr bool IsInstructionIfCc(Instruction::Code opcode) {
+ return Instruction::IF_EQ <= opcode && opcode <= Instruction::IF_LE;
+}
+
+constexpr bool IsInstructionIfCcZ(Instruction::Code opcode) {
+ return Instruction::IF_EQZ <= opcode && opcode <= Instruction::IF_LEZ;
+}
+
+constexpr bool IsInstructionIGet(Instruction::Code code) {
+ return Instruction::IGET <= code && code <= Instruction::IGET_SHORT;
+}
+
+constexpr bool IsInstructionIPut(Instruction::Code code) {
+ return Instruction::IPUT <= code && code <= Instruction::IPUT_SHORT;
+}
+
+constexpr bool IsInstructionSGet(Instruction::Code code) {
+ return Instruction::SGET <= code && code <= Instruction::SGET_SHORT;
+}
+
+constexpr bool IsInstructionSPut(Instruction::Code code) {
+ return Instruction::SPUT <= code && code <= Instruction::SPUT_SHORT;
+}
+
+constexpr bool IsInstructionAGet(Instruction::Code code) {
+ return Instruction::AGET <= code && code <= Instruction::AGET_SHORT;
+}
+
+constexpr bool IsInstructionAPut(Instruction::Code code) {
+ return Instruction::APUT <= code && code <= Instruction::APUT_SHORT;
+}
+
+constexpr bool IsInstructionIGetOrIPut(Instruction::Code code) {
+ return Instruction::IGET <= code && code <= Instruction::IPUT_SHORT;
+}
+
+constexpr bool IsInstructionIGetQuickOrIPutQuick(Instruction::Code code) {
+ return (code >= Instruction::IGET_QUICK && code <= Instruction::IPUT_OBJECT_QUICK) ||
+ (code >= Instruction::IPUT_BOOLEAN_QUICK && code <= Instruction::IGET_SHORT_QUICK);
+}
+
+constexpr bool IsInstructionSGetOrSPut(Instruction::Code code) {
+ return Instruction::SGET <= code && code <= Instruction::SPUT_SHORT;
+}
+
+constexpr bool IsInstructionAGetOrAPut(Instruction::Code code) {
+ return Instruction::AGET <= code && code <= Instruction::APUT_SHORT;
+}
+
+constexpr bool IsInstructionBinOp2Addr(Instruction::Code code) {
+ return Instruction::ADD_INT_2ADDR <= code && code <= Instruction::REM_DOUBLE_2ADDR;
+}
+
+// TODO: Remove the #if guards below when we fully migrate to C++14.
+
+constexpr bool IsInvokeInstructionRange(Instruction::Code opcode) {
+#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
+ DCHECK(IsInstructionInvoke(opcode));
+#endif
+ return opcode >= Instruction::INVOKE_VIRTUAL_RANGE;
+}
+
+constexpr DexInvokeType InvokeInstructionType(Instruction::Code opcode) {
+#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
+ DCHECK(IsInstructionInvoke(opcode));
+#endif
+ return static_cast<DexInvokeType>(IsInvokeInstructionRange(opcode)
+ ? (opcode - Instruction::INVOKE_VIRTUAL_RANGE)
+ : (opcode - Instruction::INVOKE_VIRTUAL));
+}
+
+constexpr DexMemAccessType IGetMemAccessType(Instruction::Code code) {
+#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
+ DCHECK(IsInstructionIGet(code));
+#endif
+ return static_cast<DexMemAccessType>(code - Instruction::IGET);
+}
+
+constexpr DexMemAccessType IPutMemAccessType(Instruction::Code code) {
+#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
+ DCHECK(IsInstructionIPut(code));
+#endif
+ return static_cast<DexMemAccessType>(code - Instruction::IPUT);
+}
+
+constexpr DexMemAccessType SGetMemAccessType(Instruction::Code code) {
+#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
+ DCHECK(IsInstructionSGet(code));
+#endif
+ return static_cast<DexMemAccessType>(code - Instruction::SGET);
+}
+
+constexpr DexMemAccessType SPutMemAccessType(Instruction::Code code) {
+#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
+ DCHECK(IsInstructionSPut(code));
+#endif
+ return static_cast<DexMemAccessType>(code - Instruction::SPUT);
+}
+
+constexpr DexMemAccessType AGetMemAccessType(Instruction::Code code) {
+#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
+ DCHECK(IsInstructionAGet(code));
+#endif
+ return static_cast<DexMemAccessType>(code - Instruction::AGET);
+}
+
+constexpr DexMemAccessType APutMemAccessType(Instruction::Code code) {
+#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
+ DCHECK(IsInstructionAPut(code));
+#endif
+ return static_cast<DexMemAccessType>(code - Instruction::APUT);
+}
+
+constexpr DexMemAccessType IGetOrIPutMemAccessType(Instruction::Code code) {
+#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
+ DCHECK(IsInstructionIGetOrIPut(code));
+#endif
+ return (code >= Instruction::IPUT) ? IPutMemAccessType(code) : IGetMemAccessType(code);
+}
+
+static inline DexMemAccessType IGetQuickOrIPutQuickMemAccessType(Instruction::Code code) {
+ DCHECK(IsInstructionIGetQuickOrIPutQuick(code));
+ switch (code) {
+ case Instruction::IGET_QUICK: case Instruction::IPUT_QUICK:
+ return kDexMemAccessWord;
+ case Instruction::IGET_WIDE_QUICK: case Instruction::IPUT_WIDE_QUICK:
+ return kDexMemAccessWide;
+ case Instruction::IGET_OBJECT_QUICK: case Instruction::IPUT_OBJECT_QUICK:
+ return kDexMemAccessObject;
+ case Instruction::IGET_BOOLEAN_QUICK: case Instruction::IPUT_BOOLEAN_QUICK:
+ return kDexMemAccessBoolean;
+ case Instruction::IGET_BYTE_QUICK: case Instruction::IPUT_BYTE_QUICK:
+ return kDexMemAccessByte;
+ case Instruction::IGET_CHAR_QUICK: case Instruction::IPUT_CHAR_QUICK:
+ return kDexMemAccessChar;
+ case Instruction::IGET_SHORT_QUICK: case Instruction::IPUT_SHORT_QUICK:
+ return kDexMemAccessShort;
+ default:
+ LOG(FATAL) << code;
+ UNREACHABLE();
+ }
+}
+
+constexpr DexMemAccessType SGetOrSPutMemAccessType(Instruction::Code code) {
+#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
+ DCHECK(IsInstructionSGetOrSPut(code));
+#endif
+ return (code >= Instruction::SPUT) ? SPutMemAccessType(code) : SGetMemAccessType(code);
+}
+
+constexpr DexMemAccessType AGetOrAPutMemAccessType(Instruction::Code code) {
+#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
+ DCHECK(IsInstructionAGetOrAPut(code));
+#endif
+ return (code >= Instruction::APUT) ? APutMemAccessType(code) : AGetMemAccessType(code);
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_DEX_INSTRUCTION_UTILS_H_
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 419d555..9e159c2 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -655,7 +655,7 @@ void Heap::DumpObject(std::ostream& stream, mirror::Object* obj) {
}
bool Heap::IsCompilingBoot() const {
- if (!Runtime::Current()->IsCompiler()) {
+ if (!Runtime::Current()->IsAotCompiler()) {
return false;
}
for (const auto& space : continuous_spaces_) {
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index d873e6d..14f770d 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -788,7 +788,7 @@ OatFile* ImageSpace::OpenOatFile(const char* image_path, std::string* error_msg)
OatFile* oat_file = OatFile::Open(oat_filename, oat_filename, image_header.GetOatDataBegin(),
image_header.GetOatFileBegin(),
- !Runtime::Current()->IsCompiler(), error_msg);
+ !Runtime::Current()->IsAotCompiler(), error_msg);
if (oat_file == NULL) {
*error_msg = StringPrintf("Failed to open oat file '%s' referenced from image %s: %s",
oat_filename.c_str(), GetName(), error_msg->c_str());
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 90115c3..a054462 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -31,6 +31,8 @@
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc_root-inl.h"
#include "interpreter/interpreter.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
@@ -92,6 +94,16 @@ void Instrumentation::InstallStubsForClass(mirror::Class* klass) {
static void UpdateEntrypoints(mirror::ArtMethod* method, const void* quick_code)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Runtime* const runtime = Runtime::Current();
+ jit::Jit* jit = runtime->GetJit();
+ if (jit != nullptr) {
+ const void* old_code_ptr = method->GetEntryPointFromQuickCompiledCode();
+ jit::JitCodeCache* code_cache = jit->GetCodeCache();
+ if (code_cache->ContainsCodePtr(old_code_ptr)) {
+ // Save the old compiled code since we need it to implement ClassLinker::GetQuickOatCodeFor.
+ code_cache->SaveCompiledCode(method, old_code_ptr);
+ }
+ }
method->SetEntryPointFromQuickCompiledCode(quick_code);
if (!method->IsResolutionMethod()) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -120,7 +132,8 @@ void Instrumentation::InstallStubsForMethod(mirror::ArtMethod* method) {
}
const void* new_quick_code;
bool uninstall = !entry_exit_stubs_installed_ && !interpreter_stubs_installed_;
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ Runtime* const runtime = Runtime::Current();
+ ClassLinker* const class_linker = runtime->GetClassLinker();
bool is_class_initialized = method->GetDeclaringClass()->IsInitialized();
if (uninstall) {
if ((forced_interpret_only_ || IsDeoptimized(method)) && !method->IsNative()) {
@@ -143,7 +156,6 @@ void Instrumentation::InstallStubsForMethod(mirror::ArtMethod* method) {
new_quick_code = GetQuickInstrumentationEntryPoint();
} else {
new_quick_code = class_linker->GetQuickOatCodeFor(method);
- DCHECK(!class_linker->IsQuickToInterpreterBridge(new_quick_code));
}
} else {
new_quick_code = GetQuickResolutionStub();
@@ -397,6 +409,10 @@ void Instrumentation::AddListener(InstrumentationListener* listener, uint32_t ev
method_unwind_listeners_.push_back(listener);
have_method_unwind_listeners_ = true;
}
+ if ((events & kBackwardBranch) != 0) {
+ backward_branch_listeners_.push_back(listener);
+ have_backward_branch_listeners_ = true;
+ }
if ((events & kDexPcMoved) != 0) {
std::list<InstrumentationListener*>* modified;
if (have_dex_pc_listeners_) {
@@ -904,6 +920,13 @@ void Instrumentation::DexPcMovedEventImpl(Thread* thread, mirror::Object* this_o
}
}
+void Instrumentation::BackwardBranchImpl(Thread* thread, mirror::ArtMethod* method,
+ int32_t offset) const {
+ for (InstrumentationListener* listener : backward_branch_listeners_) {
+ listener->BackwardBranch(thread, method, offset);
+ }
+}
+
void Instrumentation::FieldReadEventImpl(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc,
mirror::ArtField* field) const {
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index cea0388..b667a40 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -94,6 +94,10 @@ struct InstrumentationListener {
mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
mirror::Throwable* exception_object)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+
+ // Call-back for when we get a backward branch.
+ virtual void BackwardBranch(Thread* thread, mirror::ArtMethod* method, int32_t dex_pc_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
};
// Instrumentation is a catch-all for when extra information is required from the runtime. The
@@ -103,13 +107,14 @@ struct InstrumentationListener {
class Instrumentation {
public:
enum InstrumentationEvent {
- kMethodEntered = 1, // 1 << 0
- kMethodExited = 2, // 1 << 1
- kMethodUnwind = 4, // 1 << 2
- kDexPcMoved = 8, // 1 << 3
- kFieldRead = 16, // 1 << 4,
- kFieldWritten = 32, // 1 << 5
- kExceptionCaught = 64, // 1 << 6
+ kMethodEntered = 0x1,
+ kMethodExited = 0x2,
+ kMethodUnwind = 0x4,
+ kDexPcMoved = 0x8,
+ kFieldRead = 0x10,
+ kFieldWritten = 0x20,
+ kExceptionCaught = 0x40,
+ kBackwardBranch = 0x80,
};
Instrumentation();
@@ -244,6 +249,10 @@ class Instrumentation {
return have_exception_caught_listeners_;
}
+ bool HasBackwardBranchListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return have_backward_branch_listeners_;
+ }
+
bool IsActive() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return have_dex_pc_listeners_ || have_method_entry_listeners_ || have_method_exit_listeners_ ||
have_field_read_listeners_ || have_field_write_listeners_ ||
@@ -284,6 +293,14 @@ class Instrumentation {
}
}
+ // Inform listeners that a backward branch has been taken (only supported by the interpreter).
+ void BackwardBranch(Thread* thread, mirror::ArtMethod* method, int32_t offset) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (UNLIKELY(HasBackwardBranchListeners())) {
+ BackwardBranchImpl(thread, method, offset);
+ }
+ }
+
// Inform listeners that we read a field (only supported by the interpreter).
void FieldReadEvent(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc,
@@ -361,6 +378,8 @@ class Instrumentation {
void DexPcMovedEventImpl(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void BackwardBranchImpl(Thread* thread, mirror::ArtMethod* method, int32_t offset) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FieldReadEventImpl(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc,
mirror::ArtField* field) const
@@ -429,10 +448,14 @@ class Instrumentation {
// Do we have any exception caught listeners? Short-cut to avoid taking the instrumentation_lock_.
bool have_exception_caught_listeners_ GUARDED_BY(Locks::mutator_lock_);
+ // Do we have any backward branch listeners? Short-cut to avoid taking the instrumentation_lock_.
+ bool have_backward_branch_listeners_ GUARDED_BY(Locks::mutator_lock_);
+
// The event listeners, written to with the mutator_lock_ exclusively held.
std::list<InstrumentationListener*> method_entry_listeners_ GUARDED_BY(Locks::mutator_lock_);
std::list<InstrumentationListener*> method_exit_listeners_ GUARDED_BY(Locks::mutator_lock_);
std::list<InstrumentationListener*> method_unwind_listeners_ GUARDED_BY(Locks::mutator_lock_);
+ std::list<InstrumentationListener*> backward_branch_listeners_ GUARDED_BY(Locks::mutator_lock_);
std::shared_ptr<std::list<InstrumentationListener*>> dex_pc_listeners_
GUARDED_BY(Locks::mutator_lock_);
std::shared_ptr<std::list<InstrumentationListener*>> field_read_listeners_
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index e4b3247..37324ea 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -54,6 +54,12 @@ namespace interpreter {
#define UPDATE_HANDLER_TABLE() \
currentHandlersTable = handlersTable[Runtime::Current()->GetInstrumentation()->GetInterpreterHandlerTable()]
+#define BACKWARD_BRANCH_INSTRUMENTATION(offset) \
+ do { \
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); \
+ instrumentation->BackwardBranch(self, shadow_frame.GetMethod(), offset); \
+ } while (false)
+
#define UNREACHABLE_CODE_CHECK() \
do { \
if (kIsDebugBuild) { \
@@ -135,7 +141,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
}
};
- const bool do_assignability_check = do_access_check;
+ constexpr bool do_assignability_check = do_access_check;
if (UNLIKELY(!shadow_frame.HasReferenceArray())) {
LOG(FATAL) << "Invalid shadow frame for interpreter use";
return JValue();
@@ -608,6 +614,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(GOTO) {
int8_t offset = inst->VRegA_10t(inst_data);
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -620,6 +627,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(GOTO_16) {
int16_t offset = inst->VRegA_20t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -632,6 +640,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(GOTO_32) {
int32_t offset = inst->VRegA_30t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -644,6 +653,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(PACKED_SWITCH) {
int32_t offset = DoPackedSwitch(inst, shadow_frame, inst_data);
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -656,6 +666,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(SPARSE_SWITCH) {
int32_t offset = DoSparseSwitch(inst, shadow_frame, inst_data);
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -758,6 +769,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) == shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -774,6 +786,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) != shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -790,6 +803,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) < shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -806,6 +820,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -822,6 +837,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) > shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -838,6 +854,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -854,6 +871,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) == 0) {
int16_t offset = inst->VRegB_21t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -870,6 +888,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) != 0) {
int16_t offset = inst->VRegB_21t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -886,6 +905,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) < 0) {
int16_t offset = inst->VRegB_21t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -902,6 +922,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) >= 0) {
int16_t offset = inst->VRegB_21t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -918,6 +939,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) > 0) {
int16_t offset = inst->VRegB_21t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
@@ -934,6 +956,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) <= 0) {
int16_t offset = inst->VRegB_21t();
if (IsBackwardBranch(offset)) {
+ BACKWARD_BRANCH_INSTRUMENTATION(offset);
if (UNLIKELY(self->TestAllFlags())) {
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index ea7c192..08332d3 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -337,7 +337,8 @@ class JII {
thread_group = args->group;
}
- if (!runtime->AttachCurrentThread(thread_name, as_daemon, thread_group, !runtime->IsCompiler())) {
+ if (!runtime->AttachCurrentThread(thread_name, as_daemon, thread_group,
+ !runtime->IsAotCompiler())) {
*p_env = nullptr;
return JNI_ERR;
} else {
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
new file mode 100644
index 0000000..539c181
--- /dev/null
+++ b/runtime/jit/jit.cc
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jit.h"
+
+#include <dlfcn.h>
+
+#include "entrypoints/runtime_asm_entrypoints.h"
+#include "interpreter/interpreter.h"
+#include "jit_code_cache.h"
+#include "jit_instrumentation.h"
+#include "mirror/art_method-inl.h"
+#include "runtime.h"
+#include "runtime_options.h"
+#include "thread_list.h"
+#include "utils.h"
+
+namespace art {
+namespace jit {
+
+JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) {
+ if (!options.GetOrDefault(RuntimeArgumentMap::UseJIT)) {
+ return nullptr;
+ }
+ auto* jit_options = new JitOptions;
+ jit_options->code_cache_capacity_ =
+ options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheCapacity);
+ jit_options->compile_threshold_ =
+ options.GetOrDefault(RuntimeArgumentMap::JITCompileThreshold);
+ return jit_options;
+}
+
+Jit::Jit()
+ : jit_library_handle_(nullptr), jit_compiler_handle_(nullptr), jit_load_(nullptr),
+ jit_compile_method_(nullptr) {
+}
+
+Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
+ std::unique_ptr<Jit> jit(new Jit);
+ if (!jit->LoadCompiler(error_msg)) {
+ return nullptr;
+ }
+ jit->code_cache_.reset(JitCodeCache::Create(options->GetCodeCacheCapacity(), error_msg));
+ if (jit->GetCodeCache() == nullptr) {
+ return nullptr;
+ }
+ LOG(INFO) << "JIT created with code_cache_capacity="
+ << PrettySize(options->GetCodeCacheCapacity())
+ << " compile_threshold=" << options->GetCompileThreshold();
+ return jit.release();
+}
+
+bool Jit::LoadCompiler(std::string* error_msg) {
+ jit_library_handle_ = dlopen(
+ kIsDebugBuild ? "libartd-compiler.so" : "libart-compiler.so", RTLD_NOW);
+ if (jit_library_handle_ == nullptr) {
+ std::ostringstream oss;
+ oss << "JIT could not load libart-compiler.so: " << dlerror();
+ *error_msg = oss.str();
+ return false;
+ }
+ jit_load_ = reinterpret_cast<void* (*)(CompilerCallbacks**)>(
+ dlsym(jit_library_handle_, "jit_load"));
+ if (jit_load_ == nullptr) {
+ dlclose(jit_library_handle_);
+ *error_msg = "JIT couldn't find jit_load entry point";
+ return false;
+ }
+ jit_unload_ = reinterpret_cast<void (*)(void*)>(
+ dlsym(jit_library_handle_, "jit_unload"));
+ if (jit_unload_ == nullptr) {
+ dlclose(jit_library_handle_);
+ *error_msg = "JIT couldn't find jit_unload entry point";
+ return false;
+ }
+ jit_compile_method_ = reinterpret_cast<bool (*)(void*, mirror::ArtMethod*, Thread*)>(
+ dlsym(jit_library_handle_, "jit_compile_method"));
+ if (jit_compile_method_ == nullptr) {
+ dlclose(jit_library_handle_);
+ *error_msg = "JIT couldn't find jit_compile_method entry point";
+ return false;
+ }
+ CompilerCallbacks* callbacks = nullptr;
+ VLOG(jit) << "Calling JitLoad interpreter_only="
+ << Runtime::Current()->GetInstrumentation()->InterpretOnly();
+ jit_compiler_handle_ = (jit_load_)(&callbacks);
+ if (jit_compiler_handle_ == nullptr) {
+ dlclose(jit_library_handle_);
+ *error_msg = "JIT couldn't load compiler";
+ return false;
+ }
+ if (callbacks == nullptr) {
+ dlclose(jit_library_handle_);
+ *error_msg = "JIT compiler callbacks were not set";
+ jit_compiler_handle_ = nullptr;
+ return false;
+ }
+ compiler_callbacks_ = callbacks;
+ return true;
+}
+
+bool Jit::CompileMethod(mirror::ArtMethod* method, Thread* self) {
+ DCHECK(!method->IsRuntimeMethod());
+ const bool result = jit_compile_method_(jit_compiler_handle_, method, self);
+ if (result) {
+ method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
+ }
+ return result;
+}
+
+void Jit::CreateThreadPool() {
+ CHECK(instrumentation_cache_.get() != nullptr);
+ instrumentation_cache_->CreateThreadPool();
+}
+
+void Jit::DeleteThreadPool() {
+ if (instrumentation_cache_.get() != nullptr) {
+ instrumentation_cache_->DeleteThreadPool();
+ }
+}
+
+Jit::~Jit() {
+ DeleteThreadPool();
+ if (jit_compiler_handle_ != nullptr) {
+ jit_unload_(jit_compiler_handle_);
+ }
+ if (jit_library_handle_ != nullptr) {
+ dlclose(jit_library_handle_);
+ }
+}
+
+void Jit::CreateInstrumentationCache(size_t compile_threshold) {
+ CHECK_GT(compile_threshold, 0U);
+ Runtime* const runtime = Runtime::Current();
+ runtime->GetThreadList()->SuspendAll();
+ // Add Jit interpreter instrumentation, tells the interpreter when to notify the jit to compile
+ // something.
+ instrumentation_cache_.reset(new jit::JitInstrumentationCache(compile_threshold));
+ runtime->GetInstrumentation()->AddListener(
+ new jit::JitInstrumentationListener(instrumentation_cache_.get()),
+ instrumentation::Instrumentation::kMethodEntered |
+ instrumentation::Instrumentation::kBackwardBranch);
+ runtime->GetThreadList()->ResumeAll();
+}
+
+} // namespace jit
+} // namespace art
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
new file mode 100644
index 0000000..b80015f
--- /dev/null
+++ b/runtime/jit/jit.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JIT_JIT_H_
+#define ART_RUNTIME_JIT_JIT_H_
+
+#include <unordered_map>
+
+#include "instrumentation.h"
+
+#include "atomic.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "gc_root.h"
+#include "jni.h"
+#include "object_callbacks.h"
+#include "thread_pool.h"
+
+namespace art {
+
+class CompilerCallbacks;
+struct RuntimeArgumentMap;
+
+namespace jit {
+
+class JitCodeCache;
+class JitInstrumentationCache;
+class JitOptions;
+
+class Jit {
+ public:
+ static constexpr bool kStressMode = kIsDebugBuild;
+ static constexpr size_t kDefaultCompileThreshold = kStressMode ? 1 : 1000;
+
+ virtual ~Jit();
+ static Jit* Create(JitOptions* options, std::string* error_msg);
+ bool CompileMethod(mirror::ArtMethod* method, Thread* self)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void CreateInstrumentationCache(size_t compile_threshold);
+ void CreateThreadPool();
+ CompilerCallbacks* GetCompilerCallbacks() {
+ return compiler_callbacks_;
+ }
+ const JitCodeCache* GetCodeCache() const {
+ return code_cache_.get();
+ }
+ JitCodeCache* GetCodeCache() {
+ return code_cache_.get();
+ }
+ void DeleteThreadPool();
+
+ private:
+ Jit();
+ bool LoadCompiler(std::string* error_msg);
+
+ // JIT compiler
+ void* jit_library_handle_;
+ void* jit_compiler_handle_;
+ void* (*jit_load_)(CompilerCallbacks**);
+ void (*jit_unload_)(void*);
+ bool (*jit_compile_method_)(void*, mirror::ArtMethod*, Thread*);
+
+ std::unique_ptr<jit::JitInstrumentationCache> instrumentation_cache_;
+ std::unique_ptr<jit::JitCodeCache> code_cache_;
+ CompilerCallbacks* compiler_callbacks_; // Owned by the jit compiler.
+};
+
+class JitOptions {
+ public:
+ static JitOptions* CreateFromRuntimeArguments(const RuntimeArgumentMap& options);
+ size_t GetCompileThreshold() const {
+ return compile_threshold_;
+ }
+ size_t GetCodeCacheCapacity() const {
+ return code_cache_capacity_;
+ }
+
+ private:
+ size_t code_cache_capacity_;
+ size_t compile_threshold_;
+
+ JitOptions() : code_cache_capacity_(0), compile_threshold_(0) {
+ }
+};
+
+} // namespace jit
+} // namespace art
+
+#endif // ART_RUNTIME_JIT_JIT_H_
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
new file mode 100644
index 0000000..8d4965e
--- /dev/null
+++ b/runtime/jit/jit_code_cache.cc
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jit_code_cache.h"
+
+#include <sstream>
+
+#include "mem_map.h"
+#include "mirror/art_method-inl.h"
+#include "oat_file-inl.h"
+
+namespace art {
+namespace jit {
+
+JitCodeCache* JitCodeCache::Create(size_t capacity, std::string* error_msg) {
+ CHECK_GT(capacity, 0U);
+ CHECK_LT(capacity, kMaxCapacity);
+ std::string error_str;
+ // Map name specific for android_os_Debug.cpp accounting.
+ MemMap* map = MemMap::MapAnonymous("jit-code-cache", nullptr, capacity,
+ PROT_READ | PROT_WRITE | PROT_EXEC, false, &error_str);
+ if (map == nullptr) {
+ std::ostringstream oss;
+ oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
+ *error_msg = oss.str();
+ return nullptr;
+ }
+ return new JitCodeCache(map);
+}
+
+JitCodeCache::JitCodeCache(MemMap* mem_map)
+ : lock_("Jit code cache", kJitCodeCacheLock), num_methods_(0) {
+ VLOG(jit) << "Created jit code cache size=" << PrettySize(mem_map->Size());
+ mem_map_.reset(mem_map);
+ uint8_t* divider = mem_map->Begin() + RoundUp(mem_map->Size() / 4, kPageSize);
+ // Data cache is 1 / 4 of the map. TODO: Make this variable?
+ // Put data at the start.
+ data_cache_ptr_ = mem_map->Begin();
+ data_cache_end_ = divider;
+ data_cache_begin_ = data_cache_ptr_;
+ mprotect(data_cache_ptr_, data_cache_end_ - data_cache_begin_, PROT_READ | PROT_WRITE);
+ // Code cache after.
+ code_cache_begin_ = divider;
+ code_cache_ptr_ = divider;
+ code_cache_end_ = mem_map->End();
+}
+
+bool JitCodeCache::ContainsMethod(mirror::ArtMethod* method) const {
+ return ContainsCodePtr(method->GetEntryPointFromQuickCompiledCode());
+}
+
+bool JitCodeCache::ContainsCodePtr(const void* ptr) const {
+ return ptr >= code_cache_begin_ && ptr < code_cache_end_;
+}
+
+void JitCodeCache::FlushInstructionCache() {
+ UNIMPLEMENTED(FATAL);
+ // TODO: Investigate if we need to do this.
+ // __clear_cache(reinterpret_cast<char*>(code_cache_begin_), static_cast<int>(CodeCacheSize()));
+}
+
+uint8_t* JitCodeCache::ReserveCode(Thread* self, size_t size) {
+ MutexLock mu(self, lock_);
+ if (size > CodeCacheRemain()) {
+ return nullptr;
+ }
+ code_cache_ptr_ += size;
+ return code_cache_ptr_ - size;
+}
+
+uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
+ MutexLock mu(self, lock_);
+ const size_t size = end - begin;
+ if (size > DataCacheRemain()) {
+ return nullptr; // Out of space in the data cache.
+ }
+ std::copy(begin, end, data_cache_ptr_);
+ data_cache_ptr_ += size;
+ return data_cache_ptr_ - size;
+}
+
+const void* JitCodeCache::GetCodeFor(mirror::ArtMethod* method) {
+ const void* code = method->GetEntryPointFromQuickCompiledCode();
+ if (ContainsCodePtr(code)) {
+ return code;
+ }
+ MutexLock mu(Thread::Current(), lock_);
+ auto it = method_code_map_.find(method);
+ if (it != method_code_map_.end()) {
+ return it->second;
+ }
+ return nullptr;
+}
+
+void JitCodeCache::SaveCompiledCode(mirror::ArtMethod* method, const void* old_code_ptr) {
+ DCHECK_EQ(method->GetEntryPointFromQuickCompiledCode(), old_code_ptr);
+ DCHECK(ContainsCodePtr(old_code_ptr)) << PrettyMethod(method) << " old_code_ptr="
+ << old_code_ptr;
+ MutexLock mu(Thread::Current(), lock_);
+ auto it = method_code_map_.find(method);
+ if (it != method_code_map_.end()) {
+ return;
+ }
+ method_code_map_.Put(method, old_code_ptr);
+}
+
+} // namespace jit
+} // namespace art
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
new file mode 100644
index 0000000..aa8c717
--- /dev/null
+++ b/runtime/jit/jit_code_cache.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
+#define ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
+
+#include "instrumentation.h"
+
+#include "atomic.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "gc_root.h"
+#include "jni.h"
+#include "oat_file.h"
+#include "object_callbacks.h"
+#include "safe_map.h"
+#include "thread_pool.h"
+
+namespace art {
+
+class CompiledMethod;
+class CompilerCallbacks;
+
+namespace mirror {
+class ArtMethod;
+} // namespace mirror
+
+namespace jit {
+
+class JitInstrumentationCache;
+
+class JitCodeCache {
+ public:
+ static constexpr size_t kMaxCapacity = 1 * GB;
+ static constexpr size_t kDefaultCapacity = 2 * MB;
+
+ static JitCodeCache* Create(size_t capacity, std::string* error_msg);
+
+ const uint8_t* CodeCachePtr() const {
+ return code_cache_ptr_;
+ }
+ size_t CodeCacheSize() const {
+ return code_cache_ptr_ - code_cache_begin_;
+ }
+ size_t CodeCacheRemain() const {
+ return code_cache_end_ - code_cache_ptr_;
+ }
+ size_t DataCacheSize() const {
+ return data_cache_ptr_ - data_cache_begin_;
+ }
+ size_t DataCacheRemain() const {
+ return data_cache_end_ - data_cache_ptr_;
+ }
+ size_t NumMethods() const {
+ return num_methods_;
+ }
+
+ bool ContainsMethod(mirror::ArtMethod* method) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool ContainsCodePtr(const void* ptr) const;
+
+ uint8_t* ReserveCode(Thread* self, size_t size) LOCKS_EXCLUDED(lock_);
+
+ uint8_t* AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end)
+ LOCKS_EXCLUDED(lock_);
+
+ // Get code for a method, returns null if it is not in the jit cache.
+ const void* GetCodeFor(mirror::ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+
+ void SaveCompiledCode(mirror::ArtMethod* method, const void* old_code_ptr)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+
+ private:
+ // Takes ownership of code_mem_map.
+ explicit JitCodeCache(MemMap* code_mem_map);
+ void FlushInstructionCache();
+
+ Mutex lock_;
+ // Mem map which holds code and data. We do this since we need to have 32 bit offsets from method
+ // headers in code cache which point to things in the data cache. If the maps are more than 4GB
+ // apart, having multiple maps wouldn't work.
+ std::unique_ptr<MemMap> mem_map_;
+ // Code cache section.
+ uint8_t* code_cache_ptr_;
+ const uint8_t* code_cache_begin_;
+ const uint8_t* code_cache_end_;
+ // Data cache section.
+ uint8_t* data_cache_ptr_;
+ const uint8_t* data_cache_begin_;
+ const uint8_t* data_cache_end_;
+ size_t num_methods_;
+ // TODO: This relies on methods not moving.
+ // This map holds code for methods if they were deoptimized by the instrumentation stubs. This is
+ // required since we have to implement ClassLinker::GetQuickOatCodeFor for walking stacks.
+ SafeMap<mirror::ArtMethod*, const void*> method_code_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(JitCodeCache);
+};
+
+
+} // namespace jit
+} // namespace art
+
+#endif // ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
new file mode 100644
index 0000000..160e678
--- /dev/null
+++ b/runtime/jit/jit_instrumentation.cc
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jit_instrumentation.h"
+
+#include "jit.h"
+#include "jit_code_cache.h"
+#include "mirror/art_method-inl.h"
+#include "scoped_thread_state_change.h"
+
+namespace art {
+namespace jit {
+
+class JitCompileTask : public Task {
+ public:
+ explicit JitCompileTask(mirror::ArtMethod* method, JitInstrumentationCache* cache)
+ : method_(method), cache_(cache) {
+ }
+
+ virtual void Run(Thread* self) OVERRIDE {
+ ScopedObjectAccess soa(self);
+ VLOG(jit) << "JitCompileTask compiling method " << PrettyMethod(method_);
+ if (Runtime::Current()->GetJit()->CompileMethod(method_, self)) {
+ cache_->SignalCompiled(self, method_);
+ } else {
+ VLOG(jit) << "Failed to compile method " << PrettyMethod(method_);
+ }
+ }
+
+ virtual void Finalize() OVERRIDE {
+ delete this;
+ }
+
+ private:
+ mirror::ArtMethod* const method_;
+ JitInstrumentationCache* const cache_;
+};
+
+JitInstrumentationCache::JitInstrumentationCache(size_t hot_method_threshold)
+ : lock_("jit instrumentation lock"), hot_method_threshold_(hot_method_threshold) {
+}
+
+void JitInstrumentationCache::CreateThreadPool() {
+ thread_pool_.reset(new ThreadPool("Jit thread pool", 1));
+}
+
+void JitInstrumentationCache::DeleteThreadPool() {
+ thread_pool_.reset();
+}
+
+void JitInstrumentationCache::SignalCompiled(Thread* self, mirror::ArtMethod* method) {
+ ScopedObjectAccessUnchecked soa(self);
+ jmethodID method_id = soa.EncodeMethod(method);
+ MutexLock mu(self, lock_);
+ auto it = samples_.find(method_id);
+ if (it != samples_.end()) {
+ samples_.erase(it);
+ }
+}
+
+void JitInstrumentationCache::AddSamples(Thread* self, mirror::ArtMethod* method, size_t count) {
+ ScopedObjectAccessUnchecked soa(self);
+ // Since we don't have on-stack replacement, some methods can remain in the interpreter longer
+ // than we want resulting in samples even after the method is compiled.
+ if (method->IsClassInitializer() ||
+ Runtime::Current()->GetJit()->GetCodeCache()->ContainsMethod(method)) {
+ return;
+ }
+ jmethodID method_id = soa.EncodeMethod(method);
+ bool is_hot = false;
+ {
+ MutexLock mu(self, lock_);
+ size_t sample_count = 0;
+ auto it = samples_.find(method_id);
+ if (it != samples_.end()) {
+ it->second += count;
+ sample_count = it->second;
+ } else {
+ sample_count = count;
+ samples_.insert(std::make_pair(method_id, count));
+ }
+ // If we have enough samples, mark as hot and request Jit compilation.
+ if (sample_count >= hot_method_threshold_ && sample_count - count < hot_method_threshold_) {
+ is_hot = true;
+ }
+ }
+ if (is_hot) {
+ if (thread_pool_.get() != nullptr) {
+ thread_pool_->AddTask(self, new JitCompileTask(method->GetInterfaceMethodIfProxy(), this));
+ thread_pool_->StartWorkers(self);
+ } else {
+ VLOG(jit) << "Compiling hot method " << PrettyMethod(method);
+ Runtime::Current()->GetJit()->CompileMethod(method->GetInterfaceMethodIfProxy(), self);
+ }
+ }
+}
+
+JitInstrumentationListener::JitInstrumentationListener(JitInstrumentationCache* cache)
+ : instrumentation_cache_(cache) {
+ CHECK(instrumentation_cache_ != nullptr);
+}
+
+} // namespace jit
+} // namespace art
diff --git a/runtime/jit/jit_instrumentation.h b/runtime/jit/jit_instrumentation.h
new file mode 100644
index 0000000..9576f4b
--- /dev/null
+++ b/runtime/jit/jit_instrumentation.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JIT_JIT_INSTRUMENTATION_H_
+#define ART_RUNTIME_JIT_JIT_INSTRUMENTATION_H_
+
+#include <unordered_map>
+
+#include "instrumentation.h"
+
+#include "atomic.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "gc_root.h"
+#include "jni.h"
+#include "object_callbacks.h"
+#include "thread_pool.h"
+
+namespace art {
+namespace mirror {
+ class ArtField;
+ class ArtMethod;
+ class Class;
+ class Object;
+ class Throwable;
+} // namespace mirror
+union JValue;
+class Thread;
+class ThrowLocation;
+
+namespace jit {
+
+// Keeps track of which methods are hot.
+class JitInstrumentationCache {
+ public:
+ explicit JitInstrumentationCache(size_t hot_method_threshold);
+ void AddSamples(Thread* self, mirror::ArtMethod* method, size_t samples)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SignalCompiled(Thread* self, mirror::ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void CreateThreadPool();
+ void DeleteThreadPool();
+
+ private:
+ Mutex lock_;
+ std::unordered_map<jmethodID, size_t> samples_;
+ size_t hot_method_threshold_;
+ std::unique_ptr<ThreadPool> thread_pool_;
+};
+
+class JitInstrumentationListener : public instrumentation::InstrumentationListener {
+ public:
+ explicit JitInstrumentationListener(JitInstrumentationCache* cache);
+
+ virtual void MethodEntered(Thread* thread, mirror::Object* /*this_object*/,
+ mirror::ArtMethod* method, uint32_t /*dex_pc*/)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ instrumentation_cache_->AddSamples(thread, method, 1);
+ }
+ virtual void MethodExited(Thread* /*thread*/, mirror::Object* /*this_object*/,
+ mirror::ArtMethod* /*method*/, uint32_t /*dex_pc*/,
+ const JValue& /*return_value*/)
+ OVERRIDE { }
+ virtual void MethodUnwind(Thread* /*thread*/, mirror::Object* /*this_object*/,
+ mirror::ArtMethod* /*method*/, uint32_t /*dex_pc*/) OVERRIDE { }
+ virtual void FieldRead(Thread* /*thread*/, mirror::Object* /*this_object*/,
+ mirror::ArtMethod* /*method*/, uint32_t /*dex_pc*/,
+ mirror::ArtField* /*field*/) OVERRIDE { }
+ virtual void FieldWritten(Thread* /*thread*/, mirror::Object* /*this_object*/,
+ mirror::ArtMethod* /*method*/, uint32_t /*dex_pc*/,
+ mirror::ArtField* /*field*/, const JValue& /*field_value*/)
+ OVERRIDE { }
+ virtual void ExceptionCaught(Thread* /*thread*/, const ThrowLocation& /*throw_location*/,
+ mirror::ArtMethod* /*catch_method*/, uint32_t /*catch_dex_pc*/,
+ mirror::Throwable* /*exception_object*/) OVERRIDE { }
+
+ virtual void DexPcMoved(Thread* /*self*/, mirror::Object* /*this_object*/,
+ mirror::ArtMethod* /*method*/, uint32_t /*new_dex_pc*/) OVERRIDE { }
+
+ // We only care about how many dex instructions were executed in the Jit.
+ virtual void BackwardBranch(Thread* thread, mirror::ArtMethod* method, int32_t dex_pc_offset)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CHECK_LE(dex_pc_offset, 0);
+ instrumentation_cache_->AddSamples(thread, method, 1);
+ }
+
+ private:
+ JitInstrumentationCache* const instrumentation_cache_;
+};
+
+} // namespace jit
+} // namespace art
+
+#endif // ART_RUNTIME_JIT_JIT_INSTRUMENTATION_H_
diff --git a/runtime/mirror/art_field.cc b/runtime/mirror/art_field.cc
index 5a4ebd1..3cea4a1 100644
--- a/runtime/mirror/art_field.cc
+++ b/runtime/mirror/art_field.cc
@@ -44,7 +44,7 @@ void ArtField::ResetClass() {
void ArtField::SetOffset(MemberOffset num_bytes) {
DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous());
- if (kIsDebugBuild && Runtime::Current()->IsCompiler() &&
+ if (kIsDebugBuild && Runtime::Current()->IsAotCompiler() &&
!Runtime::Current()->UseCompileTimeClassPath()) {
Primitive::Type type = GetTypeAsPrimitiveType();
if (type == Primitive::kPrimDouble || type == Primitive::kPrimLong) {
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 7d31148..c27c6e9 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -147,7 +147,10 @@ inline mirror::Class* ArtMethod::GetClassFromTypeIndex(uint16_t type_idx, bool r
inline uint32_t ArtMethod::GetCodeSize() {
DCHECK(!IsRuntimeMethod() && !IsProxyMethod()) << PrettyMethod(this);
- const void* code = EntryPointToCodePointer(GetEntryPointFromQuickCompiledCode());
+ return GetCodeSize(EntryPointToCodePointer(GetEntryPointFromQuickCompiledCode()));
+}
+
+inline uint32_t ArtMethod::GetCodeSize(const void* code) {
if (code == nullptr) {
return 0u;
}
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index b2016dc..6259745 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -27,6 +27,8 @@
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "interpreter/interpreter.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
#include "jni_internal.h"
#include "mapping_table.h"
#include "object_array-inl.h"
@@ -229,6 +231,7 @@ uint32_t ArtMethod::ToDexPc(const uintptr_t pc, bool abort_on_failure) {
if (abort_on_failure) {
LOG(FATAL) << "Failed to find Dex offset for PC offset " << reinterpret_cast<void*>(sought_offset)
<< "(PC " << reinterpret_cast<void*>(pc) << ", entry_point=" << entry_point
+ << " current entry_point=" << GetQuickOatEntryPoint(sizeof(void*))
<< ") in " << PrettyMethod(this);
}
return DexFile::kDexNoIndex;
@@ -329,6 +332,13 @@ void ArtMethod::AssertPcIsWithinQuickCode(uintptr_t pc) {
class_linker->IsQuickResolutionStub(code)) {
return;
}
+ // If we are the JIT then we may have just compiled the method after the
+ // IsQuickToInterpreterBridge check.
+ jit::Jit* const jit = Runtime::Current()->GetJit();
+ if (jit != nullptr &&
+ jit->GetCodeCache()->ContainsCodePtr(reinterpret_cast<const void*>(code))) {
+ return;
+ }
/*
* During a stack walk, a return PC may point past-the-end of the code
* in the case that the last instruction is a call that isn't expected to
@@ -336,11 +346,11 @@ void ArtMethod::AssertPcIsWithinQuickCode(uintptr_t pc) {
*
* NOTE: For Thumb both pc and code are offset by 1 indicating the Thumb state.
*/
- CHECK(PcIsWithinQuickCode(pc))
+ CHECK(PcIsWithinQuickCode(reinterpret_cast<uintptr_t>(code), pc))
<< PrettyMethod(this)
<< " pc=" << std::hex << pc
<< " code=" << code
- << " size=" << GetCodeSize();
+ << " size=" << GetCodeSize(reinterpret_cast<const void*>(code));
}
bool ArtMethod::IsEntrypointInterpreter() {
@@ -410,7 +420,8 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue*
}
// Ensure that we won't be accidentally calling quick compiled code when -Xint.
- if (kIsDebugBuild && Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly()) {
+ if (kIsDebugBuild && runtime->GetInstrumentation()->IsForcedInterpretOnly()) {
+ DCHECK(!runtime->UseJit());
CHECK(IsEntrypointInterpreter())
<< "Don't call compiled code when -Xint " << PrettyMethod(this);
}
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index f33ca94..019fdcd 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -305,18 +305,8 @@ class MANAGED ArtMethod FINAL : public Object {
// quick entrypoint. This code isn't robust for instrumentation, etc. and is only used for
// debug purposes.
bool PcIsWithinQuickCode(uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- uintptr_t code = reinterpret_cast<uintptr_t>(GetEntryPointFromQuickCompiledCode());
- if (code == 0) {
- return pc == 0;
- }
- /*
- * During a stack walk, a return PC may point past-the-end of the code
- * in the case that the last instruction is a call that isn't expected to
- * return. Thus, we check <= code + GetCodeSize().
- *
- * NOTE: For Thumb both pc and code are offset by 1 indicating the Thumb state.
- */
- return code <= pc && pc <= code + GetCodeSize();
+ return PcIsWithinQuickCode(
+ reinterpret_cast<uintptr_t>(GetEntryPointFromQuickCompiledCode()), pc);
}
void AssertPcIsWithinQuickCode(uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -622,6 +612,23 @@ class MANAGED ArtMethod FINAL : public Object {
return offset;
}
+ // Code points to the start of the quick code.
+ static uint32_t GetCodeSize(const void* code);
+
+ static bool PcIsWithinQuickCode(uintptr_t code, uintptr_t pc) {
+ if (code == 0) {
+ return pc == 0;
+ }
+ /*
+ * During a stack walk, a return PC may point past-the-end of the code
+ * in the case that the last instruction is a call that isn't expected to
+ * return. Thus, we check <= code + GetCodeSize().
+ *
+ * NOTE: For Thumb both pc and code are offset by 1 indicating the Thumb state.
+ */
+ return code <= pc && pc <= code + GetCodeSize(reinterpret_cast<const void*>(code));
+ }
+
friend struct art::ArtMethodOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(ArtMethod);
};
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 9061bb3..3192e03 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -577,12 +577,12 @@ const OatFile::OatMethod OatFile::OatClass::GetOatMethod(uint32_t method_index)
}
if (oat_file_->IsExecutable() ||
Runtime::Current() == nullptr || // This case applies for oatdump.
- Runtime::Current()->IsCompiler()) {
+ Runtime::Current()->IsAotCompiler()) {
return OatMethod(oat_file_->Begin(), oat_method_offsets->code_offset_);
- } else {
- // We aren't allowed to use the compiled code. We just force it down the interpreted version.
- return OatMethod(oat_file_->Begin(), 0);
}
+ // We aren't allowed to use the compiled code. We just force it down the interpreted / jit
+ // version.
+ return OatMethod(oat_file_->Begin(), 0);
}
void OatFile::OatMethod::LinkMethod(mirror::ArtMethod* method) const {
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 6ae3c3e..5e68439 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -147,8 +147,8 @@ class OatFile {
return reinterpret_cast<T>(begin_ + offset);
}
- const uint8_t* const begin_;
- const uint32_t code_offset_;
+ const uint8_t* begin_;
+ uint32_t code_offset_;
friend class OatClass;
};
diff --git a/runtime/object_lock.cc b/runtime/object_lock.cc
index f7accc0..749fb5d 100644
--- a/runtime/object_lock.cc
+++ b/runtime/object_lock.cc
@@ -47,6 +47,7 @@ void ObjectLock<T>::NotifyAll() {
obj_->NotifyAll(self_);
}
+template class ObjectLock<mirror::ArtMethod>;
template class ObjectLock<mirror::Class>;
template class ObjectLock<mirror::Object>;
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index c0c7baa..9d87ed7 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -146,6 +146,15 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize
.Define({"-XX:EnableHSpaceCompactForOOM", "-XX:DisableHSpaceCompactForOOM"})
.WithValues({true, false})
.IntoKey(M::EnableHSpaceCompactForOOM)
+ .Define({"-Xjit", "-Xnojit"})
+ .WithValues({true, false})
+ .IntoKey(M::UseJIT)
+ .Define("-Xjitcodecachesize:_")
+ .WithType<MemoryKiB>()
+ .IntoKey(M::JITCodeCacheCapacity)
+ .Define("-Xjitthreshold:_")
+ .WithType<unsigned int>()
+ .IntoKey(M::JITCompileThreshold)
.Define("-XX:HspaceCompactForOOMMinIntervalMs=_") // in ms
.WithType<MillisecondsToNanoseconds>() // store as ns
.IntoKey(M::HSpaceCompactForOOMMinIntervalsMs)
@@ -248,7 +257,7 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize
"-dsa", "-enablesystemassertions", "-disablesystemassertions", "-Xrs", "-Xint:_",
"-Xdexopt:_", "-Xnoquithandler", "-Xjnigreflimit:_", "-Xgenregmap", "-Xnogenregmap",
"-Xverifyopt:_", "-Xcheckdexsum", "-Xincludeselectedop", "-Xjitop:_",
- "-Xincludeselectedmethod", "-Xjitthreshold:_", "-Xjitcodecachesize:_",
+ "-Xincludeselectedmethod", "-Xjitthreshold:_",
"-Xjitblocking", "-Xjitmethod:_", "-Xjitclass:_", "-Xjitoffset:_",
"-Xjitconfig:_", "-Xjitcheckcg", "-Xjitverbose", "-Xjitprofile",
"-Xjitdisableopt", "-Xjitsuspendpoll", "-XX:mainThreadStackSize=_"})
@@ -353,19 +362,20 @@ bool ParsedOptions::ProcessSpecialOptions(const RuntimeOptions& options,
bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognized,
RuntimeArgumentMap* runtime_options) {
-// gLogVerbosity.class_linker = true; // TODO: don't check this in!
-// gLogVerbosity.compiler = true; // TODO: don't check this in!
-// gLogVerbosity.gc = true; // TODO: don't check this in!
-// gLogVerbosity.heap = true; // TODO: don't check this in!
-// gLogVerbosity.jdwp = true; // TODO: don't check this in!
-// gLogVerbosity.jni = true; // TODO: don't check this in!
-// gLogVerbosity.monitor = true; // TODO: don't check this in!
-// gLogVerbosity.profiler = true; // TODO: don't check this in!
-// gLogVerbosity.signals = true; // TODO: don't check this in!
-// gLogVerbosity.startup = true; // TODO: don't check this in!
-// gLogVerbosity.third_party_jni = true; // TODO: don't check this in!
-// gLogVerbosity.threads = true; // TODO: don't check this in!
-// gLogVerbosity.verifier = true; // TODO: don't check this in!
+ // gLogVerbosity.class_linker = true; // TODO: don't check this in!
+ // gLogVerbosity.compiler = true; // TODO: don't check this in!
+ // gLogVerbosity.gc = true; // TODO: don't check this in!
+ // gLogVerbosity.heap = true; // TODO: don't check this in!
+ // gLogVerbosity.jdwp = true; // TODO: don't check this in!
+ // gLogVerbosity.jit = true; // TODO: don't check this in!
+ // gLogVerbosity.jni = true; // TODO: don't check this in!
+ // gLogVerbosity.monitor = true; // TODO: don't check this in!
+ // gLogVerbosity.profiler = true; // TODO: don't check this in!
+ // gLogVerbosity.signals = true; // TODO: don't check this in!
+ // gLogVerbosity.startup = true; // TODO: don't check this in!
+ // gLogVerbosity.third_party_jni = true; // TODO: don't check this in!
+ // gLogVerbosity.threads = true; // TODO: don't check this in!
+ // gLogVerbosity.verifier = true; // TODO: don't check this in!
for (size_t i = 0; i < options.size(); ++i) {
if (true && options[0].first == "-Xzygote") {
@@ -560,7 +570,7 @@ void ParsedOptions::Usage(const char* fmt, ...) {
UsageMessage(stream, "The following standard options are supported:\n");
UsageMessage(stream, " -classpath classpath (-cp classpath)\n");
UsageMessage(stream, " -Dproperty=value\n");
- UsageMessage(stream, " -verbose:tag ('gc', 'jni', or 'class')\n");
+ UsageMessage(stream, " -verbose:tag ('gc', 'jit', 'jni', or 'class')\n");
UsageMessage(stream, " -showversion\n");
UsageMessage(stream, " -help\n");
UsageMessage(stream, " -agentlib:jdwp=options\n");
@@ -590,6 +600,8 @@ void ParsedOptions::Usage(const char* fmt, ...) {
UsageMessage(stream, " -XX:ForegroundHeapGrowthMultiplier=doublevalue\n");
UsageMessage(stream, " -XX:LowMemoryMode\n");
UsageMessage(stream, " -Xprofile:{threadcpuclock,wallclock,dualclock}\n");
+ UsageMessage(stream, " -Xjitcodecachesize:N\n");
+ UsageMessage(stream, " -Xjitthreshold:integervalue\n");
UsageMessage(stream, "\n");
UsageMessage(stream, "The following unique to ART options are supported:\n");
@@ -630,6 +642,8 @@ void ParsedOptions::Usage(const char* fmt, ...) {
UsageMessage(stream, " -Xcompiler-option dex2oat-option\n");
UsageMessage(stream, " -Ximage-compiler-option dex2oat-option\n");
UsageMessage(stream, " -Xpatchoat:filename\n");
+ UsageMessage(stream, " -Xjit\n");
+ UsageMessage(stream, " -Xnojit\n");
UsageMessage(stream, " -X[no]relocate\n");
UsageMessage(stream, " -X[no]dex2oat (Whether to invoke dex2oat on the application)\n");
UsageMessage(stream, " -X[no]image-dex2oat (Whether to create and use a boot image)\n");
@@ -659,8 +673,6 @@ void ParsedOptions::Usage(const char* fmt, ...) {
UsageMessage(stream, " -Xincludeselectedop\n");
UsageMessage(stream, " -Xjitop:hexopvalue[-endvalue][,hexopvalue[-endvalue]]*\n");
UsageMessage(stream, " -Xincludeselectedmethod\n");
- UsageMessage(stream, " -Xjitthreshold:integervalue\n");
- UsageMessage(stream, " -Xjitcodecachesize:decimalvalueofkbytes\n");
UsageMessage(stream, " -Xjitblocking\n");
UsageMessage(stream, " -Xjitmethod:signature[,signature]* (eg Ljava/lang/String\\;replace)\n");
UsageMessage(stream, " -Xjitclass:classname[,classname]*\n");
diff --git a/runtime/profiler.cc b/runtime/profiler.cc
index c3bdcb1..db372c3 100644
--- a/runtime/profiler.cc
+++ b/runtime/profiler.cc
@@ -161,7 +161,7 @@ void* BackgroundMethodSamplingProfiler::RunProfilerThread(void* arg) {
CHECK(runtime->AttachCurrentThread("Profiler", true, runtime->GetSystemThreadGroup(),
- !runtime->IsCompiler()));
+ !runtime->IsAotCompiler()));
Thread* self = Thread::Current();
diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc
index d65b2d5..44e2844 100644
--- a/runtime/quick/inline_method_analyser.cc
+++ b/runtime/quick/inline_method_analyser.cc
@@ -77,7 +77,9 @@ bool InlineMethodAnalyser::AnalyseMethodCode(verifier::MethodVerifier* verifier,
InlineMethod* method) {
DCHECK(verifier != nullptr);
DCHECK_EQ(Runtime::Current()->IsCompiler(), method != nullptr);
- DCHECK_EQ(verifier->CanLoadClasses(), method != nullptr);
+ if (!Runtime::Current()->UseJit()) {
+ DCHECK_EQ(verifier->CanLoadClasses(), method != nullptr);
+ }
// We currently support only plain return or 2-instruction methods.
const DexFile::CodeItem* code_item = verifier->CodeItem();
@@ -110,6 +112,10 @@ bool InlineMethodAnalyser::AnalyseMethodCode(verifier::MethodVerifier* verifier,
case Instruction::IGET_CHAR:
case Instruction::IGET_SHORT:
case Instruction::IGET_WIDE:
+ // TODO: Add handling for JIT.
+ // case Instruction::IGET_QUICK:
+ // case Instruction::IGET_WIDE_QUICK:
+ // case Instruction::IGET_OBJECT_QUICK:
return AnalyseIGetMethod(verifier, method);
case Instruction::IPUT:
case Instruction::IPUT_OBJECT:
@@ -118,6 +124,10 @@ bool InlineMethodAnalyser::AnalyseMethodCode(verifier::MethodVerifier* verifier,
case Instruction::IPUT_CHAR:
case Instruction::IPUT_SHORT:
case Instruction::IPUT_WIDE:
+ // TODO: Add handling for JIT.
+ // case Instruction::IPUT_QUICK:
+ // case Instruction::IPUT_WIDE_QUICK:
+ // case Instruction::IPUT_OBJECT_QUICK:
return AnalyseIPutMethod(verifier, method);
default:
return false;
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 8cd9e24..8c5827c 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -65,6 +65,8 @@
#include "image.h"
#include "instrumentation.h"
#include "intern_table.h"
+#include "interpreter/interpreter.h"
+#include "jit/jit.h"
#include "jni_internal.h"
#include "mirror/array.h"
#include "mirror/art_field-inl.h"
@@ -229,6 +231,12 @@ Runtime::~Runtime() {
// Make sure to let the GC complete if it is running.
heap_->WaitForGcToComplete(gc::kGcCauseBackground, self);
heap_->DeleteThreadPool();
+ if (jit_.get() != nullptr) {
+ VLOG(jit) << "Deleting jit thread pool";
+ // Delete thread pool before the thread list since we don't want to wait forever on the
+ // JIT compiler threads.
+ jit_->DeleteThreadPool();
+ }
// Make sure our internal threads are dead before we start tearing down things they're using.
Dbg::StopJdwp();
@@ -237,6 +245,13 @@ Runtime::~Runtime() {
// Make sure all other non-daemon threads have terminated, and all daemon threads are suspended.
delete thread_list_;
+ // Delete the JIT after thread list to ensure that there is no remaining threads which could be
+ // accessing the instrumentation when we delete it.
+ if (jit_.get() != nullptr) {
+ VLOG(jit) << "Deleting jit";
+ jit_.reset(nullptr);
+ }
+
// Shutdown the fault manager if it was initialized.
fault_manager.Shutdown();
@@ -461,17 +476,24 @@ bool Runtime::Start() {
started_ = true;
- // Use !IsCompiler so that we get test coverage, tests are never the zygote.
- if (!IsCompiler()) {
+ // Use !IsAotCompiler so that we get test coverage, tests are never the zygote.
+ if (!IsAotCompiler()) {
ScopedObjectAccess soa(self);
gc::space::ImageSpace* image_space = heap_->GetImageSpace();
if (image_space != nullptr) {
- Runtime::Current()->GetInternTable()->AddImageStringsToTable(image_space);
- Runtime::Current()->GetClassLinker()->MoveImageClassesToClassTable();
+ GetInternTable()->AddImageStringsToTable(image_space);
+ GetClassLinker()->MoveImageClassesToClassTable();
}
}
- if (!IsImageDex2OatEnabled() || !Runtime::Current()->GetHeap()->HasImageSpace()) {
+ // If we are the zygote then we need to wait until after forking to create the code cache due to
+ // SELinux restrictions on r/w/x memory regions.
+ if (!IsZygote() && jit_.get() != nullptr) {
+ jit_->CreateInstrumentationCache(jit_options_->GetCompileThreshold());
+ jit_->CreateThreadPool();
+ }
+
+ if (!IsImageDex2OatEnabled() || !GetHeap()->HasImageSpace()) {
ScopedObjectAccess soa(self);
StackHandleScope<1> hs(soa.Self());
auto klass(hs.NewHandle<mirror::Class>(mirror::Class::GetJavaLangClass()));
@@ -590,8 +612,14 @@ void Runtime::DidForkFromZygote(JNIEnv* env, NativeBridgeAction action, const ch
}
}
- // Create the thread pool.
+ // Create the thread pools.
heap_->CreateThreadPool();
+ if (jit_options_.get() != nullptr && jit_.get() == nullptr) {
+ // Create the JIT if the flag is set and we haven't already created it (happens for run-tests).
+ CreateJit();
+ jit_->CreateInstrumentationCache(jit_options_->GetCompileThreshold());
+ jit_->CreateThreadPool();
+ }
StartSignalCatcher();
@@ -818,6 +846,17 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
Dbg::ConfigureJdwp(runtime_options.GetOrDefault(Opt::JdwpOptions));
}
+ if (!IsCompiler()) {
+ // If we are already the compiler at this point, we must be dex2oat. Don't create the jit in
+ // this case.
+ // If runtime_options doesn't have UseJIT set to true then CreateFromRuntimeArguments returns
+ // nullptr and we don't create the jit.
+ jit_options_.reset(jit::JitOptions::CreateFromRuntimeArguments(runtime_options));
+ }
+ if (!IsZygote() && jit_options_.get() != nullptr) {
+ CreateJit();
+ }
+
BlockSignals();
InitPlatformSignalHandlers();
@@ -1066,26 +1105,26 @@ void Runtime::InitThreadGroups(Thread* self) {
env->NewGlobalRef(env->GetStaticObjectField(
WellKnownClasses::java_lang_ThreadGroup,
WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup));
- CHECK(main_thread_group_ != NULL || IsCompiler());
+ CHECK(main_thread_group_ != NULL || IsAotCompiler());
system_thread_group_ =
env->NewGlobalRef(env->GetStaticObjectField(
WellKnownClasses::java_lang_ThreadGroup,
WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup));
- CHECK(system_thread_group_ != NULL || IsCompiler());
+ CHECK(system_thread_group_ != NULL || IsAotCompiler());
}
jobject Runtime::GetMainThreadGroup() const {
- CHECK(main_thread_group_ != NULL || IsCompiler());
+ CHECK(main_thread_group_ != NULL || IsAotCompiler());
return main_thread_group_;
}
jobject Runtime::GetSystemThreadGroup() const {
- CHECK(system_thread_group_ != NULL || IsCompiler());
+ CHECK(system_thread_group_ != NULL || IsAotCompiler());
return system_thread_group_;
}
jobject Runtime::GetSystemClassLoader() const {
- CHECK(system_class_loader_ != NULL || IsCompiler());
+ CHECK(system_class_loader_ != NULL || IsAotCompiler());
return system_class_loader_;
}
@@ -1341,7 +1380,7 @@ mirror::ArtMethod* Runtime::CreateImtConflictMethod() {
// TODO: use a special method for imt conflict method saves.
method->SetDexMethodIndex(DexFile::kDexNoIndex);
// When compiling, the code pointer will get set later when the image is loaded.
- if (runtime->IsCompiler()) {
+ if (runtime->IsAotCompiler()) {
size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
} else {
@@ -1350,6 +1389,10 @@ mirror::ArtMethod* Runtime::CreateImtConflictMethod() {
return method.Get();
}
+void Runtime::SetImtConflictMethod(mirror::ArtMethod* method) {
+ imt_conflict_method_ = GcRoot<mirror::ArtMethod>(method);
+}
+
mirror::ArtMethod* Runtime::CreateResolutionMethod() {
Thread* self = Thread::Current();
Runtime* runtime = Runtime::Current();
@@ -1360,7 +1403,7 @@ mirror::ArtMethod* Runtime::CreateResolutionMethod() {
// TODO: use a special method for resolution method saves
method->SetDexMethodIndex(DexFile::kDexNoIndex);
// When compiling, the code pointer will get set later when the image is loaded.
- if (runtime->IsCompiler()) {
+ if (runtime->IsAotCompiler()) {
size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
} else {
@@ -1491,14 +1534,14 @@ void Runtime::StartProfiler(const char* profile_output_filename) {
// Transaction support.
void Runtime::EnterTransactionMode(Transaction* transaction) {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(transaction != nullptr);
DCHECK(!IsActiveTransaction());
preinitialization_transaction_ = transaction;
}
void Runtime::ExitTransactionMode() {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_ = nullptr;
}
@@ -1558,51 +1601,51 @@ void Runtime::RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offs
void Runtime::RecordWriteField32(mirror::Object* obj, MemberOffset field_offset,
uint32_t value, bool is_volatile) const {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWriteField32(obj, field_offset, value, is_volatile);
}
void Runtime::RecordWriteField64(mirror::Object* obj, MemberOffset field_offset,
uint64_t value, bool is_volatile) const {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWriteField64(obj, field_offset, value, is_volatile);
}
void Runtime::RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
mirror::Object* value, bool is_volatile) const {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWriteFieldReference(obj, field_offset, value, is_volatile);
}
void Runtime::RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWriteArray(array, index, value);
}
void Runtime::RecordStrongStringInsertion(mirror::String* s) const {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordStrongStringInsertion(s);
}
void Runtime::RecordWeakStringInsertion(mirror::String* s) const {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWeakStringInsertion(s);
}
void Runtime::RecordStrongStringRemoval(mirror::String* s) const {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordStrongStringRemoval(s);
}
void Runtime::RecordWeakStringRemoval(mirror::String* s) const {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWeakStringRemoval(s);
}
@@ -1634,4 +1677,16 @@ void Runtime::AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::strin
void Runtime::UpdateProfilerState(int state) {
VLOG(profiler) << "Profiler state updated to " << state;
}
+
+void Runtime::CreateJit() {
+ CHECK(jit_options_.get() != nullptr);
+ std::string error_msg;
+ jit_.reset(jit::Jit::Create(jit_options_.get(), &error_msg));
+ if (jit_.get() != nullptr) {
+ compiler_callbacks_ = jit_->GetCompilerCallbacks();
+ } else {
+ LOG(WARNING) << "Failed to create JIT " << error_msg;
+ }
+}
+
} // namespace art
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 944c8bd..5078b7f 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -48,6 +48,12 @@ namespace gc {
class GarbageCollector;
} // namespace collector
} // namespace gc
+
+namespace jit {
+ class Jit;
+ class JitOptions;
+} // namespace jit
+
namespace mirror {
class ArtMethod;
class ClassLoader;
@@ -95,12 +101,18 @@ class Runtime {
static bool Create(const RuntimeOptions& options, bool ignore_unrecognized)
SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
+ // IsAotCompiler for compilers that don't have a running runtime. Only dex2oat currently.
+ bool IsAotCompiler() const {
+ return !UseJit() && IsCompiler();
+ }
+
+ // IsCompiler is any runtime which has a running compiler, either dex2oat or JIT.
bool IsCompiler() const {
return compiler_callbacks_ != nullptr;
}
bool CanRelocate() const {
- return !IsCompiler() || compiler_callbacks_->IsRelocationPossible();
+ return !IsAotCompiler() || compiler_callbacks_->IsRelocationPossible();
}
bool ShouldRelocate() const {
@@ -339,9 +351,7 @@ class Runtime {
return !imt_conflict_method_.IsNull();
}
- void SetImtConflictMethod(mirror::ArtMethod* method) {
- imt_conflict_method_ = GcRoot<mirror::ArtMethod>(method);
- }
+ void SetImtConflictMethod(mirror::ArtMethod* method);
void SetImtUnimplementedMethod(mirror::ArtMethod* method) {
imt_unimplemented_method_ = GcRoot<mirror::ArtMethod>(method);
}
@@ -421,6 +431,14 @@ class Runtime {
kUnload,
kInitialize
};
+
+ jit::Jit* GetJit() {
+ return jit_.get();
+ }
+ bool UseJit() const {
+ return jit_.get() != nullptr;
+ }
+
void PreZygoteFork();
bool InitZygote();
void DidForkFromZygote(JNIEnv* env, NativeBridgeAction action, const char* isa);
@@ -525,6 +543,8 @@ class Runtime {
return zygote_max_failed_boots_;
}
+ void CreateJit();
+
private:
static void InitPlatformSignalHandlers();
@@ -604,6 +624,9 @@ class Runtime {
JavaVMExt* java_vm_;
+ std::unique_ptr<jit::Jit> jit_;
+ std::unique_ptr<jit::JitOptions> jit_options_;
+
// Fault message, printed when we get a SIGSEGV.
Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::string fault_message_ GUARDED_BY(fault_message_lock_);
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index d9cc4d4..d072ffa 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -63,6 +63,9 @@ RUNTIME_OPTIONS_KEY (Unit, IgnoreMaxFootprint)
RUNTIME_OPTIONS_KEY (Unit, LowMemoryMode)
RUNTIME_OPTIONS_KEY (bool, UseTLAB, false)
RUNTIME_OPTIONS_KEY (bool, EnableHSpaceCompactForOOM, true)
+RUNTIME_OPTIONS_KEY (bool, UseJIT, false)
+RUNTIME_OPTIONS_KEY (unsigned int, JITCompileThreshold, jit::Jit::kDefaultCompileThreshold)
+RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheCapacity, jit::JitCodeCache::kDefaultCapacity)
RUNTIME_OPTIONS_KEY (MillisecondsToNanoseconds, \
HSpaceCompactForOOMMinIntervalsMs,\
MsToNs(100 * 1000)) // 100s
diff --git a/runtime/runtime_options.h b/runtime/runtime_options.h
index ebd52d7..7e59000 100644
--- a/runtime/runtime_options.h
+++ b/runtime/runtime_options.h
@@ -26,6 +26,8 @@
#include "runtime/base/logging.h"
#include "cmdline/unit.h"
#include "jdwp/jdwp.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
#include "gc/collector_type.h"
#include "gc/space/large_object_space.h"
#include "profiler_options.h"
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index e377542..26bf655 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -180,7 +180,7 @@ void* SignalCatcher::Run(void* arg) {
Runtime* runtime = Runtime::Current();
CHECK(runtime->AttachCurrentThread("Signal Catcher", true, runtime->GetSystemThreadGroup(),
- !runtime->IsCompiler()));
+ !runtime->IsAotCompiler()));
Thread* self = Thread::Current();
DCHECK_NE(self->GetState(), kRunnable);
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 3b48f49..79d0066 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -541,7 +541,7 @@ bool Thread::InitStackHwm() {
// Set stack_end_ to the bottom of the stack saving space of stack overflows
Runtime* runtime = Runtime::Current();
- bool implicit_stack_check = !runtime->ExplicitStackOverflowChecks() && !runtime->IsCompiler();
+ bool implicit_stack_check = !runtime->ExplicitStackOverflowChecks() && !runtime->IsAotCompiler();
ResetDefaultStackEnd();
// Install the protected region if we are doing implicit overflow checks.
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 0950abe..93b3877 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -298,7 +298,7 @@ void* Trace::RunSamplingThread(void* arg) {
intptr_t interval_us = reinterpret_cast<intptr_t>(arg);
CHECK_GE(interval_us, 0);
CHECK(runtime->AttachCurrentThread("Sampling Profiler", true, runtime->GetSystemThreadGroup(),
- !runtime->IsCompiler()));
+ !runtime->IsAotCompiler()));
while (true) {
usleep(interval_us);
@@ -627,6 +627,12 @@ void Trace::ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
LOG(ERROR) << "Unexpected exception caught event in tracing";
}
+void Trace::BackwardBranch(Thread* /*thread*/, mirror::ArtMethod* method,
+ int32_t /*dex_pc_offset*/)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ LOG(ERROR) << "Unexpected backward branch event in tracing " << PrettyMethod(method);
+}
+
void Trace::ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff) {
if (UseThreadCpuClock()) {
uint64_t clock_base = thread->GetTraceClockBase();
diff --git a/runtime/trace.h b/runtime/trace.h
index ead1c29..9ba30d5 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -99,7 +99,8 @@ class Trace FINAL : public instrumentation::InstrumentationListener {
mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
mirror::Throwable* exception_object)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
-
+ void BackwardBranch(Thread* thread, mirror::ArtMethod* method, int32_t dex_pc_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
// Reuse an old stack trace if it exists, otherwise allocate a new one.
static std::vector<mirror::ArtMethod*>* AllocStackTrace();
// Clear and store an old stack trace for later use.
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index 7e2e0a6..e26f955 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -32,7 +32,7 @@ static constexpr bool kEnableTransactionStats = false;
Transaction::Transaction()
: log_lock_("transaction log lock", kTransactionLogLock), aborted_(false) {
- CHECK(Runtime::Current()->IsCompiler());
+ CHECK(Runtime::Current()->IsAotCompiler());
}
Transaction::~Transaction() {
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 85c9340..dea8f79 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1262,9 +1262,9 @@ void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
os << "+" << it->func_offset;
}
try_addr2line = true;
- } else if (current_method != nullptr &&
- Locks::mutator_lock_->IsSharedHeld(Thread::Current()) &&
- current_method->PcIsWithinQuickCode(it->pc)) {
+ } else if (
+ current_method != nullptr && Locks::mutator_lock_->IsSharedHeld(Thread::Current()) &&
+ current_method->PcIsWithinQuickCode(it->pc)) {
const void* start_of_code = current_method->GetEntryPointFromQuickCompiledCode();
os << JniLongName(current_method) << "+"
<< (it->pc - reinterpret_cast<uintptr_t>(start_of_code));
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 474a066..87a29ed 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -24,6 +24,7 @@
#include "compiler_callbacks.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
+#include "dex_instruction_utils.h"
#include "dex_instruction_visitor.h"
#include "gc/accounting/card_table-inl.h"
#include "indenter.h"
@@ -111,6 +112,20 @@ static void SafelyMarkAllRegistersAsConflicts(MethodVerifier* verifier, Register
reg_line->MarkAllRegistersAsConflicts(verifier);
}
+MethodVerifier::FailureKind MethodVerifier::VerifyMethod(
+ mirror::ArtMethod* method, bool allow_soft_failures, std::string* error ATTRIBUTE_UNUSED) {
+ Thread* self = Thread::Current();
+ StackHandleScope<3> hs(self);
+ mirror::Class* klass = method->GetDeclaringClass();
+ auto h_dex_cache(hs.NewHandle(klass->GetDexCache()));
+ auto h_class_loader(hs.NewHandle(klass->GetClassLoader()));
+ auto h_method = hs.NewHandle(method);
+ return VerifyMethod(self, method->GetDexMethodIndex(), method->GetDexFile(), h_dex_cache,
+ h_class_loader, klass->GetClassDef(), method->GetCodeItem(), h_method,
+ method->GetAccessFlags(), allow_soft_failures, false);
+}
+
+
MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self,
mirror::Class* klass,
bool allow_soft_failures,
@@ -136,7 +151,7 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self,
}
if (early_failure) {
*error = "Verifier rejected class " + PrettyDescriptor(klass) + failure_message;
- if (Runtime::Current()->IsCompiler()) {
+ if (Runtime::Current()->IsAotCompiler()) {
ClassReference ref(&dex_file, klass->GetDexClassDefIndex());
Runtime::Current()->GetCompilerCallbacks()->ClassRejected(ref);
}
@@ -544,7 +559,7 @@ std::ostream& MethodVerifier::Fail(VerifyError error) {
case VERIFY_ERROR_ACCESS_METHOD:
case VERIFY_ERROR_INSTANTIATION:
case VERIFY_ERROR_CLASS_CHANGE:
- if (Runtime::Current()->IsCompiler() || !can_load_classes_) {
+ if (Runtime::Current()->IsAotCompiler() || !can_load_classes_) {
// If we're optimistically running verification at compile time, turn NO_xxx, ACCESS_xxx,
// class change and instantiation errors into soft verification errors so that we re-verify
// at runtime. We may fail to find or to agree on access because of not yet available class
@@ -568,7 +583,7 @@ std::ostream& MethodVerifier::Fail(VerifyError error) {
// Hard verification failures at compile time will still fail at runtime, so the class is
// marked as rejected to prevent it from being compiled.
case VERIFY_ERROR_BAD_CLASS_HARD: {
- if (Runtime::Current()->IsCompiler()) {
+ if (Runtime::Current()->IsAotCompiler()) {
ClassReference ref(dex_file_, dex_file_->GetIndexForClassDef(*class_def_));
Runtime::Current()->GetCompilerCallbacks()->ClassRejected(ref);
}
@@ -844,7 +859,7 @@ bool MethodVerifier::VerifyInstruction(const Instruction* inst, uint32_t code_of
result = false;
break;
}
- if (inst->GetVerifyIsRuntimeOnly() && Runtime::Current()->IsCompiler() && !verify_to_dump_) {
+ if (inst->GetVerifyIsRuntimeOnly() && Runtime::Current()->IsAotCompiler() && !verify_to_dump_) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "opcode only expected at runtime " << inst->Name();
result = false;
}
@@ -2812,8 +2827,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
}
if (have_pending_hard_failure_) {
- if (Runtime::Current()->IsCompiler()) {
- /* When compiling, check that the last failure is a hard failure */
+ if (Runtime::Current()->IsAotCompiler()) {
+ /* When AOT compiling, check that the last failure is a hard failure */
CHECK_EQ(failures_[failures_.size() - 1], VERIFY_ERROR_BAD_CLASS_HARD);
}
/* immediate failure, reject class */
@@ -3941,28 +3956,16 @@ void MethodVerifier::VerifyISFieldAccess(const Instruction* inst, const RegType&
mirror::ArtField* MethodVerifier::GetQuickFieldAccess(const Instruction* inst,
RegisterLine* reg_line) {
- DCHECK(inst->Opcode() == Instruction::IGET_QUICK ||
- inst->Opcode() == Instruction::IGET_WIDE_QUICK ||
- inst->Opcode() == Instruction::IGET_OBJECT_QUICK ||
- inst->Opcode() == Instruction::IGET_BOOLEAN_QUICK ||
- inst->Opcode() == Instruction::IGET_BYTE_QUICK ||
- inst->Opcode() == Instruction::IGET_CHAR_QUICK ||
- inst->Opcode() == Instruction::IGET_SHORT_QUICK ||
- inst->Opcode() == Instruction::IPUT_QUICK ||
- inst->Opcode() == Instruction::IPUT_WIDE_QUICK ||
- inst->Opcode() == Instruction::IPUT_OBJECT_QUICK ||
- inst->Opcode() == Instruction::IPUT_BOOLEAN_QUICK ||
- inst->Opcode() == Instruction::IPUT_BYTE_QUICK ||
- inst->Opcode() == Instruction::IPUT_CHAR_QUICK ||
- inst->Opcode() == Instruction::IPUT_SHORT_QUICK);
+ DCHECK(IsInstructionIGetQuickOrIPutQuick(inst->Opcode())) << inst->Opcode();
const RegType& object_type = reg_line->GetRegisterType(this, inst->VRegB_22c());
if (!object_type.HasClass()) {
VLOG(verifier) << "Failed to get mirror::Class* from '" << object_type << "'";
return nullptr;
}
uint32_t field_offset = static_cast<uint32_t>(inst->VRegC_22c());
- mirror::ArtField* f = mirror::ArtField::FindInstanceFieldWithOffset(object_type.GetClass(),
- field_offset);
+ mirror::ArtField* const f = mirror::ArtField::FindInstanceFieldWithOffset(object_type.GetClass(),
+ field_offset);
+ DCHECK(f == nullptr || f->GetOffset().Uint32Value() == field_offset);
if (f == nullptr) {
VLOG(verifier) << "Failed to find instance field at offset '" << field_offset
<< "' from '" << PrettyDescriptor(object_type.GetClass()) << "'";
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index b83e647..bdd6259 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -156,6 +156,9 @@ class MethodVerifier {
uint32_t method_access_flags)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static FailureKind VerifyMethod(mirror::ArtMethod* method, bool allow_soft_failures,
+ std::string* error) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
uint8_t EncodePcToReferenceMapData() const;
uint32_t DexFileVersion() const {
@@ -239,10 +242,14 @@ class MethodVerifier {
bool HasFailures() const;
const RegType& ResolveCheckedClass(uint32_t class_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* GetQuickInvokedMethod(const Instruction* inst,
- RegisterLine* reg_line,
+ // Returns the method of a quick invoke or nullptr if it cannot be found.
+ mirror::ArtMethod* GetQuickInvokedMethod(const Instruction* inst, RegisterLine* reg_line,
bool is_range)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Returns the access field of a quick field access (iget/iput-quick) or nullptr
+ // if it cannot be found.
+ mirror::ArtField* GetQuickFieldAccess(const Instruction* inst, RegisterLine* reg_line)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Is the method being verified a constructor?
bool IsConstructor() const {
@@ -532,11 +539,6 @@ class MethodVerifier {
bool is_primitive, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Returns the access field of a quick field access (iget/iput-quick) or nullptr
- // if it cannot be found.
- mirror::ArtField* GetQuickFieldAccess(const Instruction* inst, RegisterLine* reg_line)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
template <FieldAccessType kAccType>
void VerifyQuickFieldAccess(const Instruction* inst, const RegType& insn_type, bool is_primitive)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);