author     Sebastien Hertz <shertz@google.com>   2014-04-28 15:03:08 +0200
committer  Sebastien Hertz <shertz@google.com>   2014-04-29 09:04:18 +0200
commit     714f175bd66d03225927a84f3d5dbc923c5a3e7e
tree       093c8564827484af598bd116c7560b1893910e3d
parent     d55e9b1a9f45c19cd7b376a8839ce37f86c66a64
Visit deoptimized shadow frames as roots
During deoptimization, we create shadow frames but do not attach them to the stack until we transition to interpreter mode. If a GC happens before that, these shadow frames are not visited by GC, so they may contain stale references.

This CL addresses the issue by visiting the shadow frames "under construction" in Thread::VisitRoots so we correctly update all references they hold. To make them visible, we now save the top shadow frame (the first one created) in the field Thread::tls_ptr_sized_values::deoptimization_shadow_frame. This field is then cleared when transitioning to interpreter mode.

Bug: 14324885
Change-Id: Ib213ddc80f19cfcdfcec6f20acaa7f1a0e9ce2c1
-rw-r--r--  runtime/deoptimize_stack_visitor.cc                           |   2
-rw-r--r--  runtime/deoptimize_stack_visitor.h                            |   2
-rw-r--r--  runtime/entrypoints/portable/portable_thread_entrypoints.cc   |   2
-rw-r--r--  runtime/quick_exception_handler.cc                            |   6
-rw-r--r--  runtime/quick_exception_handler.h                             |   6
-rw-r--r--  runtime/thread.cc                                             | 162
-rw-r--r--  runtime/thread.h                                              |   4
7 files changed, 98 insertions(+), 86 deletions(-)
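
[Editor's note] The core of the fix is the loop added to Thread::VisitRoots in the thread.cc hunk below: starting from the saved top shadow frame, it follows the link chain and visits each frame's reference vregs as GC roots. A minimal sketch of that walk, under the assumption of simplified stand-in types (SimpleShadowFrame and RootVisitorFn are illustrative, not ART's actual ShadowFrame or RootCallback):

    // Sketch only: mirrors the linked-list walk this CL adds to
    // Thread::VisitRoots, with simplified stand-in types.
    #include <cstddef>
    #include <functional>

    struct Object;  // stand-in for mirror::Object (only pointers are used)

    struct SimpleShadowFrame {
      SimpleShadowFrame* link = nullptr;  // next (caller) frame in the chain
      std::size_t num_vregs = 0;
      Object** vregs = nullptr;           // reference vregs to visit
      SimpleShadowFrame* GetLink() const { return link; }
    };

    using RootVisitorFn = std::function<void(Object**)>;

    // Walk every frame reachable from the saved top frame and report each
    // non-null reference to the visitor, which may update it in place
    // (e.g. when a moving GC relocates the object).
    void VisitDeoptimizationShadowFrames(SimpleShadowFrame* top,
                                         const RootVisitorFn& visit) {
      for (SimpleShadowFrame* frame = top; frame != nullptr;
           frame = frame->GetLink()) {
        for (std::size_t reg = 0; reg < frame->num_vregs; ++reg) {
          if (frame->vregs[reg] != nullptr) {
            visit(&frame->vregs[reg]);
          }
        }
      }
    }

Because the frames are already linked via SetLink() while they are built (see the deoptimize_stack_visitor.cc hunk), saving only the top frame in tlsPtr_.deoptimization_shadow_frame is enough to make the whole chain reachable.
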
diff --git a/runtime/deoptimize_stack_visitor.cc b/runtime/deoptimize_stack_visitor.cc
index f2eaf00..3eb1792 100644
--- a/runtime/deoptimize_stack_visitor.cc
+++ b/runtime/deoptimize_stack_visitor.cc
@@ -78,7 +78,7 @@ bool DeoptimizeStackVisitor::HandleDeoptimization(mirror::ArtMethod* m) {
if (prev_shadow_frame_ != nullptr) {
prev_shadow_frame_->SetLink(new_frame);
} else {
- exception_handler_->SetTopShadowFrame(new_frame);
+ self_->SetDeoptimizationShadowFrame(new_frame);
}
prev_shadow_frame_ = new_frame;
return true;
diff --git a/runtime/deoptimize_stack_visitor.h b/runtime/deoptimize_stack_visitor.h
index c898e7d..c41b803 100644
--- a/runtime/deoptimize_stack_visitor.h
+++ b/runtime/deoptimize_stack_visitor.h
@@ -19,6 +19,7 @@
#include "base/mutex.h"
#include "stack.h"
+#include "thread.h"
namespace art {
@@ -35,6 +36,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: StackVisitor(self, context), self_(self), exception_handler_(exception_handler),
prev_shadow_frame_(nullptr) {
+ CHECK(!self_->HasDeoptimizationShadowFrame());
}
bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/entrypoints/portable/portable_thread_entrypoints.cc b/runtime/entrypoints/portable/portable_thread_entrypoints.cc
index 4f19964..9e62e0e 100644
--- a/runtime/entrypoints/portable/portable_thread_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_thread_entrypoints.cc
@@ -78,7 +78,7 @@ extern "C" void art_portable_test_suspend_from_code(Thread* self)
visitor.WalkStack(true);
self->SetDeoptimizationShadowFrame(visitor.GetShadowFrameCopy());
self->SetDeoptimizationReturnValue(JValue());
- self->SetException(ThrowLocation(), reinterpret_cast<mirror::Throwable*>(-1));
+ self->SetException(ThrowLocation(), Thread::GetDeoptimizationException());
}
}
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index d5844b6..a91fdf1 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -28,7 +28,7 @@ QuickExceptionHandler::QuickExceptionHandler(Thread* self, bool is_deoptimizatio
method_tracing_active_(is_deoptimization ||
Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()),
handler_quick_frame_(nullptr), handler_quick_frame_pc_(0), handler_dex_pc_(0),
- clear_exception_(false), top_shadow_frame_(nullptr), handler_frame_id_(kInvalidFrameId) {
+ clear_exception_(false), handler_frame_id_(kInvalidFrameId) {
}
void QuickExceptionHandler::FindCatch(const ThrowLocation& throw_location,
@@ -125,10 +125,6 @@ void QuickExceptionHandler::UpdateInstrumentationStack() {
}
void QuickExceptionHandler::DoLongJump() {
- if (is_deoptimization_) {
- // TODO: proper return value.
- self_->SetDeoptimizationShadowFrame(top_shadow_frame_);
- }
// Place context back on thread so it will be available when we continue.
self_->ReleaseLongJumpContext(context_);
context_->SetSP(reinterpret_cast<uintptr_t>(handler_quick_frame_));
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index d06ce7c..ef3766c 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -66,10 +66,6 @@ class QuickExceptionHandler {
clear_exception_ = clear_exception;
}
- void SetTopShadowFrame(ShadowFrame* top_shadow_frame) {
- top_shadow_frame_ = top_shadow_frame;
- }
-
void SetHandlerFrameId(size_t frame_id) {
handler_frame_id_ = frame_id;
}
@@ -88,8 +84,6 @@ class QuickExceptionHandler {
uint32_t handler_dex_pc_;
// Should the exception be cleared as the catch block has no move-exception?
bool clear_exception_;
- // Deoptimization top shadow frame.
- ShadowFrame* top_shadow_frame_;
// Frame id of the catch handler or the upcall.
size_t handler_frame_id_;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 095404f..2a7cfff 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1931,92 +1931,102 @@ class ReferenceMapVisitor : public StackVisitor {
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (false) {
LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod())
- << StringPrintf("@ PC:%04x", GetDexPc());
+ << StringPrintf("@ PC:%04x", GetDexPc());
}
ShadowFrame* shadow_frame = GetCurrentShadowFrame();
if (shadow_frame != nullptr) {
- mirror::ArtMethod* m = shadow_frame->GetMethod();
- size_t num_regs = shadow_frame->NumberOfVRegs();
- if (m->IsNative() || shadow_frame->HasReferenceArray()) {
- // SIRT for JNI or References for interpreter.
- for (size_t reg = 0; reg < num_regs; ++reg) {
+ VisitShadowFrame(shadow_frame);
+ } else {
+ VisitQuickFrame();
+ }
+ return true;
+ }
+
+ void VisitShadowFrame(ShadowFrame* shadow_frame) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* m = shadow_frame->GetMethod();
+ size_t num_regs = shadow_frame->NumberOfVRegs();
+ if (m->IsNative() || shadow_frame->HasReferenceArray()) {
+ // SIRT for JNI or References for interpreter.
+ for (size_t reg = 0; reg < num_regs; ++reg) {
+ mirror::Object* ref = shadow_frame->GetVRegReference(reg);
+ if (ref != nullptr) {
+ mirror::Object* new_ref = ref;
+ visitor_(&new_ref, reg, this);
+ if (new_ref != ref) {
+ shadow_frame->SetVRegReference(reg, new_ref);
+ }
+ }
+ }
+ } else {
+ // Java method.
+ // Portable path use DexGcMap and store in Method.native_gc_map_.
+ const uint8_t* gc_map = m->GetNativeGcMap();
+ CHECK(gc_map != nullptr) << PrettyMethod(m);
+ verifier::DexPcToReferenceMap dex_gc_map(gc_map);
+ uint32_t dex_pc = shadow_frame->GetDexPC();
+ const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
+ DCHECK(reg_bitmap != nullptr);
+ num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs);
+ for (size_t reg = 0; reg < num_regs; ++reg) {
+ if (TestBitmap(reg, reg_bitmap)) {
mirror::Object* ref = shadow_frame->GetVRegReference(reg);
if (ref != nullptr) {
mirror::Object* new_ref = ref;
visitor_(&new_ref, reg, this);
if (new_ref != ref) {
- shadow_frame->SetVRegReference(reg, new_ref);
+ shadow_frame->SetVRegReference(reg, new_ref);
}
}
}
- } else {
- // Java method.
- // Portable path use DexGcMap and store in Method.native_gc_map_.
- const uint8_t* gc_map = m->GetNativeGcMap();
- CHECK(gc_map != nullptr) << PrettyMethod(m);
- verifier::DexPcToReferenceMap dex_gc_map(gc_map);
- uint32_t dex_pc = GetDexPc();
- const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
+ }
+ }
+ }
+
+ private:
+ void VisitQuickFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* m = GetMethod();
+ // Process register map (which native and runtime methods don't have)
+ if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
+ const uint8_t* native_gc_map = m->GetNativeGcMap();
+ CHECK(native_gc_map != nullptr) << PrettyMethod(m);
+ mh_.ChangeMethod(m);
+ const DexFile::CodeItem* code_item = mh_.GetCodeItem();
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be nullptr or how would we compile its instructions?
+ NativePcOffsetToReferenceMap map(native_gc_map);
+ size_t num_regs = std::min(map.RegWidth() * 8,
+ static_cast<size_t>(code_item->registers_size_));
+ if (num_regs > 0) {
+ const uint8_t* reg_bitmap = map.FindBitMap(GetNativePcOffset());
DCHECK(reg_bitmap != nullptr);
- num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs);
+ const VmapTable vmap_table(m->GetVmapTable());
+ uint32_t core_spills = m->GetCoreSpillMask();
+ uint32_t fp_spills = m->GetFpSpillMask();
+ size_t frame_size = m->GetFrameSizeInBytes();
+ // For all dex registers in the bitmap
+ mirror::ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
+ DCHECK(cur_quick_frame != nullptr);
for (size_t reg = 0; reg < num_regs; ++reg) {
+ // Does this register hold a reference?
if (TestBitmap(reg, reg_bitmap)) {
- mirror::Object* ref = shadow_frame->GetVRegReference(reg);
- if (ref != nullptr) {
- mirror::Object* new_ref = ref;
- visitor_(&new_ref, reg, this);
- if (new_ref != ref) {
- shadow_frame->SetVRegReference(reg, new_ref);
+ uint32_t vmap_offset;
+ if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
+ int vmap_reg = vmap_table.ComputeRegister(core_spills, vmap_offset, kReferenceVReg);
+ // This is sound as spilled GPRs will be word sized (ie 32 or 64bit).
+ mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(vmap_reg));
+ if (*ref_addr != nullptr) {
+ visitor_(ref_addr, reg, this);
}
- }
- }
- }
- }
- } else {
- mirror::ArtMethod* m = GetMethod();
- // Process register map (which native and runtime methods don't have)
- if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
- const uint8_t* native_gc_map = m->GetNativeGcMap();
- CHECK(native_gc_map != nullptr) << PrettyMethod(m);
- mh_.ChangeMethod(m);
- const DexFile::CodeItem* code_item = mh_.GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be nullptr or how would we compile its instructions?
- NativePcOffsetToReferenceMap map(native_gc_map);
- size_t num_regs = std::min(map.RegWidth() * 8,
- static_cast<size_t>(code_item->registers_size_));
- if (num_regs > 0) {
- const uint8_t* reg_bitmap = map.FindBitMap(GetNativePcOffset());
- DCHECK(reg_bitmap != nullptr);
- const VmapTable vmap_table(m->GetVmapTable());
- uint32_t core_spills = m->GetCoreSpillMask();
- uint32_t fp_spills = m->GetFpSpillMask();
- size_t frame_size = m->GetFrameSizeInBytes();
- // For all dex registers in the bitmap
- mirror::ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
- DCHECK(cur_quick_frame != nullptr);
- for (size_t reg = 0; reg < num_regs; ++reg) {
- // Does this register hold a reference?
- if (TestBitmap(reg, reg_bitmap)) {
- uint32_t vmap_offset;
- if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
- int vmap_reg = vmap_table.ComputeRegister(core_spills, vmap_offset, kReferenceVReg);
- // This is sound as spilled GPRs will be word sized (ie 32 or 64bit).
- mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(vmap_reg));
- if (*ref_addr != nullptr) {
- visitor_(ref_addr, reg, this);
- }
- } else {
- StackReference<mirror::Object>* ref_addr =
- reinterpret_cast<StackReference<mirror::Object>*>(
- GetVRegAddr(cur_quick_frame, code_item, core_spills, fp_spills, frame_size,
- reg));
- mirror::Object* ref = ref_addr->AsMirrorPtr();
- if (ref != nullptr) {
- mirror::Object* new_ref = ref;
- visitor_(&new_ref, reg, this);
- if (ref != new_ref) {
- ref_addr->Assign(new_ref);
- }
+ } else {
+ StackReference<mirror::Object>* ref_addr =
+ reinterpret_cast<StackReference<mirror::Object>*>(
+ GetVRegAddr(cur_quick_frame, code_item, core_spills, fp_spills, frame_size,
+ reg));
+ mirror::Object* ref = ref_addr->AsMirrorPtr();
+ if (ref != nullptr) {
+ mirror::Object* new_ref = ref;
+ visitor_(&new_ref, reg, this);
+ if (ref != new_ref) {
+ ref_addr->Assign(new_ref);
}
}
}
@@ -2024,10 +2034,8 @@ class ReferenceMapVisitor : public StackVisitor {
}
}
}
- return true;
}
- private:
static bool TestBitmap(size_t reg, const uint8_t* reg_vector) {
return ((reg_vector[reg / kBitsPerByte] >> (reg % kBitsPerByte)) & 0x01) != 0;
}
@@ -2084,6 +2092,14 @@ void Thread::VisitRoots(RootCallback* visitor, void* arg) {
if (tlsPtr_.single_step_control != nullptr) {
tlsPtr_.single_step_control->VisitRoots(visitor, arg, thread_id, kRootDebugger);
}
+ if (tlsPtr_.deoptimization_shadow_frame != nullptr) {
+ RootCallbackVisitor visitorToCallback(visitor, arg, thread_id);
+ ReferenceMapVisitor<RootCallbackVisitor> mapper(this, nullptr, visitorToCallback);
+ for (ShadowFrame* shadow_frame = tlsPtr_.deoptimization_shadow_frame; shadow_frame != nullptr;
+ shadow_frame = shadow_frame->GetLink()) {
+ mapper.VisitShadowFrame(shadow_frame);
+ }
+ }
// Visit roots on this thread's stack
Context* context = GetLongJumpContext();
RootCallbackVisitor visitorToCallback(visitor, arg, thread_id);
diff --git a/runtime/thread.h b/runtime/thread.h
index e5e4cae..f869285 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -696,6 +696,10 @@ class Thread {
ShadowFrame* GetAndClearDeoptimizationShadowFrame(JValue* ret_val);
+ bool HasDeoptimizationShadowFrame() const {
+ return tlsPtr_.deoptimization_shadow_frame != nullptr;
+ }
+
std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
return tlsPtr_.instrumentation_stack;
}