author    Mathieu Chartier <mathieuc@google.com>  2015-04-22 13:56:20 -0700
committer Mathieu Chartier <mathieuc@google.com>  2015-06-02 09:21:27 -0700
commit    3d21bdf8894e780d349c481e5c9e29fe1556051c (patch)
tree      61a5231f36c0dabd73457fec81df103462a05aff /runtime/stack.cc
parent    71f0a8a123fa27bdc857a98afebbaf0ed09dac15 (diff)
Move mirror::ArtMethod to native
Optimizing + quick tests are passing, devices boot.

TODO: Test and fix bugs in mips64.

Saves 16 bytes per most ArtMethod, 7.5MB reduction in system PSS.
Some of the savings are from removal of virtual methods and direct
methods object arrays.

Bug: 19264997

(cherry picked from commit e401d146407d61eeb99f8d6176b2ac13c4df1e33)

Change-Id: I622469a0cfa0e7082a2119f3d6a9491eb61e3f3d

Fix some ArtMethod related bugs

Added root visiting for runtime methods, not currently required
since the GcRoots in these methods are null.

Added missing GetInterfaceMethodIfProxy in GetMethodLine, fixes
--trace run-tests 005, 044.

Fixed optimizing compiler bug where we used a normal stack location
instead of double on ARM64, this fixes the debuggable tests.

TODO: Fix JDWP tests.

Bug: 19264997

Change-Id: I7c55f69c61d1b45351fd0dc7185ffe5efad82bd3

ART: Fix casts for 64-bit pointers on 32-bit compiler.

Bug: 19264997
Change-Id: Ief45cdd4bae5a43fc8bfdfa7cf744e2c57529457

Fix JDWP tests after ArtMethod change

Fixes Throwable::GetStackDepth for exception event detection after
internal stack trace representation change.

Adds missing ArtMethod::GetInterfaceMethodIfProxy call in case of
proxy method.

Bug: 19264997
Change-Id: I363e293796848c3ec491c963813f62d868da44d2

Fix accidental IMT and root marking regression

Was always using the conflict trampoline. Also included fix for
regression in GC time caused by extra roots. Most of the regression
was IMT.

Fixed bug in DumpGcPerformanceInfo where we would get SIGABRT due to
detached thread.

EvaluateAndApplyChanges: From ~2500 -> ~1980
GC time: 8.2s -> 7.2s due to 1s less of MarkConcurrentRoots

Bug: 19264997
Change-Id: I4333e80a8268c2ed1284f87f25b9f113d4f2c7e0

Fix bogus image test assert

Previously we were comparing the size of the non moving space to
size of the image file. Now we properly compare the size of the image
space against the size of the image file.

Bug: 19264997
Change-Id: I7359f1f73ae3df60c5147245935a24431c04808a

[MIPS64] Fix art_quick_invoke_stub argument offsets.

ArtMethod reference's size got bigger, so we need to move other args
and leave enough space for ArtMethod* and 'this' pointer.

This fixes mips64 boot.

Bug: 19264997
Change-Id: I47198d5f39a4caab30b3b77479d5eedaad5006ab
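
Note for readers of the diff below: the core representational change is that
a quick frame's first slot previously held a StackReference<mirror::ArtMethod>
(a compressed reference to a managed heap object) and now holds a plain native
ArtMethod*. Both accessor lines are lifted straight from the diff; the
surrounding functions are a hypothetical harness added here only for contrast.

  // Before: decode a compressed GC reference stored in the frame slot.
  mirror::ArtMethod* OldGetFrameMethod(StackReference<mirror::ArtMethod>* frame) {
    return frame->AsMirrorPtr();
  }

  // After: the method lives in native (linear alloc or image) memory,
  // so reading it is an ordinary pointer load.
  ArtMethod* NewGetFrameMethod(ArtMethod** frame) {
    return *frame;
  }
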
Diffstat (limited to 'runtime/stack.cc')
-rw-r--r--  runtime/stack.cc  136
1 file changed, 107 insertions, 29 deletions
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 800acaa..6cca4d2 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -17,12 +17,14 @@
#include "stack.h"
#include "arch/context.h"
+#include "art_method-inl.h"
#include "base/hex_dump.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc_map.h"
-#include "mirror/art_method-inl.h"
+#include "gc/space/image_space.h"
+#include "gc/space/space-inl.h"
+#include "linear_alloc.h"
#include "mirror/class-inl.h"
-#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "quick/quick_method_frame_info.h"
@@ -34,8 +36,10 @@
namespace art {
+static constexpr bool kDebugStackWalk = false;
+
mirror::Object* ShadowFrame::GetThisObject() const {
- mirror::ArtMethod* m = GetMethod();
+ ArtMethod* m = GetMethod();
if (m->IsStatic()) {
return nullptr;
} else if (m->IsNative()) {
@@ -49,7 +53,7 @@ mirror::Object* ShadowFrame::GetThisObject() const {
}
mirror::Object* ShadowFrame::GetThisObject(uint16_t num_ins) const {
- mirror::ArtMethod* m = GetMethod();
+ ArtMethod* m = GetMethod();
if (m->IsStatic()) {
return nullptr;
} else {
@@ -113,11 +117,12 @@ uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
}
}
-extern "C" mirror::Object* artQuickGetProxyThisObject(StackReference<mirror::ArtMethod>* sp)
+extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Object* StackVisitor::GetThisObject() const {
- mirror::ArtMethod* m = GetMethod();
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
+ ArtMethod* m = GetMethod();
if (m->IsStatic()) {
return nullptr;
} else if (m->IsNative()) {
@@ -156,7 +161,7 @@ size_t StackVisitor::GetNativePcOffset() const {
return GetMethod()->NativeQuickPcOffset(cur_quick_frame_pc_);
}
-bool StackVisitor::IsReferenceVReg(mirror::ArtMethod* m, uint16_t vreg) {
+bool StackVisitor::IsReferenceVReg(ArtMethod* m, uint16_t vreg) {
// Process register map (which native and runtime methods don't have)
if (m->IsNative() || m->IsRuntimeMethod() || m->IsProxyMethod()) {
return false;
@@ -183,8 +188,7 @@ bool StackVisitor::IsReferenceVReg(mirror::ArtMethod* m, uint16_t vreg) {
return vreg < num_regs && TestBitmap(vreg, reg_bitmap);
}
-bool StackVisitor::GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
- uint32_t* val) const {
+bool StackVisitor::GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const {
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably read registers without a context.
DCHECK(m == GetMethod());
@@ -200,7 +204,7 @@ bool StackVisitor::GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
}
}
-bool StackVisitor::GetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
+bool StackVisitor::GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const {
const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
DCHECK(code_pointer != nullptr);
@@ -223,7 +227,7 @@ bool StackVisitor::GetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, VRe
}
}
-bool StackVisitor::GetVRegFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
+bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const {
const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
DCHECK(code_pointer != nullptr);
@@ -287,7 +291,7 @@ bool StackVisitor::GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t
return true;
}
-bool StackVisitor::GetVRegPair(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
+bool StackVisitor::GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
VRegKind kind_hi, uint64_t* val) const {
if (kind_lo == kLongLoVReg) {
DCHECK_EQ(kind_hi, kLongHiVReg);
@@ -312,7 +316,7 @@ bool StackVisitor::GetVRegPair(mirror::ArtMethod* m, uint16_t vreg, VRegKind kin
}
}
-bool StackVisitor::GetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
+bool StackVisitor::GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
VRegKind kind_hi, uint64_t* val) const {
const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
DCHECK(code_pointer != nullptr);
@@ -339,7 +343,7 @@ bool StackVisitor::GetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg,
}
}
-bool StackVisitor::GetVRegPairFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg,
+bool StackVisitor::GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
VRegKind kind_lo, VRegKind kind_hi,
uint64_t* val) const {
uint32_t low_32bits;
@@ -371,7 +375,7 @@ bool StackVisitor::GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi,
return true;
}
-bool StackVisitor::SetVReg(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value,
+bool StackVisitor::SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value,
VRegKind kind) {
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
@@ -387,7 +391,7 @@ bool StackVisitor::SetVReg(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_val
}
}
-bool StackVisitor::SetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value,
+bool StackVisitor::SetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, uint32_t new_value,
VRegKind kind) {
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
@@ -445,7 +449,7 @@ bool StackVisitor::SetRegisterIfAccessible(uint32_t reg, uint32_t new_value, VRe
return true;
}
-bool StackVisitor::SetVRegPair(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
+bool StackVisitor::SetVRegPair(ArtMethod* m, uint16_t vreg, uint64_t new_value,
VRegKind kind_lo, VRegKind kind_hi) {
if (kind_lo == kLongLoVReg) {
DCHECK_EQ(kind_hi, kLongHiVReg);
@@ -470,7 +474,7 @@ bool StackVisitor::SetVRegPair(mirror::ArtMethod* m, uint16_t vreg, uint64_t new
}
bool StackVisitor::SetVRegPairFromQuickCode(
- mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) {
+ ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) {
const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
DCHECK(code_pointer != nullptr);
const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
@@ -586,7 +590,7 @@ size_t StackVisitor::ComputeNumFrames(Thread* thread, StackWalkKind walk_kind) {
return visitor.frames;
}
-bool StackVisitor::GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32_t* next_dex_pc) {
+bool StackVisitor::GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc) {
struct HasMoreFramesVisitor : public StackVisitor {
HasMoreFramesVisitor(Thread* thread,
StackWalkKind walk_kind,
@@ -602,7 +606,7 @@ bool StackVisitor::GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32
bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (found_frame_) {
- mirror::ArtMethod* method = GetMethod();
+ ArtMethod* method = GetMethod();
if (method != nullptr && !method->IsRuntimeMethod()) {
has_more_frames_ = true;
next_method_ = method;
@@ -618,7 +622,7 @@ bool StackVisitor::GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32
size_t frame_height_;
bool found_frame_;
bool has_more_frames_;
- mirror::ArtMethod* next_method_;
+ ArtMethod* next_method_;
uint32_t next_dex_pc_;
};
HasMoreFramesVisitor visitor(thread_, walk_kind_, GetNumFrames(), GetFrameHeight());
@@ -644,7 +648,7 @@ void StackVisitor::DescribeStack(Thread* thread) {
std::string StackVisitor::DescribeLocation() const {
std::string result("Visiting method '");
- mirror::ArtMethod* m = GetMethod();
+ ArtMethod* m = GetMethod();
if (m == nullptr) {
return "upcall";
}
@@ -664,8 +668,34 @@ static instrumentation::InstrumentationStackFrame& GetInstrumentationStackFrame(
void StackVisitor::SanityCheckFrame() const {
if (kIsDebugBuild) {
- mirror::ArtMethod* method = GetMethod();
- CHECK_EQ(method->GetClass(), mirror::ArtMethod::GetJavaLangReflectArtMethod());
+ ArtMethod* method = GetMethod();
+ auto* declaring_class = method->GetDeclaringClass();
+ // Runtime methods have null declaring class.
+ if (!method->IsRuntimeMethod()) {
+ CHECK(declaring_class != nullptr);
+ CHECK_EQ(declaring_class->GetClass(), declaring_class->GetClass()->GetClass())
+ << declaring_class;
+ } else {
+ CHECK(declaring_class == nullptr);
+ }
+ auto* runtime = Runtime::Current();
+ auto* la = runtime->GetLinearAlloc();
+ if (!la->Contains(method)) {
+ // Check image space.
+ bool in_image = false;
+ for (auto& space : runtime->GetHeap()->GetContinuousSpaces()) {
+ if (space->IsImageSpace()) {
+ auto* image_space = space->AsImageSpace();
+ const auto& header = image_space->GetImageHeader();
+ const auto* methods = &header.GetMethodsSection();
+ if (methods->Contains(reinterpret_cast<const uint8_t*>(method) - image_space->Begin())) {
+ in_image = true;
+ break;
+ }
+ }
+ }
+ CHECK(in_image) << PrettyMethod(method) << " not in linear alloc or image";
+ }
if (cur_quick_frame_ != nullptr) {
method->AssertPcIsWithinQuickCode(cur_quick_frame_pc_);
// Frame sanity.
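
Since ArtMethods are no longer heap objects, the debug-build check above
verifies the pointer lives in one of the two places native methods are
allocated: the runtime's LinearAlloc, or the methods section of an image.
A rough free-standing sketch of the image containment test, assuming
ImageSection exposes Offset()/Size() accessors (which its Contains() call
above implies):

  // Hypothetical helper mirroring the loop body above. An ImageSection is
  // an (offset, size) pair relative to the image base, so the raw method
  // pointer is first rebased to an offset within the image.
  bool IsInImageMethodsSection(gc::space::ImageSpace* space, ArtMethod* method) {
    const ImageSection& methods = space->GetImageHeader().GetMethodsSection();
    const uintptr_t offset =
        reinterpret_cast<const uint8_t*>(method) - space->Begin();
    return methods.Offset() <= offset &&
           offset < methods.Offset() + methods.Size();
  }
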
@@ -701,7 +731,7 @@ void StackVisitor::WalkStack(bool include_transitions) {
if (cur_quick_frame_ != nullptr) { // Handle quick stack frames.
// Can't be both a shadow and a quick fragment.
DCHECK(current_fragment->GetTopShadowFrame() == nullptr);
- mirror::ArtMethod* method = cur_quick_frame_->AsMirrorPtr();
+ ArtMethod* method = *cur_quick_frame_;
while (method != nullptr) {
SanityCheckFrame();
bool should_continue = VisitFrame();
@@ -727,8 +757,7 @@ void StackVisitor::WalkStack(bool include_transitions) {
if (GetMethod() == Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveAll)) {
// Skip runtime save all callee frames which are used to deliver exceptions.
} else if (instrumentation_frame.interpreter_entry_) {
- mirror::ArtMethod* callee =
- Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
+ ArtMethod* callee = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
CHECK_EQ(GetMethod(), callee) << "Expected: " << PrettyMethod(callee) << " Found: "
<< PrettyMethod(GetMethod());
} else if (instrumentation_frame.method_ != GetMethod()) {
@@ -747,9 +776,20 @@ void StackVisitor::WalkStack(bool include_transitions) {
}
cur_quick_frame_pc_ = return_pc;
uint8_t* next_frame = reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size;
- cur_quick_frame_ = reinterpret_cast<StackReference<mirror::ArtMethod>*>(next_frame);
+ cur_quick_frame_ = reinterpret_cast<ArtMethod**>(next_frame);
+
+ if (kDebugStackWalk) {
+ LOG(INFO) << PrettyMethod(method) << "@" << method << " size=" << frame_size
+ << " optimized=" << method->IsOptimized(sizeof(void*))
+ << " native=" << method->IsNative()
+ << " entrypoints=" << method->GetEntryPointFromQuickCompiledCode()
+ << "," << method->GetEntryPointFromJni()
+ << "," << method->GetEntryPointFromInterpreter()
+ << " next=" << *cur_quick_frame_;
+ }
+
cur_depth_++;
- method = cur_quick_frame_->AsMirrorPtr();
+ method = *cur_quick_frame_;
}
} else if (cur_shadow_frame_ != nullptr) {
do {
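
With the method pointer stored raw in the frame, stepping to the caller in
the loop above is pure pointer arithmetic, with no per-frame reference
decoding. A hypothetical free-standing version of that step:

  // Given the current quick frame and its size (from the method's
  // QuickMethodFrameInfo), return the caller's frame. By the quick ABI,
  // the first slot of every quick frame holds that frame's ArtMethod*;
  // the walk above ends when *result is nullptr.
  ArtMethod** NextQuickFrame(ArtMethod** cur_quick_frame, size_t frame_size) {
    uint8_t* next_frame = reinterpret_cast<uint8_t*>(cur_quick_frame) + frame_size;
    return reinterpret_cast<ArtMethod**>(next_frame);
  }
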
@@ -782,4 +822,42 @@ void JavaFrameRootInfo::Describe(std::ostream& os) const {
visitor->DescribeLocation() << " vreg=" << vreg_;
}
+int StackVisitor::GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item,
+ uint32_t core_spills, uint32_t fp_spills,
+ size_t frame_size, int reg, InstructionSet isa) {
+ size_t pointer_size = InstructionSetPointerSize(isa);
+ if (kIsDebugBuild) {
+ auto* runtime = Runtime::Current();
+ if (runtime != nullptr) {
+ CHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), pointer_size);
+ }
+ }
+ DCHECK_EQ(frame_size & (kStackAlignment - 1), 0U);
+ DCHECK_NE(reg, -1);
+ int spill_size = POPCOUNT(core_spills) * GetBytesPerGprSpillLocation(isa)
+ + POPCOUNT(fp_spills) * GetBytesPerFprSpillLocation(isa)
+ + sizeof(uint32_t); // Filler.
+ int num_regs = code_item->registers_size_ - code_item->ins_size_;
+ int temp_threshold = code_item->registers_size_;
+ const int max_num_special_temps = 1;
+ if (reg == temp_threshold) {
+ // The current method pointer corresponds to special location on stack.
+ return 0;
+ } else if (reg >= temp_threshold + max_num_special_temps) {
+ /*
+ * Special temporaries may have custom locations and the logic above deals with that.
+ * However, non-special temporaries are placed relative to the outs.
+ */
+ int temps_start = code_item->outs_size_ * sizeof(uint32_t) + pointer_size /* art method */;
+ int relative_offset = (reg - (temp_threshold + max_num_special_temps)) * sizeof(uint32_t);
+ return temps_start + relative_offset;
+ } else if (reg < num_regs) {
+ int locals_start = frame_size - spill_size - num_regs * sizeof(uint32_t);
+ return locals_start + (reg * sizeof(uint32_t));
+ } else {
+ // Handle ins.
+ return frame_size + ((reg - num_regs) * sizeof(uint32_t)) + pointer_size /* art method */;
+ }
+}
+
} // namespace art
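
GetVRegOffsetFromQuickCode, added at the end of the diff, encodes the quick
frame layout: the ArtMethod* slot at offset 0, outs above it, non-special
temporaries above the outs, locals just below the spill area, and ins over in
the caller's frame. A worked example with invented numbers (not from the
commit), traced through the branches above:

  // Assumed inputs: 32-bit ISA, so pointer_size = 4 and each GPR spill
  // location is 4 bytes; frame_size = 48 (16-byte aligned);
  // core_spills has 3 bits set, fp_spills has 0;
  // code_item: registers_size_ = 6, ins_size_ = 2, outs_size_ = 2.
  //
  //   spill_size   = 3*4 + 0*4 + 4 (filler)        = 16
  //   num_regs     = 6 - 2                         = 4   (locals v0..v3)
  //   reg == 6     -> 0                                  (ArtMethod* slot)
  //   reg == 7     -> 2*4 + 4 + (7 - 7)*4          = 12  (first plain temp)
  //   reg in 0..3  -> (48 - 16 - 4*4) + reg*4      = 16 + reg*4  (locals)
  //   reg in 4..5  -> 48 + (reg - 4)*4 + 4               (ins, caller frame)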