path: root/runtime/stack.h
author    Mathieu Chartier <mathieuc@google.com>    2015-04-22 13:56:20 -0700
committer Mathieu Chartier <mathieuc@google.com>    2015-06-02 09:21:27 -0700
commit    3d21bdf8894e780d349c481e5c9e29fe1556051c (patch)
tree      61a5231f36c0dabd73457fec81df103462a05aff /runtime/stack.h
parent    71f0a8a123fa27bdc857a98afebbaf0ed09dac15 (diff)
Move mirror::ArtMethod to native
Optimizing + quick tests are passing, devices boot.

TODO: Test and fix bugs in mips64.

Saves 16 bytes per most ArtMethod, 7.5MB reduction in system PSS.
Some of the savings are from removal of virtual methods and direct
methods object arrays.

Bug: 19264997

(cherry picked from commit e401d146407d61eeb99f8d6176b2ac13c4df1e33)

Change-Id: I622469a0cfa0e7082a2119f3d6a9491eb61e3f3d

Fix some ArtMethod related bugs

Added root visiting for runtime methods, not currently required
since the GcRoots in these methods are null.

Added missing GetInterfaceMethodIfProxy in GetMethodLine, fixes
--trace run-tests 005, 044.

Fixed optimizing compiler bug where we used a normal stack location
instead of double on ARM64, this fixes the debuggable tests.

TODO: Fix JDWP tests.

Bug: 19264997

Change-Id: I7c55f69c61d1b45351fd0dc7185ffe5efad82bd3

ART: Fix casts for 64-bit pointers on 32-bit compiler.

Bug: 19264997

Change-Id: Ief45cdd4bae5a43fc8bfdfa7cf744e2c57529457

Fix JDWP tests after ArtMethod change

Fixes Throwable::GetStackDepth for exception event detection after
internal stack trace representation change.

Adds missing ArtMethod::GetInterfaceMethodIfProxy call in case of
proxy method.

Bug: 19264997

Change-Id: I363e293796848c3ec491c963813f62d868da44d2

Fix accidental IMT and root marking regression

Was always using the conflict trampoline. Also included fix for
regression in GC time caused by extra roots. Most of the regression
was IMT.

Fixed bug in DumpGcPerformanceInfo where we would get SIGABRT due
to detached thread.

EvaluateAndApplyChanges:
From ~2500 -> ~1980
GC time: 8.2s -> 7.2s due to 1s less of MarkConcurrentRoots

Bug: 19264997

Change-Id: I4333e80a8268c2ed1284f87f25b9f113d4f2c7e0

Fix bogus image test assert

Previously we were comparing the size of the non moving space to
size of the image file. Now we properly compare the size of the
image space against the size of the image file.

Bug: 19264997

Change-Id: I7359f1f73ae3df60c5147245935a24431c04808a

[MIPS64] Fix art_quick_invoke_stub argument offsets.

ArtMethod reference's size got bigger, so we need to move other args
and leave enough space for ArtMethod* and 'this' pointer.

This fixes mips64 boot.

Bug: 19264997

Change-Id: I47198d5f39a4caab30b3b77479d5eedaad5006ab
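The core representation change — quick frames now hold a raw ArtMethod*
instead of a compressed 32-bit StackReference<mirror::ArtMethod> — is what
drives the offset edits in the diff below, including the GetOutVROffset
rewrite and the mips64 stub fix. A minimal standalone sketch of that offset
arithmetic before and after; StackReference and InstructionSetPointerSize
here are simplified stand-ins for the real ART definitions, not the actual
code:

#include <cstddef>
#include <cstdint>

// Simplified stand-in: ART's StackReference<T> is a compressed 32-bit
// reference, so the old method slot was 4 bytes on every architecture.
template <typename T>
struct StackReference {
  uint32_t reference_;
};

struct ArtMethod;  // opaque here; a native type after this change

// Assumed stand-in for art::InstructionSetPointerSize().
enum class InstructionSet { kArm, kArm64 };
constexpr size_t InstructionSetPointerSize(InstructionSet isa) {
  return isa == InstructionSet::kArm64 ? 8u : 4u;
}

// Old layout: the first out-arg sat above a fixed 4-byte reference slot,
// which is why the isa parameter used to be UNUSED.
constexpr size_t OldOutVROffset(uint16_t out_num) {
  return sizeof(StackReference<ArtMethod>) + out_num * sizeof(uint32_t);
}

// New layout: the slot holds a raw ArtMethod*, so its size follows the
// *target* instruction set.
constexpr size_t NewOutVROffset(uint16_t out_num, InstructionSet isa) {
  return InstructionSetPointerSize(isa) + out_num * sizeof(uint32_t);
}

static_assert(OldOutVROffset(0) == 4, "old slot: always 32-bit");
static_assert(NewOutVROffset(0, InstructionSet::kArm64) == 8,
              "new slot: pointer-sized");

On a 64-bit target the method slot doubles from 4 to 8 bytes, which is why
the mips64 invoke stub above needed its argument offsets moved.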
Diffstat (limited to 'runtime/stack.h')
-rw-r--r--  runtime/stack.h | 103
1 file changed, 34 insertions, 69 deletions
diff --git a/runtime/stack.h b/runtime/stack.h
index 4d36573..38dfe1b 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -31,10 +31,10 @@
namespace art {
namespace mirror {
- class ArtMethod;
class Object;
} // namespace mirror
+class ArtMethod;
class Context;
class ShadowFrame;
class HandleScope;
@@ -75,7 +75,7 @@ class ShadowFrame {
// Create ShadowFrame in heap for deoptimization.
static ShadowFrame* CreateDeoptimizedFrame(uint32_t num_vregs, ShadowFrame* link,
- mirror::ArtMethod* method, uint32_t dex_pc) {
+ ArtMethod* method, uint32_t dex_pc) {
uint8_t* memory = new uint8_t[ComputeSize(num_vregs)];
return Create(num_vregs, link, method, dex_pc, memory);
}
@@ -88,7 +88,7 @@ class ShadowFrame {
// Create ShadowFrame for interpreter using provided memory.
static ShadowFrame* Create(uint32_t num_vregs, ShadowFrame* link,
- mirror::ArtMethod* method, uint32_t dex_pc, void* memory) {
+ ArtMethod* method, uint32_t dex_pc, void* memory) {
ShadowFrame* sf = new (memory) ShadowFrame(num_vregs, link, method, dex_pc, true);
return sf;
}
@@ -238,16 +238,11 @@ class ShadowFrame {
}
}
- mirror::ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(method_ != nullptr);
return method_;
}
- mirror::ArtMethod** GetMethodAddress() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(method_ != nullptr);
- return &method_;
- }
-
mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Object* GetThisObject(uint16_t num_ins) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -284,7 +279,7 @@ class ShadowFrame {
}
private:
- ShadowFrame(uint32_t num_vregs, ShadowFrame* link, mirror::ArtMethod* method,
+ ShadowFrame(uint32_t num_vregs, ShadowFrame* link, ArtMethod* method,
uint32_t dex_pc, bool has_reference_array)
: number_of_vregs_(num_vregs), link_(link), method_(method), dex_pc_(dex_pc) {
if (has_reference_array) {
@@ -308,7 +303,7 @@ class ShadowFrame {
const uint32_t number_of_vregs_;
// Link to previous shadow frame or null.
ShadowFrame* link_;
- mirror::ArtMethod* method_;
+ ArtMethod* method_;
uint32_t dex_pc_;
uint32_t vregs_[0];
@@ -356,11 +351,11 @@ class PACKED(4) ManagedStack {
return link_;
}
- StackReference<mirror::ArtMethod>* GetTopQuickFrame() const {
+ ArtMethod** GetTopQuickFrame() const {
return top_quick_frame_;
}
- void SetTopQuickFrame(StackReference<mirror::ArtMethod>* top) {
+ void SetTopQuickFrame(ArtMethod** top) {
DCHECK(top_shadow_frame_ == nullptr);
top_quick_frame_ = top;
}
@@ -403,7 +398,7 @@ class PACKED(4) ManagedStack {
bool ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const;
private:
- StackReference<mirror::ArtMethod>* top_quick_frame_;
+ ArtMethod** top_quick_frame_;
ManagedStack* link_;
ShadowFrame* top_shadow_frame_;
};
@@ -430,11 +425,11 @@ class StackVisitor {
void WalkStack(bool include_transitions = false)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (cur_shadow_frame_ != nullptr) {
return cur_shadow_frame_->GetMethod();
} else if (cur_quick_frame_ != nullptr) {
- return cur_quick_frame_->AsMirrorPtr();
+ return *cur_quick_frame_;
} else {
return nullptr;
}
@@ -484,30 +479,30 @@ class StackVisitor {
}
// Get the method and dex pc immediately after the one that's currently being visited.
- bool GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32_t* next_dex_pc)
+ bool GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsReferenceVReg(mirror::ArtMethod* m, uint16_t vreg)
+ bool IsReferenceVReg(ArtMethod* m, uint16_t vreg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const
+ bool GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool GetVRegPair(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
+ bool GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
uint64_t* val) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool SetVReg(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind)
+ bool SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool SetVRegPair(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
+ bool SetVRegPair(ArtMethod* m, uint16_t vreg, uint64_t new_value,
VRegKind kind_lo, VRegKind kind_hi)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
uintptr_t* GetGPRAddress(uint32_t reg) const;
// This is a fast-path for getting/setting values in a quick frame.
- uint32_t* GetVRegAddrFromQuickCode(StackReference<mirror::ArtMethod>* cur_quick_frame,
+ uint32_t* GetVRegAddrFromQuickCode(ArtMethod** cur_quick_frame,
const DexFile::CodeItem* code_item,
uint32_t core_spills, uint32_t fp_spills, size_t frame_size,
uint16_t vreg) const {
@@ -541,7 +536,7 @@ class StackVisitor {
* | IN[ins-1] | {Note: resides in caller's frame}
* | . |
* | IN[0] |
- * | caller's ArtMethod | ... StackReference<ArtMethod>
+ * | caller's ArtMethod | ... ArtMethod*
* +===============================+ {Note: start of callee's frame}
* | core callee-save spill | {variable sized}
* +-------------------------------+
@@ -568,46 +563,16 @@ class StackVisitor {
* | OUT[outs-2] |
* | . |
* | OUT[0] |
- * | StackReference<ArtMethod> | ... (reg == num_total_code_regs == special_temp_value) <<== sp, 16-byte aligned
+ * | ArtMethod* | ... (reg == num_total_code_regs == special_temp_value) <<== sp, 16-byte aligned
* +===============================+
*/
static int GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item,
uint32_t core_spills, uint32_t fp_spills,
- size_t frame_size, int reg, InstructionSet isa) {
- DCHECK_EQ(frame_size & (kStackAlignment - 1), 0U);
- DCHECK_NE(reg, -1);
- int spill_size = POPCOUNT(core_spills) * GetBytesPerGprSpillLocation(isa)
- + POPCOUNT(fp_spills) * GetBytesPerFprSpillLocation(isa)
- + sizeof(uint32_t); // Filler.
- int num_regs = code_item->registers_size_ - code_item->ins_size_;
- int temp_threshold = code_item->registers_size_;
- const int max_num_special_temps = 1;
- if (reg == temp_threshold) {
- // The current method pointer corresponds to special location on stack.
- return 0;
- } else if (reg >= temp_threshold + max_num_special_temps) {
- /*
- * Special temporaries may have custom locations and the logic above deals with that.
- * However, non-special temporaries are placed relative to the outs.
- */
- int temps_start = sizeof(StackReference<mirror::ArtMethod>) +
- code_item->outs_size_ * sizeof(uint32_t);
- int relative_offset = (reg - (temp_threshold + max_num_special_temps)) * sizeof(uint32_t);
- return temps_start + relative_offset;
- } else if (reg < num_regs) {
- int locals_start = frame_size - spill_size - num_regs * sizeof(uint32_t);
- return locals_start + (reg * sizeof(uint32_t));
- } else {
- // Handle ins.
- return frame_size + ((reg - num_regs) * sizeof(uint32_t)) +
- sizeof(StackReference<mirror::ArtMethod>);
- }
- }
+ size_t frame_size, int reg, InstructionSet isa);
static int GetOutVROffset(uint16_t out_num, InstructionSet isa) {
- UNUSED(isa);
// According to the stack model, the first out is above the Method reference.
- return sizeof(StackReference<mirror::ArtMethod>) + (out_num * sizeof(uint32_t));
+ return InstructionSetPointerSize(isa) + out_num * sizeof(uint32_t);
}
bool IsInInlinedFrame() const {
@@ -618,7 +583,7 @@ class StackVisitor {
return cur_quick_frame_pc_;
}
- StackReference<mirror::ArtMethod>* GetCurrentQuickFrame() const {
+ ArtMethod** GetCurrentQuickFrame() const {
return cur_quick_frame_;
}
@@ -626,10 +591,10 @@ class StackVisitor {
return cur_shadow_frame_;
}
- HandleScope* GetCurrentHandleScope() const {
- StackReference<mirror::ArtMethod>* sp = GetCurrentQuickFrame();
- ++sp; // Skip Method*; handle scope comes next;
- return reinterpret_cast<HandleScope*>(sp);
+ HandleScope* GetCurrentHandleScope(size_t pointer_size) const {
+ ArtMethod** sp = GetCurrentQuickFrame();
+ // Skip ArtMethod*; handle scope comes next;
+ return reinterpret_cast<HandleScope*>(reinterpret_cast<uintptr_t>(sp) + pointer_size);
}
std::string DescribeLocation() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -668,19 +633,19 @@ class StackVisitor {
uintptr_t GetFPR(uint32_t reg) const;
void SetFPR(uint32_t reg, uintptr_t value);
- bool GetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
+ bool GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool GetVRegFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
+ bool GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool GetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
+ bool GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
VRegKind kind_hi, uint64_t* val) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool GetVRegPairFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg,
+ bool GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
VRegKind kind_lo, VRegKind kind_hi,
uint64_t* val) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -688,13 +653,13 @@ class StackVisitor {
uint64_t* val) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool SetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value,
+ bool SetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, uint32_t new_value,
VRegKind kind)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool SetRegisterIfAccessible(uint32_t reg, uint32_t new_value, VRegKind kind)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool SetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
+ bool SetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, uint64_t new_value,
VRegKind kind_lo, VRegKind kind_hi)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool SetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, uint64_t new_value,
@@ -706,7 +671,7 @@ class StackVisitor {
Thread* const thread_;
const StackWalkKind walk_kind_;
ShadowFrame* cur_shadow_frame_;
- StackReference<mirror::ArtMethod>* cur_quick_frame_;
+ ArtMethod** cur_quick_frame_;
uintptr_t cur_quick_frame_pc_;
// Lazily computed, number of frames in the stack.
size_t num_frames_;
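The GetCurrentHandleScope change above now takes the pointer size as an
explicit parameter: the handle scope sits behind a pointer-sized ArtMethod*
slot whose width is a property of the compiled target, not the host, and the
cast goes through uintptr_t (see the "Fix casts for 64-bit pointers on 32-bit
compiler" entry in the commit message). A hedged standalone sketch of that
arithmetic, with ArtMethod and HandleScope as opaque stand-ins:

#include <cstddef>
#include <cstdint>

struct ArtMethod;   // opaque native type after this change
struct HandleScope; // opaque

// Mirrors the new GetCurrentHandleScope(): skip the ArtMethod* slot at the
// top of the quick frame. pointer_size is passed in explicitly because a
// 32-bit compiler may be generating code for a 64-bit target, where
// sizeof(ArtMethod*) on the host would give the wrong slot width.
HandleScope* HandleScopeFromQuickFrame(ArtMethod** sp, size_t pointer_size) {
  return reinterpret_cast<HandleScope*>(
      reinterpret_cast<uintptr_t>(sp) + pointer_size);
}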