summaryrefslogtreecommitdiffstats
path: root/runtime/stack.cc
diff options
context:
space:
mode:
authorbuzbee <buzbee@google.com>2014-06-21 15:31:01 -0700
committerAndreas Gampe <agampe@google.com>2014-07-03 00:12:07 -0700
commitb5860fb459f1ed71f39d8a87b45bee6727d79fe8 (patch)
tree3ac54afcb83678d3edfef855f62b79de8b3fff85 /runtime/stack.cc
parent555377d55c37db860583e0655f63a1dacb589921 (diff)
downloadart-b5860fb459f1ed71f39d8a87b45bee6727d79fe8.zip
art-b5860fb459f1ed71f39d8a87b45bee6727d79fe8.tar.gz
art-b5860fb459f1ed71f39d8a87b45bee6727d79fe8.tar.bz2
Register promotion support for 64-bit targets
Not sufficiently tested for 64-bit targets, but should be fairly close. A significant amount of refactoring could still be done, (in later CLs). With this change we are not making any changes to the vmap scheme. As a result, it is a requirement that if a vreg is promoted to both a 32-bit view and the low half of a 64-bit view it must share the same physical register. We may change this restriction later on to allow for more flexibility for 32-bit Arm. For example, if v4, v5, v4/v5 and v5/v6 are all hot enough to promote, we'd end up with something like: v4 (as an int) -> r10 v4/v5 (as a long) -> r10 v5 (as an int) -> r11 v5/v6 (as a long) -> r11 Fix a couple of ARM64 bugs on the way... Change-Id: I6a152b9c164d9f1a053622266e165428045362f3
Diffstat (limited to 'runtime/stack.cc')
-rw-r--r--runtime/stack.cc33
1 file changed, 33 insertions, 0 deletions
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 132ac3e..d5405fb 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -159,11 +159,22 @@ bool StackVisitor::GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t reg = vmap_table.ComputeRegister(spill_mask, vmap_offset, kind);
uintptr_t ptr_val;
bool success = false;
+ bool target64 = (kRuntimeISA == kArm64) || (kRuntimeISA == kX86_64);
if (is_float) {
success = GetFPR(reg, &ptr_val);
} else {
success = GetGPR(reg, &ptr_val);
}
+ if (success && target64) {
+ bool wide_lo = (kind == kLongLoVReg) || (kind == kDoubleLoVReg);
+ bool wide_hi = (kind == kLongHiVReg) || (kind == kDoubleHiVReg);
+ int64_t value_long = static_cast<int64_t>(ptr_val);
+ if (wide_lo) {
+ ptr_val = static_cast<uintptr_t>(value_long & 0xFFFFFFFF);
+ } else if (wide_hi) {
+ ptr_val = static_cast<uintptr_t>(value_long >> 32);
+ }
+ }
*val = ptr_val;
return success;
} else {
@@ -194,6 +205,28 @@ bool StackVisitor::SetVReg(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_val
bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
const uint32_t reg = vmap_table.ComputeRegister(spill_mask, vmap_offset, kind);
+ bool target64 = (kRuntimeISA == kArm64) || (kRuntimeISA == kX86_64);
+ // Deal with 32 or 64-bit wide registers in a way that builds on all targets.
+ if (target64) {
+ bool wide_lo = (kind == kLongLoVReg) || (kind == kDoubleLoVReg);
+ bool wide_hi = (kind == kLongHiVReg) || (kind == kDoubleHiVReg);
+ if (wide_lo || wide_hi) {
+ uintptr_t old_reg_val;
+ bool success = is_float ? GetFPR(reg, &old_reg_val) : GetGPR(reg, &old_reg_val);
+ if (!success) {
+ return false;
+ }
+ uint64_t new_vreg_portion = static_cast<uint64_t>(new_value);
+ uint64_t old_reg_val_as_wide = static_cast<uint64_t>(old_reg_val);
+ uint64_t mask = 0xffffffff;
+ if (wide_lo) {
+ mask = mask << 32;
+ } else {
+ new_vreg_portion = new_vreg_portion << 32;
+ }
+ new_value = static_cast<uintptr_t>((old_reg_val_as_wide & mask) | new_vreg_portion);
+ }
+ }
if (is_float) {
return SetFPR(reg, new_value);
} else {