path: root/compiler/jni
author     Vladimir Marko <vmarko@google.com>   2015-05-19 18:08:00 +0100
committer  Vladimir Marko <vmarko@google.com>   2015-05-26 19:33:33 +0100
commit     41b175aba41c9365a1c53b8a1afbd17129c87c14 (patch)
tree       5c82606d39543fb932ddc0674694fc0758b1a866 /compiler/jni
parent     54d65738eecc1fa79ed528ff2f6b9b4f7a4743be (diff)
ART: Clean up arm64 kNumberOfXRegisters usage.
Avoid undefined behavior for arm64 stemming from 1u << 32 in loops with upper
bound kNumberOfXRegisters.

Create iterators for enumerating bits in an integer either from high to low or
from low to high and use them for <arch>Context::FillCalleeSaves() on all
architectures.

Refactor runtime/utils.{h,cc} by moving all bit-fiddling functions to
runtime/base/bit_utils.{h,cc} (together with the new bit iterators) and all
time-related functions to runtime/base/time_utils.{h,cc}. Improve test coverage
and fix some corner cases for the bit-fiddling functions.

Bug: 13925192

(cherry picked from commit 80afd02024d20e60b197d3adfbb43cc303cf29e0)

Change-Id: I905257a21de90b5860ebe1e39563758f721eab82
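A note on the bit enumeration the message describes: the low-to-high walk can be sketched as below. This is a minimal illustration only; ForEachBitLowToHigh and SaveCoreRegister are names invented for this sketch, not the API that landed in runtime/base/bit_utils.{h,cc}.

#include <cstdint>

// Sketch: visit the set bits of a 32-bit mask from lowest to highest without
// ever shifting a 1 by the full width of the type (the UB this change avoids).
template <typename Visitor>
void ForEachBitLowToHigh(uint32_t mask, Visitor&& visit) {
  while (mask != 0u) {
    visit(__builtin_ctz(mask));  // index of the lowest set bit (mask is non-zero here)
    mask &= mask - 1u;           // clear that bit and continue
  }
}

// Usage sketch for a FillCalleeSaves()-style walk over a spill mask:
//   ForEachBitLowToHigh(core_spill_mask, [&](int reg) { SaveCoreRegister(reg); });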
Diffstat (limited to 'compiler/jni')
-rw-r--r--  compiler/jni/quick/arm64/calling_convention_arm64.cc      3
-rw-r--r--  compiler/jni/quick/calling_convention.cc                  1
-rw-r--r--  compiler/jni/quick/x86/calling_convention_x86.cc          1
-rw-r--r--  compiler/jni/quick/x86_64/calling_convention_x86_64.cc    2
4 files changed, 3 insertions, 4 deletions
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index a6caff1..4344c90 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -158,7 +158,8 @@ Arm64JniCallingConvention::Arm64JniCallingConvention(bool is_static, bool is_syn
                                                       const char* shorty)
     : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
   uint32_t core_spill_mask = CoreSpillMask();
-  for (int x_reg = 0; x_reg < kNumberOfXRegisters; ++x_reg) {
+  DCHECK_EQ(XZR, kNumberOfXRegisters - 1);  // Exclude XZR from the loop (avoid 1 << 32).
+  for (int x_reg = 0; x_reg < kNumberOfXRegisters - 1; ++x_reg) {
     if (((1 << x_reg) & core_spill_mask) != 0) {
       callee_save_regs_.push_back(
           Arm64ManagedRegister::FromXRegister(static_cast<XRegister>(x_reg)));
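Note on the hunk above: kNumberOfXRegisters counts XZR as the last arm64 register, and shifting a 32-bit 1 by that index (32) is undefined behavior in C++, which is why the loop now stops one register short and the DCHECK pins XZR to the last slot. A standalone sketch of the same guard, with enum values assumed to mirror ART's arm64 register numbering:

#include <cstdint>

// Assumed numbering mirroring ART's arm64 XRegister enum: X0..X30, SP, then XZR last.
enum XRegister { X0 = 0, /* ... X1..X30 ... */ SP = 31, XZR = 32, kNumberOfXRegisters = 33 };

void CollectCalleeSaves(uint32_t core_spill_mask) {
  // XZR is never a callee save, and 1u << 32 would be UB on a 32-bit mask,
  // so the loop stops one register short of kNumberOfXRegisters.
  static_assert(XZR == kNumberOfXRegisters - 1, "XZR must be the last register");
  for (int x_reg = 0; x_reg < kNumberOfXRegisters - 1; ++x_reg) {
    if (((1u << x_reg) & core_spill_mask) != 0u) {
      // record x_reg as a spilled core register ...
    }
  }
}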
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index 436fc0c..2e146c4 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -23,7 +23,6 @@
#include "jni/quick/mips64/calling_convention_mips64.h"
#include "jni/quick/x86/calling_convention_x86.h"
#include "jni/quick/x86_64/calling_convention_x86_64.h"
-#include "utils.h"
namespace art {
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
index 8a45f0c..499dd7c 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.cc
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -19,7 +19,6 @@
#include "base/logging.h"
#include "handle_scope-inl.h"
#include "utils/x86/managed_register_x86.h"
-#include "utils.h"
namespace art {
namespace x86 {
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index bbdf1fe..7e92d12 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -16,10 +16,10 @@
#include "calling_convention_x86_64.h"
+#include "base/bit_utils.h"
#include "base/logging.h"
#include "handle_scope-inl.h"
#include "utils/x86_64/managed_register_x86_64.h"
-#include "utils.h"
namespace art {
namespace x86_64 {
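The x86_64 file now pulls in "base/bit_utils.h" directly instead of relying on the catch-all "utils.h". A hedged illustration of the kind of caller that motivates such an include; SpilledRegisterCount is an invented helper for this sketch, and it only assumes that a popcount-style function is among the bit-fiddling utilities the commit relocates:

#include <cstddef>
#include <cstdint>

// Illustration only: frame-size math in a calling convention typically needs
// the number of set bits in a spill mask, the sort of helper that moved from
// utils.h into base/bit_utils.h.
inline std::size_t SpilledRegisterCount(uint32_t spill_mask) {
  return static_cast<std::size_t>(__builtin_popcount(spill_mask));
}

// Usage sketch:
//   std::size_t spill_bytes = SpilledRegisterCount(core_spill_mask) * kFramePointerSize;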